From f919e1bc6f6b0b1012c0959ce6b71145ab59f1ec Mon Sep 17 00:00:00 2001 From: Chris Hawk Date: Tue, 17 Mar 2026 15:09:11 -0700 Subject: [PATCH 01/13] Checkpointing initial pass at merging the DCL --- mmv1/third_party/terraform/go.mod | 1 - .../provider/provider_dcl_resources.go.tmpl | 37 + .../services/apikeys/apikeys_utils.go.tmpl | 101 + .../terraform/services/apikeys/client.go.tmpl | 18 + .../terraform/services/apikeys/key.go.tmpl | 662 + .../services/apikeys/key_internal.go.tmpl | 2909 +++ .../services/apikeys/key_schema.go.tmpl | 310 + .../apikeys/provider_dcl_client_creation.go | 30 + .../services/apikeys/resource_apikeys_key.go | 697 + ...esource_apikeys_key_generated_test.go.tmpl | 452 + .../apikeys/resource_apikeys_key_sweeper.go | 53 + .../assuredworkloads_utils.go.tmpl | 137 + .../services/assuredworkloads/client.go.tmpl | 18 + .../provider_dcl_client_creation.go | 30 + ...esource_assured_workloads_workload.go.tmpl | 971 + ..._workloads_workload_generated_test.go.tmpl | 392 + .../assuredworkloads/workload.go.tmpl | 1121 + .../workload_internal.go.tmpl | 4151 ++++ .../assuredworkloads/workload_schema.go.tmpl | 552 + .../services/cloudbuild/client.go.tmpl | 18 + .../cloudbuild/cloudbuild_utils.go.tmpl | 103 + .../provider_dcl_client_creation.go | 30 + .../resource_cloudbuild_worker_pool.go | 573 + ...resource_cloudbuild_worker_pool_sweeper.go | 53 + .../services/cloudbuild/worker_pool.go.tmpl | 802 + .../cloudbuild/worker_pool_internal.go.tmpl | 3509 +++ .../cloudbuild/worker_pool_schema.go.tmpl | 296 + .../services/clouddeploy/client.go.tmpl | 18 + .../clouddeploy/delivery_pipeline.go.tmpl | 1658 ++ .../delivery_pipeline_internal.go.tmpl | 9009 +++++++ .../delivery_pipeline_schema.go.tmpl | 753 + .../provider_dcl_client_creation.go | 30 + .../resource_clouddeploy_delivery_pipeline.go | 1789 ++ ...y_delivery_pipeline_generated_test.go.tmpl | 755 + ...e_clouddeploy_delivery_pipeline_sweeper.go | 53 + .../resource_clouddeploy_target.go | 1154 + 
..._clouddeploy_target_generated_test.go.tmpl | 544 + .../resource_clouddeploy_target_sweeper.go | 53 + .../services/clouddeploy/target.go.tmpl | 882 + .../clouddeploy/target_internal.go.tmpl | 4011 +++ .../clouddeploy/target_schema.go.tmpl | 488 + .../services/containeraws/client.go.tmpl | 18 + .../services/containeraws/cluster.go.tmpl | 1587 ++ .../containeraws/cluster_internal.go.tmpl | 7909 ++++++ .../containeraws/cluster_schema.go.tmpl | 754 + .../services/containeraws/node_pool.go.tmpl | 1314 + .../containeraws/node_pool_internal.go.tmpl | 6289 +++++ .../containeraws/node_pool_schema.go.tmpl | 661 + .../provider_dcl_client_creation.go | 30 + .../resource_container_aws_cluster.go.tmpl | 1579 ++ ...ntainer_aws_cluster_generated_test.go.tmpl | 1026 + .../resource_container_aws_node_pool.go.tmpl | 1384 ++ ...ainer_aws_node_pool_generated_test.go.tmpl | 1587 ++ .../containerazure/azure_client.go.tmpl | 371 + .../azure_client_internal.go.tmpl | 715 + .../azure_client_schema.go.tmpl | 179 + .../services/containerazure/client.go.tmpl | 18 + .../services/containerazure/cluster.go.tmpl | 1347 + .../containerazure/cluster_internal.go.tmpl | 6926 ++++++ .../containerazure/cluster_schema.go.tmpl | 658 + .../services/containerazure/node_pool.go.tmpl | 772 + .../containerazure/node_pool_internal.go.tmpl | 3346 +++ .../containerazure/node_pool_schema.go.tmpl | 417 + .../provider_dcl_client_creation.go | 30 + .../resource_container_azure_client.go | 272 + ...tainer_azure_client_generated_test.go.tmpl | 98 + .../resource_container_azure_cluster.go.tmpl | 1441 ++ ...ainer_azure_cluster_generated_test.go.tmpl | 672 + ...resource_container_azure_node_pool.go.tmpl | 841 + ...ner_azure_node_pool_generated_test.go.tmpl | 599 + .../terraform/services/dataplex/asset.go.tmpl | 1005 + .../services/dataplex/asset_internal.go.tmpl | 4139 ++++ .../services/dataplex/asset_schema.go.tmpl | 504 + .../services/dataplex/client.go.tmpl | 18 + .../services/dataplex/dataplex_utils.go.tmpl | 11 + 
.../terraform/services/dataplex/lake.go.tmpl | 603 + .../services/dataplex/lake_internal.go.tmpl | 2021 ++ .../services/dataplex/lake_schema.go.tmpl | 280 + .../dataplex/provider_dcl_client_creation.go | 30 + .../dataplex/resource_dataplex_asset.go | 909 + ...urce_dataplex_asset_generated_test.go.tmpl | 239 + .../dataplex/resource_dataplex_lake.go | 555 + ...ource_dataplex_lake_generated_test.go.tmpl | 132 + .../resource_dataplex_lake_sweeper.go | 53 + .../dataplex/resource_dataplex_zone.go | 731 + ...ource_dataplex_zone_generated_test.go.tmpl | 176 + .../terraform/services/dataplex/zone.go.tmpl | 744 + .../services/dataplex/zone_internal.go.tmpl | 2830 +++ .../services/dataplex/zone_schema.go.tmpl | 376 + .../dataproc/autoscaling_policy.go.tmpl | 571 + .../autoscaling_policy_internal.go.tmpl | 2102 ++ .../autoscaling_policy_schema.go.tmpl | 250 + .../services/dataproc/client.go.tmpl | 18 + .../services/dataproc/cluster.go.tmpl | 3457 +++ .../dataproc/cluster_internal.go.tmpl | 18408 ++++++++++++++ .../services/dataproc/cluster_schema.go.tmpl | 1941 ++ .../services/dataproc/dataproc_utils.go.tmpl | 32 + .../dataproc/provider_dcl_client_creation.go | 30 + ...esource_dataproc_workflow_template.go.tmpl | 4377 ++++ ...urce_dataproc_workflow_template_sweeper.go | 53 + .../dataproc/workflow_template.go.tmpl | 3645 +++ .../workflow_template_internal.go.tmpl | 20443 ++++++++++++++++ .../dataproc/workflow_template_schema.go.tmpl | 2230 ++ .../services/firebaserules/client.go.tmpl | 18 + .../provider_dcl_client_creation.go | 30 + .../services/firebaserules/release.go.tmpl | 365 + .../firebaserules/release_internal.go.tmpl | 614 + .../firebaserules/release_schema.go.tmpl | 158 + .../firebaserules/release_utils.go.tmpl | 13 + .../resource_firebaserules_release.go | 245 + ...rebaserules_release_generated_test.go.tmpl | 182 + .../resource_firebaserules_release_sweeper.go | 53 + .../resource_firebaserules_ruleset.go | 409 + ...rebaserules_ruleset_generated_test.go.tmpl | 139 + 
.../resource_firebaserules_ruleset_sweeper.go | 53 + .../services/firebaserules/ruleset.go.tmpl | 539 + .../firebaserules/ruleset_internal.go.tmpl | 1577 ++ .../firebaserules/ruleset_schema.go.tmpl | 211 + .../terraform/services/gkehub/client.go.tmpl | 18 + .../terraform/services/gkehub/feature.go.tmpl | 905 + .../services/gkehub/feature_internal.go.tmpl | 3616 +++ .../gkehub/feature_membership.go.tmpl | 1643 ++ .../feature_membership_internal.go.tmpl | 8174 ++++++ .../gkehub/feature_membership_schema.go.tmpl | 807 + .../services/gkehub/feature_schema.go.tmpl | 331 + .../services/gkehub/hub_beta_utils.go.tmpl | 122 + .../services/gkehub/hub_utils.go.tmpl | 368 + .../services/gkehub/membership.go.tmpl | 902 + .../gkehub/membership_internal.go.tmpl | 3830 +++ .../services/gkehub/membership_schema.go.tmpl | 410 + .../services/gkehub/poco_utils.go.tmpl | 27 + .../gkehub/provider_dcl_client_creation.go | 30 + ...esource_gke_hub_feature_membership.go.tmpl | 1894 ++ ...ce_gke_hub_feature_membership_test.go.tmpl | 9 +- .../recaptchaenterprise/client.go.tmpl | 18 + .../services/recaptchaenterprise/key.go.tmpl | 764 + .../recaptchaenterprise/key_internal.go.tmpl | 2750 +++ .../recaptchaenterprise/key_schema.go.tmpl | 317 + .../provider_dcl_client_creation.go | 30 + .../resource_recaptcha_enterprise_key.go | 689 + ...tcha_enterprise_key_generated_test.go.tmpl | 497 + ...source_recaptcha_enterprise_key_sweeper.go | 53 + .../terraform/tpgdclresource/canonicalize.go | 857 + .../terraform/tpgdclresource/client.go | 7 + .../terraform/tpgdclresource/config.go | 573 + .../terraform/tpgdclresource/context.go | 56 + .../terraform/tpgdclresource/dcl.go | 15 +- .../terraform/tpgdclresource/declarative.go | 370 + .../terraform/tpgdclresource/diff.go | 572 + .../terraform/tpgdclresource/diff_utils.go | 11 + .../terraform/tpgdclresource/errors.go | 142 + .../terraform/tpgdclresource/flatten.go | 70 + .../terraform/tpgdclresource/locations.go | 23 + 
.../terraform/tpgdclresource/marshallers.go | 333 + .../tpgdclresource/operations/compute.go | 117 + .../tpgdclresource/operations/crm.go | 123 + .../tpgdclresource/operations/datastore.go | 68 + .../tpgdclresource/operations/dns.go | 59 + .../tpgdclresource/operations/knative.go | 84 + .../tpgdclresource/operations/monitoring.go | 34 + .../tpgdclresource/operations/operations.go | 116 + .../tpgdclresource/operations/osconfig.go | 32 + .../tpgdclresource/operations/sql.go | 78 + .../terraform/tpgdclresource/project_id.go | 94 + .../terraform/tpgdclresource/resource.go | 22 + .../terraform/tpgdclresource/retry.go | 139 + .../terraform/tpgdclresource/schema.go | 211 + .../terraform/tpgdclresource/strings.go | 149 + .../terraform/tpgdclresource/timestamp.go | 17 + .../tpgdclresource/tpgtools_utils.go | 35 +- .../terraform/tpgdclresource/transport.go | 274 + .../terraform/tpgdclresource/type.go | 51 + .../terraform/tpgdclresource/update.go | 106 + .../terraform/tpgdclresource/utils.go | 16 + .../terraform/tpgdclresource/validate.go | 69 + .../tpgtools_custom_flattens.go.tmpl | 39 - 176 files changed, 192693 insertions(+), 55 deletions(-) create mode 100644 mmv1/third_party/terraform/provider/provider_dcl_resources.go.tmpl create mode 100644 mmv1/third_party/terraform/services/apikeys/apikeys_utils.go.tmpl create mode 100644 mmv1/third_party/terraform/services/apikeys/client.go.tmpl create mode 100644 mmv1/third_party/terraform/services/apikeys/key.go.tmpl create mode 100644 mmv1/third_party/terraform/services/apikeys/key_internal.go.tmpl create mode 100644 mmv1/third_party/terraform/services/apikeys/key_schema.go.tmpl create mode 100644 mmv1/third_party/terraform/services/apikeys/provider_dcl_client_creation.go create mode 100644 mmv1/third_party/terraform/services/apikeys/resource_apikeys_key.go create mode 100644 mmv1/third_party/terraform/services/apikeys/resource_apikeys_key_generated_test.go.tmpl create mode 100644 
mmv1/third_party/terraform/services/apikeys/resource_apikeys_key_sweeper.go create mode 100644 mmv1/third_party/terraform/services/assuredworkloads/assuredworkloads_utils.go.tmpl create mode 100644 mmv1/third_party/terraform/services/assuredworkloads/client.go.tmpl create mode 100644 mmv1/third_party/terraform/services/assuredworkloads/provider_dcl_client_creation.go create mode 100644 mmv1/third_party/terraform/services/assuredworkloads/resource_assured_workloads_workload.go.tmpl create mode 100644 mmv1/third_party/terraform/services/assuredworkloads/resource_assured_workloads_workload_generated_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/assuredworkloads/workload.go.tmpl create mode 100644 mmv1/third_party/terraform/services/assuredworkloads/workload_internal.go.tmpl create mode 100644 mmv1/third_party/terraform/services/assuredworkloads/workload_schema.go.tmpl create mode 100644 mmv1/third_party/terraform/services/cloudbuild/client.go.tmpl create mode 100644 mmv1/third_party/terraform/services/cloudbuild/cloudbuild_utils.go.tmpl create mode 100644 mmv1/third_party/terraform/services/cloudbuild/provider_dcl_client_creation.go create mode 100644 mmv1/third_party/terraform/services/cloudbuild/resource_cloudbuild_worker_pool.go create mode 100644 mmv1/third_party/terraform/services/cloudbuild/resource_cloudbuild_worker_pool_sweeper.go create mode 100644 mmv1/third_party/terraform/services/cloudbuild/worker_pool.go.tmpl create mode 100644 mmv1/third_party/terraform/services/cloudbuild/worker_pool_internal.go.tmpl create mode 100644 mmv1/third_party/terraform/services/cloudbuild/worker_pool_schema.go.tmpl create mode 100644 mmv1/third_party/terraform/services/clouddeploy/client.go.tmpl create mode 100644 mmv1/third_party/terraform/services/clouddeploy/delivery_pipeline.go.tmpl create mode 100644 mmv1/third_party/terraform/services/clouddeploy/delivery_pipeline_internal.go.tmpl create mode 100644 
mmv1/third_party/terraform/services/clouddeploy/delivery_pipeline_schema.go.tmpl create mode 100644 mmv1/third_party/terraform/services/clouddeploy/provider_dcl_client_creation.go create mode 100644 mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_delivery_pipeline.go create mode 100644 mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_delivery_pipeline_generated_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_delivery_pipeline_sweeper.go create mode 100644 mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_target.go create mode 100644 mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_target_generated_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_target_sweeper.go create mode 100644 mmv1/third_party/terraform/services/clouddeploy/target.go.tmpl create mode 100644 mmv1/third_party/terraform/services/clouddeploy/target_internal.go.tmpl create mode 100644 mmv1/third_party/terraform/services/clouddeploy/target_schema.go.tmpl create mode 100644 mmv1/third_party/terraform/services/containeraws/client.go.tmpl create mode 100644 mmv1/third_party/terraform/services/containeraws/cluster.go.tmpl create mode 100644 mmv1/third_party/terraform/services/containeraws/cluster_internal.go.tmpl create mode 100644 mmv1/third_party/terraform/services/containeraws/cluster_schema.go.tmpl create mode 100644 mmv1/third_party/terraform/services/containeraws/node_pool.go.tmpl create mode 100644 mmv1/third_party/terraform/services/containeraws/node_pool_internal.go.tmpl create mode 100644 mmv1/third_party/terraform/services/containeraws/node_pool_schema.go.tmpl create mode 100644 mmv1/third_party/terraform/services/containeraws/provider_dcl_client_creation.go create mode 100644 mmv1/third_party/terraform/services/containeraws/resource_container_aws_cluster.go.tmpl create mode 100644 
mmv1/third_party/terraform/services/containeraws/resource_container_aws_cluster_generated_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/containeraws/resource_container_aws_node_pool.go.tmpl create mode 100644 mmv1/third_party/terraform/services/containeraws/resource_container_aws_node_pool_generated_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/containerazure/azure_client.go.tmpl create mode 100644 mmv1/third_party/terraform/services/containerazure/azure_client_internal.go.tmpl create mode 100644 mmv1/third_party/terraform/services/containerazure/azure_client_schema.go.tmpl create mode 100644 mmv1/third_party/terraform/services/containerazure/client.go.tmpl create mode 100644 mmv1/third_party/terraform/services/containerazure/cluster.go.tmpl create mode 100644 mmv1/third_party/terraform/services/containerazure/cluster_internal.go.tmpl create mode 100644 mmv1/third_party/terraform/services/containerazure/cluster_schema.go.tmpl create mode 100644 mmv1/third_party/terraform/services/containerazure/node_pool.go.tmpl create mode 100644 mmv1/third_party/terraform/services/containerazure/node_pool_internal.go.tmpl create mode 100644 mmv1/third_party/terraform/services/containerazure/node_pool_schema.go.tmpl create mode 100644 mmv1/third_party/terraform/services/containerazure/provider_dcl_client_creation.go create mode 100644 mmv1/third_party/terraform/services/containerazure/resource_container_azure_client.go create mode 100644 mmv1/third_party/terraform/services/containerazure/resource_container_azure_client_generated_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/containerazure/resource_container_azure_cluster.go.tmpl create mode 100644 mmv1/third_party/terraform/services/containerazure/resource_container_azure_cluster_generated_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/containerazure/resource_container_azure_node_pool.go.tmpl create mode 100644 
mmv1/third_party/terraform/services/containerazure/resource_container_azure_node_pool_generated_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/dataplex/asset.go.tmpl create mode 100644 mmv1/third_party/terraform/services/dataplex/asset_internal.go.tmpl create mode 100644 mmv1/third_party/terraform/services/dataplex/asset_schema.go.tmpl create mode 100644 mmv1/third_party/terraform/services/dataplex/client.go.tmpl create mode 100644 mmv1/third_party/terraform/services/dataplex/dataplex_utils.go.tmpl create mode 100644 mmv1/third_party/terraform/services/dataplex/lake.go.tmpl create mode 100644 mmv1/third_party/terraform/services/dataplex/lake_internal.go.tmpl create mode 100644 mmv1/third_party/terraform/services/dataplex/lake_schema.go.tmpl create mode 100644 mmv1/third_party/terraform/services/dataplex/provider_dcl_client_creation.go create mode 100644 mmv1/third_party/terraform/services/dataplex/resource_dataplex_asset.go create mode 100644 mmv1/third_party/terraform/services/dataplex/resource_dataplex_asset_generated_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/dataplex/resource_dataplex_lake.go create mode 100644 mmv1/third_party/terraform/services/dataplex/resource_dataplex_lake_generated_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/dataplex/resource_dataplex_lake_sweeper.go create mode 100644 mmv1/third_party/terraform/services/dataplex/resource_dataplex_zone.go create mode 100644 mmv1/third_party/terraform/services/dataplex/resource_dataplex_zone_generated_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/dataplex/zone.go.tmpl create mode 100644 mmv1/third_party/terraform/services/dataplex/zone_internal.go.tmpl create mode 100644 mmv1/third_party/terraform/services/dataplex/zone_schema.go.tmpl create mode 100644 mmv1/third_party/terraform/services/dataproc/autoscaling_policy.go.tmpl create mode 100644 
mmv1/third_party/terraform/services/dataproc/autoscaling_policy_internal.go.tmpl create mode 100644 mmv1/third_party/terraform/services/dataproc/autoscaling_policy_schema.go.tmpl create mode 100644 mmv1/third_party/terraform/services/dataproc/client.go.tmpl create mode 100644 mmv1/third_party/terraform/services/dataproc/cluster.go.tmpl create mode 100644 mmv1/third_party/terraform/services/dataproc/cluster_internal.go.tmpl create mode 100644 mmv1/third_party/terraform/services/dataproc/cluster_schema.go.tmpl create mode 100644 mmv1/third_party/terraform/services/dataproc/dataproc_utils.go.tmpl create mode 100644 mmv1/third_party/terraform/services/dataproc/provider_dcl_client_creation.go create mode 100644 mmv1/third_party/terraform/services/dataproc/resource_dataproc_workflow_template.go.tmpl create mode 100644 mmv1/third_party/terraform/services/dataproc/resource_dataproc_workflow_template_sweeper.go create mode 100644 mmv1/third_party/terraform/services/dataproc/workflow_template.go.tmpl create mode 100644 mmv1/third_party/terraform/services/dataproc/workflow_template_internal.go.tmpl create mode 100644 mmv1/third_party/terraform/services/dataproc/workflow_template_schema.go.tmpl create mode 100644 mmv1/third_party/terraform/services/firebaserules/client.go.tmpl create mode 100644 mmv1/third_party/terraform/services/firebaserules/provider_dcl_client_creation.go create mode 100644 mmv1/third_party/terraform/services/firebaserules/release.go.tmpl create mode 100644 mmv1/third_party/terraform/services/firebaserules/release_internal.go.tmpl create mode 100644 mmv1/third_party/terraform/services/firebaserules/release_schema.go.tmpl create mode 100644 mmv1/third_party/terraform/services/firebaserules/release_utils.go.tmpl create mode 100644 mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_release.go create mode 100644 mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_release_generated_test.go.tmpl create mode 100644 
mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_release_sweeper.go create mode 100644 mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_ruleset.go create mode 100644 mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_ruleset_generated_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_ruleset_sweeper.go create mode 100644 mmv1/third_party/terraform/services/firebaserules/ruleset.go.tmpl create mode 100644 mmv1/third_party/terraform/services/firebaserules/ruleset_internal.go.tmpl create mode 100644 mmv1/third_party/terraform/services/firebaserules/ruleset_schema.go.tmpl create mode 100644 mmv1/third_party/terraform/services/gkehub/client.go.tmpl create mode 100644 mmv1/third_party/terraform/services/gkehub/feature.go.tmpl create mode 100644 mmv1/third_party/terraform/services/gkehub/feature_internal.go.tmpl create mode 100644 mmv1/third_party/terraform/services/gkehub/feature_membership.go.tmpl create mode 100644 mmv1/third_party/terraform/services/gkehub/feature_membership_internal.go.tmpl create mode 100644 mmv1/third_party/terraform/services/gkehub/feature_membership_schema.go.tmpl create mode 100644 mmv1/third_party/terraform/services/gkehub/feature_schema.go.tmpl create mode 100644 mmv1/third_party/terraform/services/gkehub/hub_beta_utils.go.tmpl create mode 100644 mmv1/third_party/terraform/services/gkehub/hub_utils.go.tmpl create mode 100644 mmv1/third_party/terraform/services/gkehub/membership.go.tmpl create mode 100644 mmv1/third_party/terraform/services/gkehub/membership_internal.go.tmpl create mode 100644 mmv1/third_party/terraform/services/gkehub/membership_schema.go.tmpl create mode 100644 mmv1/third_party/terraform/services/gkehub/poco_utils.go.tmpl create mode 100644 mmv1/third_party/terraform/services/gkehub/provider_dcl_client_creation.go create mode 100644 
mmv1/third_party/terraform/services/gkehub/resource_gke_hub_feature_membership.go.tmpl create mode 100644 mmv1/third_party/terraform/services/recaptchaenterprise/client.go.tmpl create mode 100644 mmv1/third_party/terraform/services/recaptchaenterprise/key.go.tmpl create mode 100644 mmv1/third_party/terraform/services/recaptchaenterprise/key_internal.go.tmpl create mode 100644 mmv1/third_party/terraform/services/recaptchaenterprise/key_schema.go.tmpl create mode 100644 mmv1/third_party/terraform/services/recaptchaenterprise/provider_dcl_client_creation.go create mode 100644 mmv1/third_party/terraform/services/recaptchaenterprise/resource_recaptcha_enterprise_key.go create mode 100644 mmv1/third_party/terraform/services/recaptchaenterprise/resource_recaptcha_enterprise_key_generated_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/recaptchaenterprise/resource_recaptcha_enterprise_key_sweeper.go create mode 100755 mmv1/third_party/terraform/tpgdclresource/canonicalize.go create mode 100755 mmv1/third_party/terraform/tpgdclresource/client.go create mode 100755 mmv1/third_party/terraform/tpgdclresource/config.go create mode 100755 mmv1/third_party/terraform/tpgdclresource/context.go create mode 100755 mmv1/third_party/terraform/tpgdclresource/declarative.go create mode 100755 mmv1/third_party/terraform/tpgdclresource/diff.go create mode 100755 mmv1/third_party/terraform/tpgdclresource/diff_utils.go create mode 100755 mmv1/third_party/terraform/tpgdclresource/errors.go create mode 100755 mmv1/third_party/terraform/tpgdclresource/flatten.go create mode 100755 mmv1/third_party/terraform/tpgdclresource/locations.go create mode 100755 mmv1/third_party/terraform/tpgdclresource/marshallers.go create mode 100755 mmv1/third_party/terraform/tpgdclresource/operations/compute.go create mode 100755 mmv1/third_party/terraform/tpgdclresource/operations/crm.go create mode 100755 mmv1/third_party/terraform/tpgdclresource/operations/datastore.go create mode 100755 
mmv1/third_party/terraform/tpgdclresource/operations/dns.go create mode 100755 mmv1/third_party/terraform/tpgdclresource/operations/knative.go create mode 100755 mmv1/third_party/terraform/tpgdclresource/operations/monitoring.go create mode 100755 mmv1/third_party/terraform/tpgdclresource/operations/operations.go create mode 100755 mmv1/third_party/terraform/tpgdclresource/operations/osconfig.go create mode 100755 mmv1/third_party/terraform/tpgdclresource/operations/sql.go create mode 100755 mmv1/third_party/terraform/tpgdclresource/project_id.go create mode 100755 mmv1/third_party/terraform/tpgdclresource/resource.go create mode 100755 mmv1/third_party/terraform/tpgdclresource/retry.go create mode 100755 mmv1/third_party/terraform/tpgdclresource/schema.go create mode 100755 mmv1/third_party/terraform/tpgdclresource/strings.go create mode 100755 mmv1/third_party/terraform/tpgdclresource/timestamp.go create mode 100755 mmv1/third_party/terraform/tpgdclresource/transport.go create mode 100755 mmv1/third_party/terraform/tpgdclresource/type.go create mode 100755 mmv1/third_party/terraform/tpgdclresource/update.go create mode 100755 mmv1/third_party/terraform/tpgdclresource/utils.go create mode 100755 mmv1/third_party/terraform/tpgdclresource/validate.go delete mode 100644 mmv1/third_party/terraform/tpgresource/tpgtools_custom_flattens.go.tmpl diff --git a/mmv1/third_party/terraform/go.mod b/mmv1/third_party/terraform/go.mod index 9ff4de001473..ccac4a514523 100644 --- a/mmv1/third_party/terraform/go.mod +++ b/mmv1/third_party/terraform/go.mod @@ -7,7 +7,6 @@ require ( cloud.google.com/go/auth/oauth2adapt v0.2.8 cloud.google.com/go/bigquery v1.73.1 cloud.google.com/go/bigtable v1.42.0 - github.com/GoogleCloudPlatform/declarative-resource-client-library v1.84.0 github.com/apparentlymart/go-cidr v1.1.0 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/dnaeon/go-vcr v1.0.1 diff --git 
a/mmv1/third_party/terraform/provider/provider_dcl_resources.go.tmpl b/mmv1/third_party/terraform/provider/provider_dcl_resources.go.tmpl new file mode 100644 index 000000000000..ab4567d5caae --- /dev/null +++ b/mmv1/third_party/terraform/provider/provider_dcl_resources.go.tmpl @@ -0,0 +1,37 @@ +package provider + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/services/apikeys" + "github.com/hashicorp/terraform-provider-google/google/services/assuredworkloads" + "github.com/hashicorp/terraform-provider-google/google/services/cloudbuild" + "github.com/hashicorp/terraform-provider-google/google/services/clouddeploy" + "github.com/hashicorp/terraform-provider-google/google/services/containeraws" + "github.com/hashicorp/terraform-provider-google/google/services/containerazure" + "github.com/hashicorp/terraform-provider-google/google/services/dataplex" + "github.com/hashicorp/terraform-provider-google/google/services/dataproc" + "github.com/hashicorp/terraform-provider-google/google/services/firebaserules" + "github.com/hashicorp/terraform-provider-google/google/services/gkehub" + "github.com/hashicorp/terraform-provider-google/google/services/recaptchaenterprise" +) + +var dclResources = map[string]*schema.Resource{ + "google_apikeys_key": apikeys.ResourceApikeysKey(), + "google_assured_workloads_workload": assuredworkloads.ResourceAssuredWorkloadsWorkload(), + "google_cloudbuild_worker_pool": cloudbuild.ResourceCloudbuildWorkerPool(), + "google_clouddeploy_delivery_pipeline": clouddeploy.ResourceClouddeployDeliveryPipeline(), + "google_clouddeploy_target": clouddeploy.ResourceClouddeployTarget(), + "google_container_aws_cluster": containeraws.ResourceContainerAwsCluster(), + "google_container_aws_node_pool": containeraws.ResourceContainerAwsNodePool(), + "google_container_azure_client": containerazure.ResourceContainerAzureClient(), + "google_container_azure_cluster": 
containerazure.ResourceContainerAzureCluster(), + "google_container_azure_node_pool": containerazure.ResourceContainerAzureNodePool(), + "google_dataplex_asset": dataplex.ResourceDataplexAsset(), + "google_dataplex_lake": dataplex.ResourceDataplexLake(), + "google_dataplex_zone": dataplex.ResourceDataplexZone(), + "google_dataproc_workflow_template": dataproc.ResourceDataprocWorkflowTemplate(), + "google_firebaserules_release": firebaserules.ResourceFirebaserulesRelease(), + "google_firebaserules_ruleset": firebaserules.ResourceFirebaserulesRuleset(), + "google_gke_hub_feature_membership": gkehub.ResourceGkeHubFeatureMembership(), + "google_recaptcha_enterprise_key": recaptchaenterprise.ResourceRecaptchaEnterpriseKey(), +} diff --git a/mmv1/third_party/terraform/services/apikeys/apikeys_utils.go.tmpl b/mmv1/third_party/terraform/services/apikeys/apikeys_utils.go.tmpl new file mode 100644 index 000000000000..9fb071bfb404 --- /dev/null +++ b/mmv1/third_party/terraform/services/apikeys/apikeys_utils.go.tmpl @@ -0,0 +1,101 @@ +package apikeys + +import ( + "bytes" + "context" + "encoding/json" + "io" + "time" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "google.golang.org/api/googleapi" +) + +func keyStringGetURL(userBasePath string, r *Key) (string, error) { + nr := r.urlNormalized() + params := map[string]any{ + "project": dcl.ValueOrEmptyString(nr.Project), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/global/keys/{{ "{{" }}name{{ "}}" }}/keyString", "https://apikeys.googleapis.com/v2/", userBasePath, params), nil +} + +func (c *Client) getKeyStringRaw(ctx context.Context, r *Key) ([]byte, error) { + + u, err := keyStringGetURL(c.Config.BasePath, r) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + b, err 
:= io.ReadAll(resp.Response.Body) + if err != nil { + return nil, err + } + + return b, nil +} + +func (c *Client) getKeyRaw(ctx context.Context, r *Key) ([]byte, error) { + + u, err := r.getURL(c.Config.BasePath) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + b, err := io.ReadAll(resp.Response.Body) + if err != nil { + return nil, err + } + + return b, nil +} + +func (c *Client) GetKey(ctx context.Context, r *Key) (*Key, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(60*time.Second)) + defer cancel() + + b, err := c.getKeyRaw(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + return nil, &googleapi.Error{ + Code: 404, + Message: err.Error(), + } + } + return nil, err + } + result, err := unmarshalKey(b, c, r) + if err != nil { + return nil, err + } + // Get the value of KeyString through a separate api method. 
+ b, err = c.getKeyStringRaw(ctx, r) + if err != nil { + return nil, err + } + var m map[string]any + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + result.KeyString = dcl.FlattenString(m["keyString"]) + result.Project = r.Project + result.Name = r.Name + + c.Config.Logger.Infof("Retrieved raw result state: %v", result) + c.Config.Logger.Infof("Canonicalizing with specified state: %v", r) + result, err = canonicalizeKeyNewState(c, result, r) + if err != nil { + return nil, err + } + c.Config.Logger.Infof("Created result state: %v", result) + + return result, nil +} diff --git a/mmv1/third_party/terraform/services/apikeys/client.go.tmpl b/mmv1/third_party/terraform/services/apikeys/client.go.tmpl new file mode 100644 index 000000000000..737e48c574e1 --- /dev/null +++ b/mmv1/third_party/terraform/services/apikeys/client.go.tmpl @@ -0,0 +1,18 @@ +package apikeys + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +// The Client is the base struct of all operations. This will receive the +// Get, Delete, List, and Apply operations on all resources. +type Client struct { + Config *dcl.Config +} + +// NewClient creates a client that retries all operations a few times each. 
+func NewClient(c *dcl.Config) *Client { + return &Client{ + Config: c, + } +} diff --git a/mmv1/third_party/terraform/services/apikeys/key.go.tmpl b/mmv1/third_party/terraform/services/apikeys/key.go.tmpl new file mode 100644 index 000000000000..3c57eb375de4 --- /dev/null +++ b/mmv1/third_party/terraform/services/apikeys/key.go.tmpl @@ -0,0 +1,662 @@ +package apikeys + +import ( + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "time" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +type Key struct { + Name *string `json:"name"` + DisplayName *string `json:"displayName"` + KeyString *string `json:"keyString"` + Uid *string `json:"uid"` + ServiceAccountEmail *string `json:"serviceAccountEmail"` + Restrictions *KeyRestrictions `json:"restrictions"` + Project *string `json:"project"` +} + +func (r *Key) String() string { + return dcl.SprintResource(r) +} + +type KeyRestrictions struct { + empty bool `json:"-"` + BrowserKeyRestrictions *KeyRestrictionsBrowserKeyRestrictions `json:"browserKeyRestrictions"` + ServerKeyRestrictions *KeyRestrictionsServerKeyRestrictions `json:"serverKeyRestrictions"` + AndroidKeyRestrictions *KeyRestrictionsAndroidKeyRestrictions `json:"androidKeyRestrictions"` + IosKeyRestrictions *KeyRestrictionsIosKeyRestrictions `json:"iosKeyRestrictions"` + ApiTargets []KeyRestrictionsApiTargets `json:"apiTargets"` +} + +type jsonKeyRestrictions KeyRestrictions + +func (r *KeyRestrictions) UnmarshalJSON(data []byte) error { + var res jsonKeyRestrictions + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyKeyRestrictions + } else { + + r.BrowserKeyRestrictions = res.BrowserKeyRestrictions + + r.ServerKeyRestrictions = res.ServerKeyRestrictions + + r.AndroidKeyRestrictions = res.AndroidKeyRestrictions + + r.IosKeyRestrictions = res.IosKeyRestrictions + + r.ApiTargets = res.ApiTargets + + } + return 
nil
+}
+
+// This object is used to assert a desired state where this KeyRestrictions is
+// empty. Go lacks global const objects, but this object should be treated
+// as one. Modifying this object will have undesirable results.
+var EmptyKeyRestrictions *KeyRestrictions = &KeyRestrictions{empty: true}
+
+// Empty reports whether this value is the canonical "empty" sentinel.
+func (r *KeyRestrictions) Empty() bool {
+	return r.empty
+}
+
+// String renders the value for logging and diffing via the DCL helper.
+func (r *KeyRestrictions) String() string {
+	return dcl.SprintResource(r)
+}
+
+// HashCode returns a SHA-256 digest of the rendered value for cheap comparisons.
+func (r *KeyRestrictions) HashCode() string {
+	// Placeholder for a more complex hash method that handles ordering, etc
+	// Hash resource body for easy comparison later
+	hash := sha256.Sum256([]byte(r.String()))
+	return fmt.Sprintf("%x", hash)
+}
+
+type KeyRestrictionsBrowserKeyRestrictions struct {
+	// empty marks the sentinel value; never serialized (json:"-").
+	empty            bool     `json:"-"`
+	AllowedReferrers []string `json:"allowedReferrers"`
+}
+
+// jsonKeyRestrictionsBrowserKeyRestrictions is a method-free alias so the typed
+// decode below does not recurse back into UnmarshalJSON.
+type jsonKeyRestrictionsBrowserKeyRestrictions KeyRestrictionsBrowserKeyRestrictions
+
+// UnmarshalJSON maps an explicitly-empty JSON object ({}) to the Empty
+// sentinel so "present but empty" is distinguishable from nil. The error of
+// the second Unmarshal (into a map, used only for the emptiness probe) is
+// unchecked: it cannot fail once the typed decode of the same bytes succeeded.
+func (r *KeyRestrictionsBrowserKeyRestrictions) UnmarshalJSON(data []byte) error {
+	var res jsonKeyRestrictionsBrowserKeyRestrictions
+	if err := json.Unmarshal(data, &res); err != nil {
+		return err
+	}
+
+	var m map[string]interface{}
+	json.Unmarshal(data, &m)
+
+	if len(m) == 0 {
+		*r = *EmptyKeyRestrictionsBrowserKeyRestrictions
+	} else {
+
+		r.AllowedReferrers = res.AllowedReferrers
+
+	}
+	return nil
+}
+
+// This object is used to assert a desired state where this KeyRestrictionsBrowserKeyRestrictions is
+// empty. Go lacks global const objects, but this object should be treated
+// as one. Modifying this object will have undesirable results.
+var EmptyKeyRestrictionsBrowserKeyRestrictions *KeyRestrictionsBrowserKeyRestrictions = &KeyRestrictionsBrowserKeyRestrictions{empty: true}
+
+// Empty reports whether this value is the canonical "empty" sentinel.
+func (r *KeyRestrictionsBrowserKeyRestrictions) Empty() bool {
+	return r.empty
+}
+
+// String renders the value for logging and diffing via the DCL helper.
+func (r *KeyRestrictionsBrowserKeyRestrictions) String() string {
+	return dcl.SprintResource(r)
+}
+
+// HashCode returns a SHA-256 digest of the rendered value for cheap comparisons.
+func (r *KeyRestrictionsBrowserKeyRestrictions) HashCode() string {
+	// Placeholder for a more complex hash method that handles ordering, etc
+	// Hash resource body for easy comparison later
+	hash := sha256.Sum256([]byte(r.String()))
+	return fmt.Sprintf("%x", hash)
+}
+
+type KeyRestrictionsServerKeyRestrictions struct {
+	// empty marks the sentinel value; never serialized (json:"-").
+	empty      bool     `json:"-"`
+	AllowedIps []string `json:"allowedIps"`
+}
+
+// jsonKeyRestrictionsServerKeyRestrictions is a method-free alias so the typed
+// decode below does not recurse back into UnmarshalJSON.
+type jsonKeyRestrictionsServerKeyRestrictions KeyRestrictionsServerKeyRestrictions
+
+// UnmarshalJSON maps an explicitly-empty JSON object ({}) to the Empty
+// sentinel so "present but empty" is distinguishable from nil. The error of
+// the second Unmarshal (emptiness probe only) is unchecked: it cannot fail
+// once the typed decode of the same bytes succeeded.
+func (r *KeyRestrictionsServerKeyRestrictions) UnmarshalJSON(data []byte) error {
+	var res jsonKeyRestrictionsServerKeyRestrictions
+	if err := json.Unmarshal(data, &res); err != nil {
+		return err
+	}
+
+	var m map[string]interface{}
+	json.Unmarshal(data, &m)
+
+	if len(m) == 0 {
+		*r = *EmptyKeyRestrictionsServerKeyRestrictions
+	} else {
+
+		r.AllowedIps = res.AllowedIps
+
+	}
+	return nil
+}
+
+// This object is used to assert a desired state where this KeyRestrictionsServerKeyRestrictions is
+// empty. Go lacks global const objects, but this object should be treated
+// as one. Modifying this object will have undesirable results.
+var EmptyKeyRestrictionsServerKeyRestrictions *KeyRestrictionsServerKeyRestrictions = &KeyRestrictionsServerKeyRestrictions{empty: true}
+
+// Empty reports whether this value is the canonical "empty" sentinel.
+func (r *KeyRestrictionsServerKeyRestrictions) Empty() bool {
+	return r.empty
+}
+
+// String renders the value for logging and diffing via the DCL helper.
+func (r *KeyRestrictionsServerKeyRestrictions) String() string {
+	return dcl.SprintResource(r)
+}
+
+// HashCode returns a SHA-256 digest of the rendered value for cheap comparisons.
+func (r *KeyRestrictionsServerKeyRestrictions) HashCode() string {
+	// Placeholder for a more complex hash method that handles ordering, etc
+	// Hash resource body for easy comparison later
+	hash := sha256.Sum256([]byte(r.String()))
+	return fmt.Sprintf("%x", hash)
+}
+
+type KeyRestrictionsAndroidKeyRestrictions struct {
+	// empty marks the sentinel value; never serialized (json:"-").
+	empty               bool                                                       `json:"-"`
+	AllowedApplications []KeyRestrictionsAndroidKeyRestrictionsAllowedApplications `json:"allowedApplications"`
+}
+
+// jsonKeyRestrictionsAndroidKeyRestrictions is a method-free alias so the typed
+// decode below does not recurse back into UnmarshalJSON.
+type jsonKeyRestrictionsAndroidKeyRestrictions KeyRestrictionsAndroidKeyRestrictions
+
+// UnmarshalJSON maps an explicitly-empty JSON object ({}) to the Empty
+// sentinel so "present but empty" is distinguishable from nil. The error of
+// the second Unmarshal (emptiness probe only) is unchecked: it cannot fail
+// once the typed decode of the same bytes succeeded.
+func (r *KeyRestrictionsAndroidKeyRestrictions) UnmarshalJSON(data []byte) error {
+	var res jsonKeyRestrictionsAndroidKeyRestrictions
+	if err := json.Unmarshal(data, &res); err != nil {
+		return err
+	}
+
+	var m map[string]interface{}
+	json.Unmarshal(data, &m)
+
+	if len(m) == 0 {
+		*r = *EmptyKeyRestrictionsAndroidKeyRestrictions
+	} else {
+
+		r.AllowedApplications = res.AllowedApplications
+
+	}
+	return nil
+}
+
+// This object is used to assert a desired state where this KeyRestrictionsAndroidKeyRestrictions is
+// empty. Go lacks global const objects, but this object should be treated
+// as one. Modifying this object will have undesirable results.
+var EmptyKeyRestrictionsAndroidKeyRestrictions *KeyRestrictionsAndroidKeyRestrictions = &KeyRestrictionsAndroidKeyRestrictions{empty: true}
+
+// Empty reports whether this value is the canonical "empty" sentinel.
+func (r *KeyRestrictionsAndroidKeyRestrictions) Empty() bool {
+	return r.empty
+}
+
+// String renders the value for logging and diffing via the DCL helper.
+func (r *KeyRestrictionsAndroidKeyRestrictions) String() string {
+	return dcl.SprintResource(r)
+}
+
+// HashCode returns a SHA-256 digest of the rendered value for cheap comparisons.
+func (r *KeyRestrictionsAndroidKeyRestrictions) HashCode() string {
+	// Placeholder for a more complex hash method that handles ordering, etc
+	// Hash resource body for easy comparison later
+	hash := sha256.Sum256([]byte(r.String()))
+	return fmt.Sprintf("%x", hash)
+}
+
+type KeyRestrictionsAndroidKeyRestrictionsAllowedApplications struct {
+	// empty marks the sentinel value; never serialized (json:"-").
+	empty           bool    `json:"-"`
+	Sha1Fingerprint *string `json:"sha1Fingerprint"`
+	PackageName     *string `json:"packageName"`
+}
+
+// jsonKeyRestrictionsAndroidKeyRestrictionsAllowedApplications is a method-free
+// alias so the typed decode below does not recurse back into UnmarshalJSON.
+type jsonKeyRestrictionsAndroidKeyRestrictionsAllowedApplications KeyRestrictionsAndroidKeyRestrictionsAllowedApplications
+
+// UnmarshalJSON maps an explicitly-empty JSON object ({}) to the Empty
+// sentinel so "present but empty" is distinguishable from nil. The error of
+// the second Unmarshal (emptiness probe only) is unchecked: it cannot fail
+// once the typed decode of the same bytes succeeded.
+func (r *KeyRestrictionsAndroidKeyRestrictionsAllowedApplications) UnmarshalJSON(data []byte) error {
+	var res jsonKeyRestrictionsAndroidKeyRestrictionsAllowedApplications
+	if err := json.Unmarshal(data, &res); err != nil {
+		return err
+	}
+
+	var m map[string]interface{}
+	json.Unmarshal(data, &m)
+
+	if len(m) == 0 {
+		*r = *EmptyKeyRestrictionsAndroidKeyRestrictionsAllowedApplications
+	} else {
+
+		r.Sha1Fingerprint = res.Sha1Fingerprint
+
+		r.PackageName = res.PackageName
+
+	}
+	return nil
+}
+
+// This object is used to assert a desired state where this KeyRestrictionsAndroidKeyRestrictionsAllowedApplications is
+// empty. Go lacks global const objects, but this object should be treated
+// as one. Modifying this object will have undesirable results.
+var EmptyKeyRestrictionsAndroidKeyRestrictionsAllowedApplications *KeyRestrictionsAndroidKeyRestrictionsAllowedApplications = &KeyRestrictionsAndroidKeyRestrictionsAllowedApplications{empty: true}
+
+// Empty reports whether this value is the canonical "empty" sentinel.
+func (r *KeyRestrictionsAndroidKeyRestrictionsAllowedApplications) Empty() bool {
+	return r.empty
+}
+
+// String renders the value for logging and diffing via the DCL helper.
+func (r *KeyRestrictionsAndroidKeyRestrictionsAllowedApplications) String() string {
+	return dcl.SprintResource(r)
+}
+
+// HashCode returns a SHA-256 digest of the rendered value for cheap comparisons.
+func (r *KeyRestrictionsAndroidKeyRestrictionsAllowedApplications) HashCode() string {
+	// Placeholder for a more complex hash method that handles ordering, etc
+	// Hash resource body for easy comparison later
+	hash := sha256.Sum256([]byte(r.String()))
+	return fmt.Sprintf("%x", hash)
+}
+
+type KeyRestrictionsIosKeyRestrictions struct {
+	// empty marks the sentinel value; never serialized (json:"-").
+	empty            bool     `json:"-"`
+	AllowedBundleIds []string `json:"allowedBundleIds"`
+}
+
+// jsonKeyRestrictionsIosKeyRestrictions is a method-free alias so the typed
+// decode below does not recurse back into UnmarshalJSON.
+type jsonKeyRestrictionsIosKeyRestrictions KeyRestrictionsIosKeyRestrictions
+
+// UnmarshalJSON maps an explicitly-empty JSON object ({}) to the Empty
+// sentinel so "present but empty" is distinguishable from nil. The error of
+// the second Unmarshal (emptiness probe only) is unchecked: it cannot fail
+// once the typed decode of the same bytes succeeded.
+func (r *KeyRestrictionsIosKeyRestrictions) UnmarshalJSON(data []byte) error {
+	var res jsonKeyRestrictionsIosKeyRestrictions
+	if err := json.Unmarshal(data, &res); err != nil {
+		return err
+	}
+
+	var m map[string]interface{}
+	json.Unmarshal(data, &m)
+
+	if len(m) == 0 {
+		*r = *EmptyKeyRestrictionsIosKeyRestrictions
+	} else {
+
+		r.AllowedBundleIds = res.AllowedBundleIds
+
+	}
+	return nil
+}
+
+// This object is used to assert a desired state where this KeyRestrictionsIosKeyRestrictions is
+// empty. Go lacks global const objects, but this object should be treated
+// as one. Modifying this object will have undesirable results.
+var EmptyKeyRestrictionsIosKeyRestrictions *KeyRestrictionsIosKeyRestrictions = &KeyRestrictionsIosKeyRestrictions{empty: true}
+
+// Empty reports whether this value is the canonical "empty" sentinel.
+func (r *KeyRestrictionsIosKeyRestrictions) Empty() bool {
+	return r.empty
+}
+
+// String renders the value for logging and diffing via the DCL helper.
+func (r *KeyRestrictionsIosKeyRestrictions) String() string {
+	return dcl.SprintResource(r)
+}
+
+// HashCode returns a SHA-256 digest of the rendered value for cheap comparisons.
+func (r *KeyRestrictionsIosKeyRestrictions) HashCode() string {
+	// Placeholder for a more complex hash method that handles ordering, etc
+	// Hash resource body for easy comparison later
+	hash := sha256.Sum256([]byte(r.String()))
+	return fmt.Sprintf("%x", hash)
+}
+
+type KeyRestrictionsApiTargets struct {
+	// empty marks the sentinel value; never serialized (json:"-").
+	empty   bool     `json:"-"`
+	Service *string  `json:"service"`
+	Methods []string `json:"methods"`
+}
+
+// jsonKeyRestrictionsApiTargets is a method-free alias so the typed decode
+// below does not recurse back into UnmarshalJSON.
+type jsonKeyRestrictionsApiTargets KeyRestrictionsApiTargets
+
+// UnmarshalJSON maps an explicitly-empty JSON object ({}) to the Empty
+// sentinel so "present but empty" is distinguishable from nil. The error of
+// the second Unmarshal (emptiness probe only) is unchecked: it cannot fail
+// once the typed decode of the same bytes succeeded.
+func (r *KeyRestrictionsApiTargets) UnmarshalJSON(data []byte) error {
+	var res jsonKeyRestrictionsApiTargets
+	if err := json.Unmarshal(data, &res); err != nil {
+		return err
+	}
+
+	var m map[string]interface{}
+	json.Unmarshal(data, &m)
+
+	if len(m) == 0 {
+		*r = *EmptyKeyRestrictionsApiTargets
+	} else {
+
+		r.Service = res.Service
+
+		r.Methods = res.Methods
+
+	}
+	return nil
+}
+
+// This object is used to assert a desired state where this KeyRestrictionsApiTargets is
+// empty. Go lacks global const objects, but this object should be treated
+// as one. Modifying this object will have undesirable results.
+var EmptyKeyRestrictionsApiTargets *KeyRestrictionsApiTargets = &KeyRestrictionsApiTargets{empty: true}
+
+// Empty reports whether this value is the canonical "empty" sentinel.
+func (r *KeyRestrictionsApiTargets) Empty() bool {
+	return r.empty
+}
+
+// String renders the value for logging and diffing via the DCL helper.
+func (r *KeyRestrictionsApiTargets) String() string {
+	return dcl.SprintResource(r)
+}
+
+// HashCode returns a SHA-256 digest of the rendered value for cheap comparisons.
+func (r *KeyRestrictionsApiTargets) HashCode() string {
+	// Placeholder for a more complex hash method that handles ordering, etc
+	// Hash resource body for easy comparison later
+	hash := sha256.Sum256([]byte(r.String()))
+	return fmt.Sprintf("%x", hash)
+}
+
+// Describe returns a simple description of this resource to ensure that automated tools
+// can identify it.
+func (r *Key) Describe() dcl.ServiceTypeVersion {
+	return dcl.ServiceTypeVersion{
+		Service: "apikeys",
+		Type:    "Key",
+{{- if ne $.TargetVersionName "ga" }}
+		Version: "beta",
+{{- else }}
+		// NOTE(review): the GA branch reports the service name ("apikeys") as
+		// the version string rather than "ga"; confirm this matches the
+		// upstream DCL generator's intent.
+		Version: "apikeys",
+{{- end }}
+	}
+}
+
+// ID runs the extract hooks on the resource, then builds the canonical
+// identifier projects/{project}/locations/global/keys/{name} from its
+// URL-normalized form.
+func (r *Key) ID() (string, error) {
+	if err := extractKeyFields(r); err != nil {
+		return "", err
+	}
+	nr := r.urlNormalized()
+	params := map[string]interface{}{
+		"name":                  dcl.ValueOrEmptyString(nr.Name),
+		"display_name":          dcl.ValueOrEmptyString(nr.DisplayName),
+		"key_string":            dcl.ValueOrEmptyString(nr.KeyString),
+		"uid":                   dcl.ValueOrEmptyString(nr.Uid),
+		"service_account_email": dcl.ValueOrEmptyString(nr.ServiceAccountEmail),
+		// NOTE(review): Restrictions is a struct pointer passed through
+		// ValueOrEmptyString; the value is unused by the template below.
+		"restrictions": dcl.ValueOrEmptyString(nr.Restrictions),
+		"project":      dcl.ValueOrEmptyString(nr.Project),
+	}
+	return dcl.Nprintf("projects/{{ "{{" }}project{{ "}}" }}/locations/global/keys/{{ "{{" }}name{{ "}}" }}", params), nil
+}
+
+// KeyMaxPage omits the pageSize parameter, deferring to the server default.
+const KeyMaxPage = -1
+
+// KeyList is one page of Key results plus the state needed to fetch the next.
+type KeyList struct {
+	Items []*Key
+
+	nextToken string
+
+	pageSize int32
+
+	resource *Key
+}
+
+// HasNext reports whether another page of results is available.
+func (l *KeyList) HasNext() bool {
+	return l.nextToken != ""
+}
+
+// Next replaces l.Items with the next page of results; callers must check
+// HasNext first or this returns an error.
+func (l *KeyList) Next(ctx context.Context, c *Client) error {
+	ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second))
+	defer cancel()
+
+	if !l.HasNext() {
+		return fmt.Errorf("no next page")
+	}
+	items, token, err :=
c.listKey(ctx, l.resource, l.nextToken, l.pageSize)
+	if err != nil {
+		return err
+	}
+	l.Items = items
+	l.nextToken = token
+	// err is nil at this point; returned as-is for symmetry with the call above.
+	return err
+}
+
+// ListKey returns the first page of keys in the project, using the server's
+// default page size (KeyMaxPage); use KeyList.Next to page further.
+func (c *Client) ListKey(ctx context.Context, project string) (*KeyList, error) {
+	ctx = dcl.ContextWithRequestID(ctx)
+	ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second))
+	defer cancel()
+
+	return c.ListKeyWithMaxResults(ctx, project, KeyMaxPage)
+
+}
+
+// ListKeyWithMaxResults returns the first page of keys (up to pageSize) in the
+// given project; the returned KeyList carries the token for subsequent pages.
+func (c *Client) ListKeyWithMaxResults(ctx context.Context, project string, pageSize int32) (*KeyList, error) {
+	ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second))
+	defer cancel()
+
+	// Create a resource object so that we can use proper url normalization methods.
+	r := &Key{
+		Project: &project,
+	}
+	items, token, err := c.listKey(ctx, r, "", pageSize)
+	if err != nil {
+		return nil, err
+	}
+	return &KeyList{
+		Items:     items,
+		nextToken: token,
+		pageSize:  pageSize,
+		resource:  r,
+	}, nil
+}
+
+// DeleteKey deletes the given key. A nil resource is rejected; a key that is
+// already absent is handled inside the delete operation itself.
+func (c *Client) DeleteKey(ctx context.Context, r *Key) error {
+	ctx = dcl.ContextWithRequestID(ctx)
+	ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second))
+	defer cancel()
+
+	if r == nil {
+		return fmt.Errorf("Key resource is nil")
+	}
+	c.Config.Logger.InfoWithContext(ctx, "Deleting Key...")
+	deleteOp := deleteKeyOperation{}
+	return deleteOp.do(ctx, r, c)
+}
+
+// DeleteAllKey deletes all resources that the filter function returns true on.
+func (c *Client) DeleteAllKey(ctx context.Context, project string, filter func(*Key) bool) error {
+	listObj, err := c.ListKey(ctx, project)
+	if err != nil {
+		return err
+	}
+
+	err = c.deleteAllKey(ctx, filter, listObj.Items)
+	if err != nil {
+		return err
+	}
+	for listObj.HasNext() {
+		err = listObj.Next(ctx, c)
+		if err != nil {
+			// BUGFIX: propagate the pagination error instead of `return nil`,
+			// which silently reported success when fetching a page failed and
+			// left matching keys on later pages undeleted.
+			return err
+		}
+		err = c.deleteAllKey(ctx, filter, listObj.Items)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// ApplyKey reconciles the desired Key with live state, retrying the whole
+// apply when the API reports a 409 conflict so the change is re-planned
+// against the latest state. Returns the post-apply state (possibly non-nil
+// even alongside an error from a late diff check).
+func (c *Client) ApplyKey(ctx context.Context, rawDesired *Key, opts ...dcl.ApplyOption) (*Key, error) {
+	ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second))
+	defer cancel()
+
+	ctx = dcl.ContextWithRequestID(ctx)
+	var resultNewState *Key
+	err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) {
+		newState, err := applyKeyHelper(c, ctx, rawDesired, opts...)
+		resultNewState = newState
+		if err != nil {
+			// If the error is 409, there is conflict in resource update.
+			// Here we want to apply changes based on latest state.
+			if dcl.IsConflictError(err) {
+				return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err}
+			}
+			return nil, err
+		}
+		return nil, nil
+	}, c.Config.RetryProvider)
+	return resultNewState, err
+}
+
+// applyKeyHelper performs one apply pass: validate, extract, diff against
+// live state, check lifecycle directives, then plan and execute operations.
+func applyKeyHelper(c *Client, ctx context.Context, rawDesired *Key, opts ...dcl.ApplyOption) (*Key, error) {
+	c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyKey...")
+	c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired)
+
+	// 1.1: Validation of user-specified fields in desired state.
+	if err := rawDesired.validate(); err != nil {
+		return nil, err
+	}
+
+	if err := extractKeyFields(rawDesired); err != nil {
+		return nil, err
+	}
+
+	initial, desired, fieldDiffs, err := c.keyDiffsForRawDesired(ctx, rawDesired, opts...)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create a diff: %w", err)
+	}
+
+	diffs, err := convertFieldDiffsToKeyDiffs(c.Config, fieldDiffs, opts)
+	if err != nil {
+		return nil, err
+	}
+
+	// TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far).
+
+	// 2.3: Lifecycle Directive Check
+	var create bool
+	lp := dcl.FetchLifecycleParams(opts)
+	if initial == nil {
+		// No live resource: this apply is a creation, unless blocked.
+		if dcl.HasLifecycleParam(lp, dcl.BlockCreation) {
+			return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)}
+		}
+		create = true
+	} else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) {
+		// Resource exists but acquiring pre-existing resources is disallowed.
+		return nil, dcl.ApplyInfeasibleError{
+			Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial),
+		}
+	} else {
+		// Updates requiring recreation are always infeasible for this resource,
+		// and any diff at all is infeasible when modification is blocked.
+		for _, d := range diffs {
+			if d.RequiresRecreate {
+				return nil, dcl.ApplyInfeasibleError{
+					Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d),
+				}
+			}
+			if dcl.HasLifecycleParam(lp, dcl.BlockModification) {
+				return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)}
+			}
+		}
+	}
+
+	// 2.4 Imperative Request Planning
+	var ops []keyApiOperation
+	if create {
+		ops = append(ops, &createKeyOperation{})
+	} else {
+		// One update operation per computed diff, in diff order.
+		for _, d := range diffs {
+			ops = append(ops, d.UpdateOp)
+		}
+	}
+	c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops)
+
+	// 2.5 Request Actuation
+	for _, op := range ops {
+		c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op)
+		if err := op.do(ctx, desired, c); err != nil {
+			c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", op, op, err)
+			return nil, err
+		}
+		c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op)
+	}
+	return applyKeyDiff(c, ctx, desired, rawDesired, ops, opts...)
+}
+
+// applyKeyDiff re-reads live state after the apply operations, folds in any
+// values returned by a create operation's first response, canonicalizes, and
+// verifies that no diffs remain between the new state and the desired state.
+func applyKeyDiff(c *Client, ctx context.Context, desired *Key, rawDesired *Key, ops []keyApiOperation, opts ...dcl.ApplyOption) (*Key, error) {
+	// 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state
+	c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...")
+	rawNew, err := c.GetKey(ctx, desired)
+	if err != nil {
+		return nil, err
+	}
+	// Get additional values from the first response.
+	// These values should be merged into the newState above.
+	if len(ops) > 0 {
+		lastOp := ops[len(ops)-1]
+		// Only a create operation carries a first response worth merging.
+		if o, ok := lastOp.(*createKeyOperation); ok {
+			if r, hasR := o.FirstResponse(); hasR {
+
+				c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...")
+
+				fullResp, err := unmarshalMapKey(r, c, rawDesired)
+				if err != nil {
+					return nil, err
+				}
+
+				rawNew, err = canonicalizeKeyNewState(c, rawNew, fullResp)
+				if err != nil {
+					return nil, err
+				}
+			}
+		}
+	}
+
+	c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired)
+	// 3.2b Canonicalization of raw new state using raw desired state
+	newState, err := canonicalizeKeyNewState(c, rawNew, rawDesired)
+	if err != nil {
+		return rawNew, err
+	}
+
+	c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState)
+	// 3.3 Comparison of the new state and raw desired state.
+	// TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE
+	newDesired, err := canonicalizeKeyDesiredState(rawDesired, newState)
+	if err != nil {
+		return newState, err
+	}
+
+	if err := postReadExtractKeyFields(newState); err != nil {
+		return newState, err
+	}
+
+	// Need to ensure any transformations made here match acceptably in differ.
+ if err := postReadExtractKeyFields(newDesired); err != nil { + return newState, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) + newDiffs, err := diffKey(c, newDesired, newState) + if err != nil { + return newState, err + } + + if len(newDiffs) == 0 { + c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") + } else { + c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) + diffMessages := make([]string, len(newDiffs)) + for i, d := range newDiffs { + diffMessages[i] = fmt.Sprintf("%v", d) + } + return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} + } + c.Config.Logger.InfoWithContext(ctx, "Done Apply.") + return newState, nil +} diff --git a/mmv1/third_party/terraform/services/apikeys/key_internal.go.tmpl b/mmv1/third_party/terraform/services/apikeys/key_internal.go.tmpl new file mode 100644 index 000000000000..41f67fb2bf16 --- /dev/null +++ b/mmv1/third_party/terraform/services/apikeys/key_internal.go.tmpl @@ -0,0 +1,2909 @@ +package apikeys + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "strings" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource/operations" +) + +func (r *Key) validate() error { + + if err := dcl.RequiredParameter(r.Name, "Name"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Project, "Project"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.Restrictions) { + if err := r.Restrictions.validate(); err != nil { + return err + } + } + return nil +} +func (r *KeyRestrictions) validate() error { + if err := dcl.ValidateAtMostOneOfFieldsSet([]string{"BrowserKeyRestrictions", "ServerKeyRestrictions", "AndroidKeyRestrictions", "IosKeyRestrictions"}, r.BrowserKeyRestrictions, r.ServerKeyRestrictions, r.AndroidKeyRestrictions, r.IosKeyRestrictions); err != nil { + return err + } + 
if !dcl.IsEmptyValueIndirect(r.BrowserKeyRestrictions) { + if err := r.BrowserKeyRestrictions.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.ServerKeyRestrictions) { + if err := r.ServerKeyRestrictions.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.AndroidKeyRestrictions) { + if err := r.AndroidKeyRestrictions.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.IosKeyRestrictions) { + if err := r.IosKeyRestrictions.validate(); err != nil { + return err + } + } + return nil +} +func (r *KeyRestrictionsBrowserKeyRestrictions) validate() error { + if err := dcl.Required(r, "allowedReferrers"); err != nil { + return err + } + return nil +} +func (r *KeyRestrictionsServerKeyRestrictions) validate() error { + if err := dcl.Required(r, "allowedIps"); err != nil { + return err + } + return nil +} +func (r *KeyRestrictionsAndroidKeyRestrictions) validate() error { + if err := dcl.Required(r, "allowedApplications"); err != nil { + return err + } + return nil +} +func (r *KeyRestrictionsAndroidKeyRestrictionsAllowedApplications) validate() error { + if err := dcl.Required(r, "sha1Fingerprint"); err != nil { + return err + } + if err := dcl.Required(r, "packageName"); err != nil { + return err + } + return nil +} +func (r *KeyRestrictionsIosKeyRestrictions) validate() error { + if err := dcl.Required(r, "allowedBundleIds"); err != nil { + return err + } + return nil +} +func (r *KeyRestrictionsApiTargets) validate() error { + if err := dcl.Required(r, "service"); err != nil { + return err + } + return nil +} +func (r *Key) basePath() string { + params := map[string]interface{}{} + return dcl.Nprintf("https://apikeys.googleapis.com/v2/", params) +} + +func (r *Key) getURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return 
dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/global/keys/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +func (r *Key) listURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/global/keys", nr.basePath(), userBasePath, params), nil + +} + +func (r *Key) createURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/global/keys?keyId={{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil + +} + +func (r *Key) deleteURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/global/keys/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +// keyApiOperation represents a mutable operation in the underlying REST +// API such as Create, Update, or Delete. +type keyApiOperation interface { + do(context.Context, *Key, *Client) error +} + +// newUpdateKeyUpdateKeyRequest creates a request for an +// Key resource's UpdateKey update type by filling in the update +// fields based on the intended state of the resource. 
+// newUpdateKeyUpdateKeyRequest builds the UpdateKey request body from the
+// desired resource, including only the mutable fields (displayName and
+// restrictions) and omitting any that are empty.
+func newUpdateKeyUpdateKeyRequest(ctx context.Context, f *Key, c *Client) (map[string]interface{}, error) {
+	req := map[string]interface{}{}
+	res := f
+	_ = res
+
+	if v := f.DisplayName; !dcl.IsEmptyValueIndirect(v) {
+		req["displayName"] = v
+	}
+	if v, err := expandKeyRestrictions(c, f.Restrictions, res); err != nil {
+		return nil, fmt.Errorf("error expanding Restrictions into restrictions: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		req["restrictions"] = v
+	}
+	return req, nil
+}
+
+// marshalUpdateKeyUpdateKeyRequest converts the update into
+// the final JSON request body.
+func marshalUpdateKeyUpdateKeyRequest(c *Client, m map[string]interface{}) ([]byte, error) {
+
+	return json.Marshal(m)
+}
+
+// updateKeyUpdateKeyOperation carries the field diffs used to derive the
+// updateMask for a PATCH against an existing Key.
+type updateKeyUpdateKeyOperation struct {
+	// If the update operation has the REQUIRES_APPLY_OPTIONS trait, this will be populated.
+	// Usually it will be nil - this is to prevent us from accidentally depending on apply
+	// options, which should usually be unnecessary.
+	ApplyOptions []dcl.ApplyOption
+	FieldDiffs   []*dcl.FieldDiff
+}
+
+// do creates a request and sends it to the appropriate URL. In most operations,
+// do will transcribe a subset of the resource into a request object and send a
+// PUT request to a single URL.
+ +func (op *updateKeyUpdateKeyOperation) do(ctx context.Context, r *Key, c *Client) error { + _, err := c.GetKey(ctx, r) + if err != nil { + return err + } + + u, err := r.updateURL(c.Config.BasePath, "UpdateKey") + if err != nil { + return err + } + mask := dcl.TopLevelUpdateMask(op.FieldDiffs) + u, err = dcl.AddQueryParams(u, map[string]string{"updateMask": mask}) + if err != nil { + return err + } + + req, err := newUpdateKeyUpdateKeyRequest(ctx, r, c) + if err != nil { + return err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created update: %#v", req) + body, err := marshalUpdateKeyUpdateKeyRequest(c, req) + if err != nil { + return err + } + resp, err := dcl.SendRequest(ctx, c.Config, "PATCH", u, bytes.NewBuffer(body), c.Config.RetryProvider) + if err != nil { + return err + } + + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + err = o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET") + + if err != nil { + return err + } + + return nil +} + +func (c *Client) listKeyRaw(ctx context.Context, r *Key, pageToken string, pageSize int32) ([]byte, error) { + u, err := r.urlNormalized().listURL(c.Config.BasePath) + if err != nil { + return nil, err + } + + m := make(map[string]string) + if pageToken != "" { + m["pageToken"] = pageToken + } + + if pageSize != KeyMaxPage { + m["pageSize"] = fmt.Sprintf("%v", pageSize) + } + + u, err = dcl.AddQueryParams(u, m) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + return ioutil.ReadAll(resp.Response.Body) +} + +type listKeyOperation struct { + Keys []map[string]interface{} `json:"keys"` + Token string `json:"nextPageToken"` +} + +func (c *Client) listKey(ctx context.Context, r *Key, pageToken string, pageSize int32) ([]*Key, string, error) { + b, err 
:= c.listKeyRaw(ctx, r, pageToken, pageSize) + if err != nil { + return nil, "", err + } + + var m listKeyOperation + if err := json.Unmarshal(b, &m); err != nil { + return nil, "", err + } + + var l []*Key + for _, v := range m.Keys { + res, err := unmarshalMapKey(v, c, r) + if err != nil { + return nil, m.Token, err + } + res.Project = r.Project + l = append(l, res) + } + + return l, m.Token, nil +} + +func (c *Client) deleteAllKey(ctx context.Context, f func(*Key) bool, resources []*Key) error { + var errors []string + for _, res := range resources { + if f(res) { + // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. + err := c.DeleteKey(ctx, res) + if err != nil { + errors = append(errors, err.Error()) + } + } + } + if len(errors) > 0 { + return fmt.Errorf("%v", strings.Join(errors, "\n")) + } else { + return nil + } +} + +type deleteKeyOperation struct{} + +func (op *deleteKeyOperation) do(ctx context.Context, r *Key, c *Client) error { + r, err := c.GetKey(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + c.Config.Logger.InfoWithContextf(ctx, "Key not found, returning. Original error: %v", err) + return nil + } + c.Config.Logger.WarningWithContextf(ctx, "GetKey checking for existence. error: %v", err) + return err + } + + u, err := r.deleteURL(c.Config.BasePath) + if err != nil { + return err + } + + // Delete should never have a body + body := &bytes.Buffer{} + resp, err := dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider) + if err != nil { + return err + } + + // wait for object to be deleted. + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { + return err + } + return nil +} + +// Create operations are similar to Update operations, although they do not have +// specific request objects. 
The Create request object is the json encoding of +// the resource, which is modified by res.marshal to form the base request body. +type createKeyOperation struct { + response map[string]interface{} +} + +func (op *createKeyOperation) FirstResponse() (map[string]interface{}, bool) { + return op.response, len(op.response) > 0 +} + +func (op *createKeyOperation) do(ctx context.Context, r *Key, c *Client) error { + c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r) + u, err := r.createURL(c.Config.BasePath) + if err != nil { + return err + } + + req, err := r.marshal(c) + if err != nil { + return err + } + resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider) + if err != nil { + return err + } + // wait for object to be created. + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { + c.Config.Logger.Warningf("Creation failed after waiting for operation: %v", err) + return err + } + c.Config.Logger.InfoWithContextf(ctx, "Successfully waited for operation") + op.response, _ = o.FirstResponse() + + if _, err := c.GetKey(ctx, r); err != nil { + c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) + return err + } + + return nil +} + +func (c *Client) keyDiffsForRawDesired(ctx context.Context, rawDesired *Key, opts ...dcl.ApplyOption) (initial, desired *Key, diffs []*dcl.FieldDiff, err error) { + c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") + // First, let us see if the user provided a state hint. If they did, we will start fetching based on that. 
+ var fetchState *Key + if sh := dcl.FetchStateHint(opts); sh != nil { + if r, ok := sh.(*Key); !ok { + c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected Key, got %T", sh) + } else { + fetchState = r + } + } + if fetchState == nil { + fetchState = rawDesired + } + + // 1.2: Retrieval of raw initial state from API + rawInitial, err := c.GetKey(ctx, fetchState) + if rawInitial == nil { + if !dcl.IsNotFound(err) { + c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a Key resource already exists: %s", err) + return nil, nil, nil, fmt.Errorf("failed to retrieve Key resource: %v", err) + } + c.Config.Logger.InfoWithContext(ctx, "Found that Key resource did not exist.") + // Perform canonicalization to pick up defaults. + desired, err = canonicalizeKeyDesiredState(rawDesired, rawInitial) + return nil, desired, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Found initial state for Key: %v", rawInitial) + c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for Key: %v", rawDesired) + + // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. + if err := extractKeyFields(rawInitial); err != nil { + return nil, nil, nil, err + } + + // 1.3: Canonicalize raw initial state into initial state. + initial, err = canonicalizeKeyInitialState(rawInitial, rawDesired) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for Key: %v", initial) + + // 1.4: Canonicalize raw desired state into desired state. + desired, err = canonicalizeKeyDesiredState(rawDesired, rawInitial, opts...) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for Key: %v", desired) + + // 2.1: Comparison of initial and desired state. + diffs, err = diffKey(c, desired, initial, opts...) 
+ return initial, desired, diffs, err +} + +func canonicalizeKeyInitialState(rawInitial, rawDesired *Key) (*Key, error) { + // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. + return rawInitial, nil +} + +/* +* Canonicalizers +* +* These are responsible for converting either a user-specified config or a +* GCP API response to a standard format that can be used for difference checking. +* */ + +func canonicalizeKeyDesiredState(rawDesired, rawInitial *Key, opts ...dcl.ApplyOption) (*Key, error) { + + if rawInitial == nil { + // Since the initial state is empty, the desired state is all we have. + // We canonicalize the remaining nested objects with nil to pick up defaults. + rawDesired.Restrictions = canonicalizeKeyRestrictions(rawDesired.Restrictions, nil, opts...) + + return rawDesired, nil + } + canonicalDesired := &Key{} + if dcl.NameToSelfLink(rawDesired.Name, rawInitial.Name) { + canonicalDesired.Name = rawInitial.Name + } else { + canonicalDesired.Name = rawDesired.Name + } + if dcl.StringCanonicalize(rawDesired.DisplayName, rawInitial.DisplayName) { + canonicalDesired.DisplayName = rawInitial.DisplayName + } else { + canonicalDesired.DisplayName = rawDesired.DisplayName + } + if dcl.IsZeroValue(rawDesired.ServiceAccountEmail) || (dcl.IsEmptyValueIndirect(rawDesired.ServiceAccountEmail) && dcl.IsEmptyValueIndirect(rawInitial.ServiceAccountEmail)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + canonicalDesired.ServiceAccountEmail = rawInitial.ServiceAccountEmail + } else { + canonicalDesired.ServiceAccountEmail = rawDesired.ServiceAccountEmail + } + canonicalDesired.Restrictions = canonicalizeKeyRestrictions(rawDesired.Restrictions, rawInitial.Restrictions, opts...) 
+ if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) { + canonicalDesired.Project = rawInitial.Project + } else { + canonicalDesired.Project = rawDesired.Project + } + return canonicalDesired, nil +} + +func canonicalizeKeyNewState(c *Client, rawNew, rawDesired *Key) (*Key, error) { + + rawNew.Name = rawDesired.Name + + if dcl.IsEmptyValueIndirect(rawNew.DisplayName) && dcl.IsEmptyValueIndirect(rawDesired.DisplayName) { + rawNew.DisplayName = rawDesired.DisplayName + } else { + if dcl.StringCanonicalize(rawDesired.DisplayName, rawNew.DisplayName) { + rawNew.DisplayName = rawDesired.DisplayName + } + } + + if dcl.IsEmptyValueIndirect(rawNew.KeyString) && dcl.IsEmptyValueIndirect(rawDesired.KeyString) { + rawNew.KeyString = rawDesired.KeyString + } else { + if dcl.StringCanonicalize(rawDesired.KeyString, rawNew.KeyString) { + rawNew.KeyString = rawDesired.KeyString + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Uid) && dcl.IsEmptyValueIndirect(rawDesired.Uid) { + rawNew.Uid = rawDesired.Uid + } else { + if dcl.StringCanonicalize(rawDesired.Uid, rawNew.Uid) { + rawNew.Uid = rawDesired.Uid + } + } + + if dcl.IsEmptyValueIndirect(rawNew.ServiceAccountEmail) && dcl.IsEmptyValueIndirect(rawDesired.ServiceAccountEmail) { + rawNew.ServiceAccountEmail = rawDesired.ServiceAccountEmail + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.Restrictions) && dcl.IsEmptyValueIndirect(rawDesired.Restrictions) { + rawNew.Restrictions = rawDesired.Restrictions + } else { + rawNew.Restrictions = canonicalizeNewKeyRestrictions(c, rawDesired.Restrictions, rawNew.Restrictions) + } + + rawNew.Project = rawDesired.Project + + return rawNew, nil +} + +func canonicalizeKeyRestrictions(des, initial *KeyRestrictions, opts ...dcl.ApplyOption) *KeyRestrictions { + if des == nil { + return initial + } + if des.empty { + return des + } + + if des.BrowserKeyRestrictions != nil || (initial != nil && initial.BrowserKeyRestrictions != nil) { + // Check if anything else is set. 
+ if dcl.AnySet(des.ServerKeyRestrictions, des.AndroidKeyRestrictions, des.IosKeyRestrictions) { + des.BrowserKeyRestrictions = nil + if initial != nil { + initial.BrowserKeyRestrictions = nil + } + } + } + + if des.ServerKeyRestrictions != nil || (initial != nil && initial.ServerKeyRestrictions != nil) { + // Check if anything else is set. + if dcl.AnySet(des.BrowserKeyRestrictions, des.AndroidKeyRestrictions, des.IosKeyRestrictions) { + des.ServerKeyRestrictions = nil + if initial != nil { + initial.ServerKeyRestrictions = nil + } + } + } + + if des.AndroidKeyRestrictions != nil || (initial != nil && initial.AndroidKeyRestrictions != nil) { + // Check if anything else is set. + if dcl.AnySet(des.BrowserKeyRestrictions, des.ServerKeyRestrictions, des.IosKeyRestrictions) { + des.AndroidKeyRestrictions = nil + if initial != nil { + initial.AndroidKeyRestrictions = nil + } + } + } + + if des.IosKeyRestrictions != nil || (initial != nil && initial.IosKeyRestrictions != nil) { + // Check if anything else is set. + if dcl.AnySet(des.BrowserKeyRestrictions, des.ServerKeyRestrictions, des.AndroidKeyRestrictions) { + des.IosKeyRestrictions = nil + if initial != nil { + initial.IosKeyRestrictions = nil + } + } + } + + if initial == nil { + return des + } + + cDes := &KeyRestrictions{} + + cDes.BrowserKeyRestrictions = canonicalizeKeyRestrictionsBrowserKeyRestrictions(des.BrowserKeyRestrictions, initial.BrowserKeyRestrictions, opts...) + cDes.ServerKeyRestrictions = canonicalizeKeyRestrictionsServerKeyRestrictions(des.ServerKeyRestrictions, initial.ServerKeyRestrictions, opts...) + cDes.AndroidKeyRestrictions = canonicalizeKeyRestrictionsAndroidKeyRestrictions(des.AndroidKeyRestrictions, initial.AndroidKeyRestrictions, opts...) + cDes.IosKeyRestrictions = canonicalizeKeyRestrictionsIosKeyRestrictions(des.IosKeyRestrictions, initial.IosKeyRestrictions, opts...) + cDes.ApiTargets = canonicalizeKeyRestrictionsApiTargetsSlice(des.ApiTargets, initial.ApiTargets, opts...) 
+ + return cDes +} + +func canonicalizeKeyRestrictionsSlice(des, initial []KeyRestrictions, opts ...dcl.ApplyOption) []KeyRestrictions { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]KeyRestrictions, 0, len(des)) + for _, d := range des { + cd := canonicalizeKeyRestrictions(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]KeyRestrictions, 0, len(des)) + for i, d := range des { + cd := canonicalizeKeyRestrictions(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewKeyRestrictions(c *Client, des, nw *KeyRestrictions) *KeyRestrictions { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for KeyRestrictions while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + nw.BrowserKeyRestrictions = canonicalizeNewKeyRestrictionsBrowserKeyRestrictions(c, des.BrowserKeyRestrictions, nw.BrowserKeyRestrictions) + nw.ServerKeyRestrictions = canonicalizeNewKeyRestrictionsServerKeyRestrictions(c, des.ServerKeyRestrictions, nw.ServerKeyRestrictions) + nw.AndroidKeyRestrictions = canonicalizeNewKeyRestrictionsAndroidKeyRestrictions(c, des.AndroidKeyRestrictions, nw.AndroidKeyRestrictions) + nw.IosKeyRestrictions = canonicalizeNewKeyRestrictionsIosKeyRestrictions(c, des.IosKeyRestrictions, nw.IosKeyRestrictions) + nw.ApiTargets = canonicalizeNewKeyRestrictionsApiTargetsSlice(c, des.ApiTargets, nw.ApiTargets) + + return nw +} + +func canonicalizeNewKeyRestrictionsSet(c *Client, des, nw []KeyRestrictions) []KeyRestrictions { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []KeyRestrictions + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareKeyRestrictionsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewKeyRestrictions(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewKeyRestrictionsSlice(c *Client, des, nw []KeyRestrictions) []KeyRestrictions { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []KeyRestrictions + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewKeyRestrictions(c, &d, &n)) + } + + return items +} + +func canonicalizeKeyRestrictionsBrowserKeyRestrictions(des, initial *KeyRestrictionsBrowserKeyRestrictions, opts ...dcl.ApplyOption) *KeyRestrictionsBrowserKeyRestrictions { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &KeyRestrictionsBrowserKeyRestrictions{} + + if dcl.StringArrayCanonicalize(des.AllowedReferrers, initial.AllowedReferrers) { + cDes.AllowedReferrers = initial.AllowedReferrers + } else { + cDes.AllowedReferrers = des.AllowedReferrers + } + + return cDes +} + +func canonicalizeKeyRestrictionsBrowserKeyRestrictionsSlice(des, initial []KeyRestrictionsBrowserKeyRestrictions, opts ...dcl.ApplyOption) []KeyRestrictionsBrowserKeyRestrictions { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]KeyRestrictionsBrowserKeyRestrictions, 0, len(des)) + for _, d := range des { + cd := canonicalizeKeyRestrictionsBrowserKeyRestrictions(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]KeyRestrictionsBrowserKeyRestrictions, 0, len(des)) + for i, d := range des { + cd := canonicalizeKeyRestrictionsBrowserKeyRestrictions(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewKeyRestrictionsBrowserKeyRestrictions(c *Client, des, nw *KeyRestrictionsBrowserKeyRestrictions) *KeyRestrictionsBrowserKeyRestrictions { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for KeyRestrictionsBrowserKeyRestrictions while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringArrayCanonicalize(des.AllowedReferrers, nw.AllowedReferrers) { + nw.AllowedReferrers = des.AllowedReferrers + } + + return nw +} + +func canonicalizeNewKeyRestrictionsBrowserKeyRestrictionsSet(c *Client, des, nw []KeyRestrictionsBrowserKeyRestrictions) []KeyRestrictionsBrowserKeyRestrictions { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []KeyRestrictionsBrowserKeyRestrictions + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareKeyRestrictionsBrowserKeyRestrictionsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewKeyRestrictionsBrowserKeyRestrictions(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) 
+ + return items +} + +func canonicalizeNewKeyRestrictionsBrowserKeyRestrictionsSlice(c *Client, des, nw []KeyRestrictionsBrowserKeyRestrictions) []KeyRestrictionsBrowserKeyRestrictions { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []KeyRestrictionsBrowserKeyRestrictions + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewKeyRestrictionsBrowserKeyRestrictions(c, &d, &n)) + } + + return items +} + +func canonicalizeKeyRestrictionsServerKeyRestrictions(des, initial *KeyRestrictionsServerKeyRestrictions, opts ...dcl.ApplyOption) *KeyRestrictionsServerKeyRestrictions { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &KeyRestrictionsServerKeyRestrictions{} + + if dcl.StringArrayCanonicalize(des.AllowedIps, initial.AllowedIps) { + cDes.AllowedIps = initial.AllowedIps + } else { + cDes.AllowedIps = des.AllowedIps + } + + return cDes +} + +func canonicalizeKeyRestrictionsServerKeyRestrictionsSlice(des, initial []KeyRestrictionsServerKeyRestrictions, opts ...dcl.ApplyOption) []KeyRestrictionsServerKeyRestrictions { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]KeyRestrictionsServerKeyRestrictions, 0, len(des)) + for _, d := range des { + cd := canonicalizeKeyRestrictionsServerKeyRestrictions(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]KeyRestrictionsServerKeyRestrictions, 0, len(des)) + for i, d := range des { + cd := canonicalizeKeyRestrictionsServerKeyRestrictions(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewKeyRestrictionsServerKeyRestrictions(c *Client, des, nw *KeyRestrictionsServerKeyRestrictions) *KeyRestrictionsServerKeyRestrictions { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for KeyRestrictionsServerKeyRestrictions while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringArrayCanonicalize(des.AllowedIps, nw.AllowedIps) { + nw.AllowedIps = des.AllowedIps + } + + return nw +} + +func canonicalizeNewKeyRestrictionsServerKeyRestrictionsSet(c *Client, des, nw []KeyRestrictionsServerKeyRestrictions) []KeyRestrictionsServerKeyRestrictions { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []KeyRestrictionsServerKeyRestrictions + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareKeyRestrictionsServerKeyRestrictionsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewKeyRestrictionsServerKeyRestrictions(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewKeyRestrictionsServerKeyRestrictionsSlice(c *Client, des, nw []KeyRestrictionsServerKeyRestrictions) []KeyRestrictionsServerKeyRestrictions { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []KeyRestrictionsServerKeyRestrictions + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewKeyRestrictionsServerKeyRestrictions(c, &d, &n)) + } + + return items +} + +func canonicalizeKeyRestrictionsAndroidKeyRestrictions(des, initial *KeyRestrictionsAndroidKeyRestrictions, opts ...dcl.ApplyOption) *KeyRestrictionsAndroidKeyRestrictions { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &KeyRestrictionsAndroidKeyRestrictions{} + + cDes.AllowedApplications = canonicalizeKeyRestrictionsAndroidKeyRestrictionsAllowedApplicationsSlice(des.AllowedApplications, initial.AllowedApplications, opts...) + + return cDes +} + +func canonicalizeKeyRestrictionsAndroidKeyRestrictionsSlice(des, initial []KeyRestrictionsAndroidKeyRestrictions, opts ...dcl.ApplyOption) []KeyRestrictionsAndroidKeyRestrictions { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]KeyRestrictionsAndroidKeyRestrictions, 0, len(des)) + for _, d := range des { + cd := canonicalizeKeyRestrictionsAndroidKeyRestrictions(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]KeyRestrictionsAndroidKeyRestrictions, 0, len(des)) + for i, d := range des { + cd := canonicalizeKeyRestrictionsAndroidKeyRestrictions(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewKeyRestrictionsAndroidKeyRestrictions(c *Client, des, nw *KeyRestrictionsAndroidKeyRestrictions) *KeyRestrictionsAndroidKeyRestrictions { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for KeyRestrictionsAndroidKeyRestrictions while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + nw.AllowedApplications = canonicalizeNewKeyRestrictionsAndroidKeyRestrictionsAllowedApplicationsSlice(c, des.AllowedApplications, nw.AllowedApplications) + + return nw +} + +func canonicalizeNewKeyRestrictionsAndroidKeyRestrictionsSet(c *Client, des, nw []KeyRestrictionsAndroidKeyRestrictions) []KeyRestrictionsAndroidKeyRestrictions { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []KeyRestrictionsAndroidKeyRestrictions + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareKeyRestrictionsAndroidKeyRestrictionsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewKeyRestrictionsAndroidKeyRestrictions(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewKeyRestrictionsAndroidKeyRestrictionsSlice(c *Client, des, nw []KeyRestrictionsAndroidKeyRestrictions) []KeyRestrictionsAndroidKeyRestrictions { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []KeyRestrictionsAndroidKeyRestrictions + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewKeyRestrictionsAndroidKeyRestrictions(c, &d, &n)) + } + + return items +} + +func canonicalizeKeyRestrictionsAndroidKeyRestrictionsAllowedApplications(des, initial *KeyRestrictionsAndroidKeyRestrictionsAllowedApplications, opts ...dcl.ApplyOption) *KeyRestrictionsAndroidKeyRestrictionsAllowedApplications { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &KeyRestrictionsAndroidKeyRestrictionsAllowedApplications{} + + if dcl.StringCanonicalize(des.Sha1Fingerprint, initial.Sha1Fingerprint) || dcl.IsZeroValue(des.Sha1Fingerprint) { + cDes.Sha1Fingerprint = initial.Sha1Fingerprint + } else { + cDes.Sha1Fingerprint = des.Sha1Fingerprint + } + if dcl.StringCanonicalize(des.PackageName, initial.PackageName) || dcl.IsZeroValue(des.PackageName) { + cDes.PackageName = initial.PackageName + } else { + cDes.PackageName = des.PackageName + } + + return cDes +} + +func canonicalizeKeyRestrictionsAndroidKeyRestrictionsAllowedApplicationsSlice(des, initial []KeyRestrictionsAndroidKeyRestrictionsAllowedApplications, opts ...dcl.ApplyOption) []KeyRestrictionsAndroidKeyRestrictionsAllowedApplications { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]KeyRestrictionsAndroidKeyRestrictionsAllowedApplications, 0, len(des)) + for _, d := range des { + cd := canonicalizeKeyRestrictionsAndroidKeyRestrictionsAllowedApplications(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]KeyRestrictionsAndroidKeyRestrictionsAllowedApplications, 0, len(des)) + for i, d := range des { + cd := canonicalizeKeyRestrictionsAndroidKeyRestrictionsAllowedApplications(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewKeyRestrictionsAndroidKeyRestrictionsAllowedApplications(c *Client, des, nw *KeyRestrictionsAndroidKeyRestrictionsAllowedApplications) *KeyRestrictionsAndroidKeyRestrictionsAllowedApplications { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for KeyRestrictionsAndroidKeyRestrictionsAllowedApplications while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Sha1Fingerprint, nw.Sha1Fingerprint) { + nw.Sha1Fingerprint = des.Sha1Fingerprint + } + if dcl.StringCanonicalize(des.PackageName, nw.PackageName) { + nw.PackageName = des.PackageName + } + + return nw +} + +func canonicalizeNewKeyRestrictionsAndroidKeyRestrictionsAllowedApplicationsSet(c *Client, des, nw []KeyRestrictionsAndroidKeyRestrictionsAllowedApplications) []KeyRestrictionsAndroidKeyRestrictionsAllowedApplications { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []KeyRestrictionsAndroidKeyRestrictionsAllowedApplications + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareKeyRestrictionsAndroidKeyRestrictionsAllowedApplicationsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewKeyRestrictionsAndroidKeyRestrictionsAllowedApplications(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) 
+ + return items +} + +func canonicalizeNewKeyRestrictionsAndroidKeyRestrictionsAllowedApplicationsSlice(c *Client, des, nw []KeyRestrictionsAndroidKeyRestrictionsAllowedApplications) []KeyRestrictionsAndroidKeyRestrictionsAllowedApplications { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []KeyRestrictionsAndroidKeyRestrictionsAllowedApplications + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewKeyRestrictionsAndroidKeyRestrictionsAllowedApplications(c, &d, &n)) + } + + return items +} + +func canonicalizeKeyRestrictionsIosKeyRestrictions(des, initial *KeyRestrictionsIosKeyRestrictions, opts ...dcl.ApplyOption) *KeyRestrictionsIosKeyRestrictions { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &KeyRestrictionsIosKeyRestrictions{} + + if dcl.StringArrayCanonicalize(des.AllowedBundleIds, initial.AllowedBundleIds) { + cDes.AllowedBundleIds = initial.AllowedBundleIds + } else { + cDes.AllowedBundleIds = des.AllowedBundleIds + } + + return cDes +} + +func canonicalizeKeyRestrictionsIosKeyRestrictionsSlice(des, initial []KeyRestrictionsIosKeyRestrictions, opts ...dcl.ApplyOption) []KeyRestrictionsIosKeyRestrictions { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]KeyRestrictionsIosKeyRestrictions, 0, len(des)) + for _, d := range des { + cd := canonicalizeKeyRestrictionsIosKeyRestrictions(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]KeyRestrictionsIosKeyRestrictions, 0, len(des)) + for i, d := range des { + cd := canonicalizeKeyRestrictionsIosKeyRestrictions(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewKeyRestrictionsIosKeyRestrictions(c *Client, des, nw *KeyRestrictionsIosKeyRestrictions) *KeyRestrictionsIosKeyRestrictions { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for KeyRestrictionsIosKeyRestrictions while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringArrayCanonicalize(des.AllowedBundleIds, nw.AllowedBundleIds) { + nw.AllowedBundleIds = des.AllowedBundleIds + } + + return nw +} + +func canonicalizeNewKeyRestrictionsIosKeyRestrictionsSet(c *Client, des, nw []KeyRestrictionsIosKeyRestrictions) []KeyRestrictionsIosKeyRestrictions { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []KeyRestrictionsIosKeyRestrictions + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareKeyRestrictionsIosKeyRestrictionsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewKeyRestrictionsIosKeyRestrictions(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewKeyRestrictionsIosKeyRestrictionsSlice(c *Client, des, nw []KeyRestrictionsIosKeyRestrictions) []KeyRestrictionsIosKeyRestrictions { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []KeyRestrictionsIosKeyRestrictions + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewKeyRestrictionsIosKeyRestrictions(c, &d, &n)) + } + + return items +} + +func canonicalizeKeyRestrictionsApiTargets(des, initial *KeyRestrictionsApiTargets, opts ...dcl.ApplyOption) *KeyRestrictionsApiTargets { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &KeyRestrictionsApiTargets{} + + if dcl.StringCanonicalize(des.Service, initial.Service) || dcl.IsZeroValue(des.Service) { + cDes.Service = initial.Service + } else { + cDes.Service = des.Service + } + if dcl.StringArrayCanonicalize(des.Methods, initial.Methods) { + cDes.Methods = initial.Methods + } else { + cDes.Methods = des.Methods + } + + return cDes +} + +func canonicalizeKeyRestrictionsApiTargetsSlice(des, initial []KeyRestrictionsApiTargets, opts ...dcl.ApplyOption) []KeyRestrictionsApiTargets { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]KeyRestrictionsApiTargets, 0, len(des)) + for _, d := range des { + cd := canonicalizeKeyRestrictionsApiTargets(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]KeyRestrictionsApiTargets, 0, len(des)) + for i, d := range des { + cd := canonicalizeKeyRestrictionsApiTargets(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewKeyRestrictionsApiTargets(c *Client, des, nw *KeyRestrictionsApiTargets) *KeyRestrictionsApiTargets { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for KeyRestrictionsApiTargets while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Service, nw.Service) { + nw.Service = des.Service + } + if dcl.StringArrayCanonicalize(des.Methods, nw.Methods) { + nw.Methods = des.Methods + } + + return nw +} + +func canonicalizeNewKeyRestrictionsApiTargetsSet(c *Client, des, nw []KeyRestrictionsApiTargets) []KeyRestrictionsApiTargets { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []KeyRestrictionsApiTargets + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareKeyRestrictionsApiTargetsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewKeyRestrictionsApiTargets(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewKeyRestrictionsApiTargetsSlice(c *Client, des, nw []KeyRestrictionsApiTargets) []KeyRestrictionsApiTargets { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []KeyRestrictionsApiTargets + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewKeyRestrictionsApiTargets(c, &d, &n)) + } + + return items +} + +// The differ returns a list of diffs, along with a list of operations that should be taken +// to remedy them. Right now, it does not attempt to consolidate operations - if several +// fields can be fixed with a patch update, it will perform the patch several times. +// Diffs on some fields will be ignored if the `desired` state has an empty (nil) +// value. 
This empty value indicates that the user does not care about the state for +// the field. Empty fields on the actual object will cause diffs. +// TODO(magic-modules-eng): for efficiency in some resources, add batching. +func diffKey(c *Client, desired, actual *Key, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) { + if desired == nil || actual == nil { + return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual) + } + + c.Config.Logger.Infof("Diff function called with desired state: %v", desired) + c.Config.Logger.Infof("Diff function called with actual state: %v", actual) + + var fn dcl.FieldName + var newDiffs []*dcl.FieldDiff + // New style diffs. + if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.DisplayName, actual.DisplayName, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateKeyUpdateKeyOperation")}, fn.AddNest("DisplayName")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.KeyString, actual.KeyString, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KeyString")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Uid, actual.Uid, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Uid")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
	// NOTE(review): this span begins inside the enclosing Key field-diff
	// function (its opening is above this chunk); the brace below closes the
	// preceding field comparison.
	}

	if ds, err := dcl.Diff(desired.ServiceAccountEmail, actual.ServiceAccountEmail, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ServiceAccountEmail")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	// A change to Restrictions is applied via the UpdateKey operation rather
	// than a destroy/recreate.
	if ds, err := dcl.Diff(desired.Restrictions, actual.Restrictions, dcl.DiffInfo{ObjectFunction: compareKeyRestrictionsNewStyle, EmptyObject: EmptyKeyRestrictions, OperationSelector: dcl.TriggersOperation("updateKeyUpdateKeyOperation")}, fn.AddNest("Restrictions")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if len(newDiffs) > 0 {
		c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs)
	}
	return newDiffs, nil
}

// compareKeyRestrictionsNewStyle diffs two KeyRestrictions values field by
// field. Per the DCL comparator contract, each argument may arrive as either
// a value or a pointer; both are normalized to pointers before comparing.
func compareKeyRestrictionsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*KeyRestrictions)
	if !ok {
		desiredNotPointer, ok := d.(KeyRestrictions)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a KeyRestrictions or *KeyRestrictions", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*KeyRestrictions)
	if !ok {
		actualNotPointer, ok := a.(KeyRestrictions)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a KeyRestrictions", a)
		}
		actual = &actualNotPointer
	}

	// Every nested restriction type triggers the same in-place update
	// operation when it differs.
	if ds, err := dcl.Diff(desired.BrowserKeyRestrictions, actual.BrowserKeyRestrictions, dcl.DiffInfo{ObjectFunction: compareKeyRestrictionsBrowserKeyRestrictionsNewStyle, EmptyObject: EmptyKeyRestrictionsBrowserKeyRestrictions, OperationSelector: dcl.TriggersOperation("updateKeyUpdateKeyOperation")}, fn.AddNest("BrowserKeyRestrictions")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.ServerKeyRestrictions, actual.ServerKeyRestrictions, dcl.DiffInfo{ObjectFunction: compareKeyRestrictionsServerKeyRestrictionsNewStyle, EmptyObject: EmptyKeyRestrictionsServerKeyRestrictions, OperationSelector: dcl.TriggersOperation("updateKeyUpdateKeyOperation")}, fn.AddNest("ServerKeyRestrictions")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.AndroidKeyRestrictions, actual.AndroidKeyRestrictions, dcl.DiffInfo{ObjectFunction: compareKeyRestrictionsAndroidKeyRestrictionsNewStyle, EmptyObject: EmptyKeyRestrictionsAndroidKeyRestrictions, OperationSelector: dcl.TriggersOperation("updateKeyUpdateKeyOperation")}, fn.AddNest("AndroidKeyRestrictions")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.IosKeyRestrictions, actual.IosKeyRestrictions, dcl.DiffInfo{ObjectFunction: compareKeyRestrictionsIosKeyRestrictionsNewStyle, EmptyObject: EmptyKeyRestrictionsIosKeyRestrictions, OperationSelector: dcl.TriggersOperation("updateKeyUpdateKeyOperation")}, fn.AddNest("IosKeyRestrictions")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.ApiTargets, actual.ApiTargets, dcl.DiffInfo{ObjectFunction: compareKeyRestrictionsApiTargetsNewStyle, EmptyObject: EmptyKeyRestrictionsApiTargets, OperationSelector: dcl.TriggersOperation("updateKeyUpdateKeyOperation")}, fn.AddNest("ApiTargets")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareKeyRestrictionsBrowserKeyRestrictionsNewStyle diffs two
// KeyRestrictionsBrowserKeyRestrictions values (value or pointer form).
func compareKeyRestrictionsBrowserKeyRestrictionsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*KeyRestrictionsBrowserKeyRestrictions)
	if !ok {
		desiredNotPointer, ok := d.(KeyRestrictionsBrowserKeyRestrictions)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a KeyRestrictionsBrowserKeyRestrictions or *KeyRestrictionsBrowserKeyRestrictions", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*KeyRestrictionsBrowserKeyRestrictions)
	if !ok {
		actualNotPointer, ok := a.(KeyRestrictionsBrowserKeyRestrictions)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a KeyRestrictionsBrowserKeyRestrictions", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.AllowedReferrers, actual.AllowedReferrers, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateKeyUpdateKeyOperation")}, fn.AddNest("AllowedReferrers")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareKeyRestrictionsServerKeyRestrictionsNewStyle diffs two
// KeyRestrictionsServerKeyRestrictions values (value or pointer form).
func compareKeyRestrictionsServerKeyRestrictionsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*KeyRestrictionsServerKeyRestrictions)
	if !ok {
		desiredNotPointer, ok := d.(KeyRestrictionsServerKeyRestrictions)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a KeyRestrictionsServerKeyRestrictions or *KeyRestrictionsServerKeyRestrictions", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*KeyRestrictionsServerKeyRestrictions)
	if !ok {
		actualNotPointer, ok := a.(KeyRestrictionsServerKeyRestrictions)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a KeyRestrictionsServerKeyRestrictions", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.AllowedIps, actual.AllowedIps, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateKeyUpdateKeyOperation")}, fn.AddNest("AllowedIps")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareKeyRestrictionsAndroidKeyRestrictionsNewStyle diffs two
// KeyRestrictionsAndroidKeyRestrictions values (value or pointer form).
func compareKeyRestrictionsAndroidKeyRestrictionsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*KeyRestrictionsAndroidKeyRestrictions)
	if !ok {
		desiredNotPointer, ok := d.(KeyRestrictionsAndroidKeyRestrictions)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a KeyRestrictionsAndroidKeyRestrictions or *KeyRestrictionsAndroidKeyRestrictions", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*KeyRestrictionsAndroidKeyRestrictions)
	if !ok {
		actualNotPointer, ok := a.(KeyRestrictionsAndroidKeyRestrictions)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a KeyRestrictionsAndroidKeyRestrictions", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.AllowedApplications, actual.AllowedApplications, dcl.DiffInfo{ObjectFunction: compareKeyRestrictionsAndroidKeyRestrictionsAllowedApplicationsNewStyle, EmptyObject: EmptyKeyRestrictionsAndroidKeyRestrictionsAllowedApplications, OperationSelector: dcl.TriggersOperation("updateKeyUpdateKeyOperation")}, fn.AddNest("AllowedApplications")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareKeyRestrictionsAndroidKeyRestrictionsAllowedApplicationsNewStyle
// diffs two allowed-application entries (value or pointer form).
func compareKeyRestrictionsAndroidKeyRestrictionsAllowedApplicationsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*KeyRestrictionsAndroidKeyRestrictionsAllowedApplications)
	if !ok {
		desiredNotPointer, ok := d.(KeyRestrictionsAndroidKeyRestrictionsAllowedApplications)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a KeyRestrictionsAndroidKeyRestrictionsAllowedApplications or *KeyRestrictionsAndroidKeyRestrictionsAllowedApplications", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*KeyRestrictionsAndroidKeyRestrictionsAllowedApplications)
	if !ok {
		actualNotPointer, ok := a.(KeyRestrictionsAndroidKeyRestrictionsAllowedApplications)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a KeyRestrictionsAndroidKeyRestrictionsAllowedApplications", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.Sha1Fingerprint, actual.Sha1Fingerprint, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateKeyUpdateKeyOperation")}, fn.AddNest("Sha1Fingerprint")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.PackageName, actual.PackageName, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateKeyUpdateKeyOperation")}, fn.AddNest("PackageName")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareKeyRestrictionsIosKeyRestrictionsNewStyle diffs two
// KeyRestrictionsIosKeyRestrictions values (value or pointer form).
func compareKeyRestrictionsIosKeyRestrictionsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*KeyRestrictionsIosKeyRestrictions)
	if !ok {
		desiredNotPointer, ok := d.(KeyRestrictionsIosKeyRestrictions)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a KeyRestrictionsIosKeyRestrictions or *KeyRestrictionsIosKeyRestrictions", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*KeyRestrictionsIosKeyRestrictions)
	if !ok {
		actualNotPointer, ok := a.(KeyRestrictionsIosKeyRestrictions)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a KeyRestrictionsIosKeyRestrictions", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.AllowedBundleIds, actual.AllowedBundleIds, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateKeyUpdateKeyOperation")}, fn.AddNest("AllowedBundleIds")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareKeyRestrictionsApiTargetsNewStyle diffs two KeyRestrictionsApiTargets
// values (value or pointer form).
func compareKeyRestrictionsApiTargetsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*KeyRestrictionsApiTargets)
	if !ok {
		desiredNotPointer, ok := d.(KeyRestrictionsApiTargets)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a KeyRestrictionsApiTargets or *KeyRestrictionsApiTargets", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*KeyRestrictionsApiTargets)
	if !ok {
		actualNotPointer, ok := a.(KeyRestrictionsApiTargets)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a KeyRestrictionsApiTargets", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.Service, actual.Service, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateKeyUpdateKeyOperation")}, fn.AddNest("Service")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Methods, actual.Methods, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateKeyUpdateKeyOperation")}, fn.AddNest("Methods")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// urlNormalized returns a copy of the resource struct with values normalized
// for URL substitutions. For instance, it converts long-form self-links to
// short-form so they can be substituted in.
func (r *Key) urlNormalized() *Key {
	normalized := dcl.Copy(*r).(Key)
	normalized.Name = dcl.SelfLinkToName(r.Name)
	normalized.DisplayName = dcl.SelfLinkToName(r.DisplayName)
	normalized.KeyString = dcl.SelfLinkToName(r.KeyString)
	normalized.Uid = dcl.SelfLinkToName(r.Uid)
	normalized.ServiceAccountEmail = dcl.SelfLinkToName(r.ServiceAccountEmail)
	normalized.Project = dcl.SelfLinkToName(r.Project)
	return &normalized
}

// updateURL returns the request URL for the named update method. Only the
// "UpdateKey" method exists for this resource; any other name is an error.
// The double-brace escapes below are Go-template escapes (this is a .tmpl
// file) and must render literally as {{project}} / {{name}} placeholders.
func (r *Key) updateURL(userBasePath, updateName string) (string, error) {
	nr := r.urlNormalized()
	if updateName == "UpdateKey" {
		fields := map[string]interface{}{
			"project": dcl.ValueOrEmptyString(nr.Project),
			"name":    dcl.ValueOrEmptyString(nr.Name),
		}
		return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/global/keys/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, fields), nil

	}

	return "", fmt.Errorf("unknown update name: %s", updateName)
}

// marshal encodes the Key resource into JSON for a Create request, and
// performs transformations from the resource schema to the API schema if
// necessary.
func (r *Key) marshal(c *Client) ([]byte, error) {
	m, err := expandKey(c, r)
	if err != nil {
		return nil, fmt.Errorf("error marshalling Key: %w", err)
	}

	return json.Marshal(m)
}

// unmarshalKey decodes JSON responses into the Key resource schema.
+func unmarshalKey(b []byte, c *Client, res *Key) (*Key, error) { + var m map[string]interface{} + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + return unmarshalMapKey(m, c, res) +} + +func unmarshalMapKey(m map[string]interface{}, c *Client, res *Key) (*Key, error) { + + flattened := flattenKey(c, m, res) + if flattened == nil { + return nil, fmt.Errorf("attempted to flatten empty json object") + } + return flattened, nil +} + +// expandKey expands Key into a JSON request object. +func expandKey(c *Client, f *Key) (map[string]interface{}, error) { + m := make(map[string]interface{}) + res := f + _ = res + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Name into name: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["name"] = v + } + if v := f.DisplayName; dcl.ValueShouldBeSent(v) { + m["displayName"] = v + } + if v := f.ServiceAccountEmail; dcl.ValueShouldBeSent(v) { + m["serviceAccountEmail"] = v + } + if v, err := expandKeyRestrictions(c, f.Restrictions, res); err != nil { + return nil, fmt.Errorf("error expanding Restrictions into restrictions: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["restrictions"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Project into project: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["project"] = v + } + + return m, nil +} + +// flattenKey flattens Key from a JSON request object into the +// Key type. 
+func flattenKey(c *Client, i interface{}, res *Key) *Key { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + if len(m) == 0 { + return nil + } + + resultRes := &Key{} + resultRes.Name = dcl.FlattenString(m["name"]) + resultRes.DisplayName = dcl.FlattenString(m["displayName"]) + resultRes.KeyString = dcl.FlattenString(m["keyString"]) + resultRes.Uid = dcl.FlattenString(m["uid"]) + resultRes.ServiceAccountEmail = dcl.FlattenString(m["serviceAccountEmail"]) + resultRes.Restrictions = flattenKeyRestrictions(c, m["restrictions"], res) + resultRes.Project = dcl.FlattenString(m["project"]) + + return resultRes +} + +// expandKeyRestrictionsMap expands the contents of KeyRestrictions into a JSON +// request object. +func expandKeyRestrictionsMap(c *Client, f map[string]KeyRestrictions, res *Key) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandKeyRestrictions(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandKeyRestrictionsSlice expands the contents of KeyRestrictions into a JSON +// request object. +func expandKeyRestrictionsSlice(c *Client, f []KeyRestrictions, res *Key) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandKeyRestrictions(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenKeyRestrictionsMap flattens the contents of KeyRestrictions from a JSON +// response object. 
+func flattenKeyRestrictionsMap(c *Client, i interface{}, res *Key) map[string]KeyRestrictions { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]KeyRestrictions{} + } + + if len(a) == 0 { + return map[string]KeyRestrictions{} + } + + items := make(map[string]KeyRestrictions) + for k, item := range a { + items[k] = *flattenKeyRestrictions(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenKeyRestrictionsSlice flattens the contents of KeyRestrictions from a JSON +// response object. +func flattenKeyRestrictionsSlice(c *Client, i interface{}, res *Key) []KeyRestrictions { + a, ok := i.([]interface{}) + if !ok { + return []KeyRestrictions{} + } + + if len(a) == 0 { + return []KeyRestrictions{} + } + + items := make([]KeyRestrictions, 0, len(a)) + for _, item := range a { + items = append(items, *flattenKeyRestrictions(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandKeyRestrictions expands an instance of KeyRestrictions into a JSON +// request object. 
+func expandKeyRestrictions(c *Client, f *KeyRestrictions, res *Key) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v, err := expandKeyRestrictionsBrowserKeyRestrictions(c, f.BrowserKeyRestrictions, res); err != nil { + return nil, fmt.Errorf("error expanding BrowserKeyRestrictions into browserKeyRestrictions: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["browserKeyRestrictions"] = v + } + if v, err := expandKeyRestrictionsServerKeyRestrictions(c, f.ServerKeyRestrictions, res); err != nil { + return nil, fmt.Errorf("error expanding ServerKeyRestrictions into serverKeyRestrictions: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["serverKeyRestrictions"] = v + } + if v, err := expandKeyRestrictionsAndroidKeyRestrictions(c, f.AndroidKeyRestrictions, res); err != nil { + return nil, fmt.Errorf("error expanding AndroidKeyRestrictions into androidKeyRestrictions: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["androidKeyRestrictions"] = v + } + if v, err := expandKeyRestrictionsIosKeyRestrictions(c, f.IosKeyRestrictions, res); err != nil { + return nil, fmt.Errorf("error expanding IosKeyRestrictions into iosKeyRestrictions: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["iosKeyRestrictions"] = v + } + if v, err := expandKeyRestrictionsApiTargetsSlice(c, f.ApiTargets, res); err != nil { + return nil, fmt.Errorf("error expanding ApiTargets into apiTargets: %w", err) + } else if v != nil { + m["apiTargets"] = v + } + + return m, nil +} + +// flattenKeyRestrictions flattens an instance of KeyRestrictions from a JSON +// response object. 
+func flattenKeyRestrictions(c *Client, i interface{}, res *Key) *KeyRestrictions { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &KeyRestrictions{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyKeyRestrictions + } + r.BrowserKeyRestrictions = flattenKeyRestrictionsBrowserKeyRestrictions(c, m["browserKeyRestrictions"], res) + r.ServerKeyRestrictions = flattenKeyRestrictionsServerKeyRestrictions(c, m["serverKeyRestrictions"], res) + r.AndroidKeyRestrictions = flattenKeyRestrictionsAndroidKeyRestrictions(c, m["androidKeyRestrictions"], res) + r.IosKeyRestrictions = flattenKeyRestrictionsIosKeyRestrictions(c, m["iosKeyRestrictions"], res) + r.ApiTargets = flattenKeyRestrictionsApiTargetsSlice(c, m["apiTargets"], res) + + return r +} + +// expandKeyRestrictionsBrowserKeyRestrictionsMap expands the contents of KeyRestrictionsBrowserKeyRestrictions into a JSON +// request object. +func expandKeyRestrictionsBrowserKeyRestrictionsMap(c *Client, f map[string]KeyRestrictionsBrowserKeyRestrictions, res *Key) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandKeyRestrictionsBrowserKeyRestrictions(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandKeyRestrictionsBrowserKeyRestrictionsSlice expands the contents of KeyRestrictionsBrowserKeyRestrictions into a JSON +// request object. 
+func expandKeyRestrictionsBrowserKeyRestrictionsSlice(c *Client, f []KeyRestrictionsBrowserKeyRestrictions, res *Key) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandKeyRestrictionsBrowserKeyRestrictions(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenKeyRestrictionsBrowserKeyRestrictionsMap flattens the contents of KeyRestrictionsBrowserKeyRestrictions from a JSON +// response object. +func flattenKeyRestrictionsBrowserKeyRestrictionsMap(c *Client, i interface{}, res *Key) map[string]KeyRestrictionsBrowserKeyRestrictions { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]KeyRestrictionsBrowserKeyRestrictions{} + } + + if len(a) == 0 { + return map[string]KeyRestrictionsBrowserKeyRestrictions{} + } + + items := make(map[string]KeyRestrictionsBrowserKeyRestrictions) + for k, item := range a { + items[k] = *flattenKeyRestrictionsBrowserKeyRestrictions(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenKeyRestrictionsBrowserKeyRestrictionsSlice flattens the contents of KeyRestrictionsBrowserKeyRestrictions from a JSON +// response object. +func flattenKeyRestrictionsBrowserKeyRestrictionsSlice(c *Client, i interface{}, res *Key) []KeyRestrictionsBrowserKeyRestrictions { + a, ok := i.([]interface{}) + if !ok { + return []KeyRestrictionsBrowserKeyRestrictions{} + } + + if len(a) == 0 { + return []KeyRestrictionsBrowserKeyRestrictions{} + } + + items := make([]KeyRestrictionsBrowserKeyRestrictions, 0, len(a)) + for _, item := range a { + items = append(items, *flattenKeyRestrictionsBrowserKeyRestrictions(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandKeyRestrictionsBrowserKeyRestrictions expands an instance of KeyRestrictionsBrowserKeyRestrictions into a JSON +// request object. 
+func expandKeyRestrictionsBrowserKeyRestrictions(c *Client, f *KeyRestrictionsBrowserKeyRestrictions, res *Key) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.AllowedReferrers; v != nil { + m["allowedReferrers"] = v + } + + return m, nil +} + +// flattenKeyRestrictionsBrowserKeyRestrictions flattens an instance of KeyRestrictionsBrowserKeyRestrictions from a JSON +// response object. +func flattenKeyRestrictionsBrowserKeyRestrictions(c *Client, i interface{}, res *Key) *KeyRestrictionsBrowserKeyRestrictions { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &KeyRestrictionsBrowserKeyRestrictions{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyKeyRestrictionsBrowserKeyRestrictions + } + r.AllowedReferrers = dcl.FlattenStringSlice(m["allowedReferrers"]) + + return r +} + +// expandKeyRestrictionsServerKeyRestrictionsMap expands the contents of KeyRestrictionsServerKeyRestrictions into a JSON +// request object. +func expandKeyRestrictionsServerKeyRestrictionsMap(c *Client, f map[string]KeyRestrictionsServerKeyRestrictions, res *Key) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandKeyRestrictionsServerKeyRestrictions(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandKeyRestrictionsServerKeyRestrictionsSlice expands the contents of KeyRestrictionsServerKeyRestrictions into a JSON +// request object. 
+func expandKeyRestrictionsServerKeyRestrictionsSlice(c *Client, f []KeyRestrictionsServerKeyRestrictions, res *Key) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandKeyRestrictionsServerKeyRestrictions(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenKeyRestrictionsServerKeyRestrictionsMap flattens the contents of KeyRestrictionsServerKeyRestrictions from a JSON +// response object. +func flattenKeyRestrictionsServerKeyRestrictionsMap(c *Client, i interface{}, res *Key) map[string]KeyRestrictionsServerKeyRestrictions { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]KeyRestrictionsServerKeyRestrictions{} + } + + if len(a) == 0 { + return map[string]KeyRestrictionsServerKeyRestrictions{} + } + + items := make(map[string]KeyRestrictionsServerKeyRestrictions) + for k, item := range a { + items[k] = *flattenKeyRestrictionsServerKeyRestrictions(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenKeyRestrictionsServerKeyRestrictionsSlice flattens the contents of KeyRestrictionsServerKeyRestrictions from a JSON +// response object. +func flattenKeyRestrictionsServerKeyRestrictionsSlice(c *Client, i interface{}, res *Key) []KeyRestrictionsServerKeyRestrictions { + a, ok := i.([]interface{}) + if !ok { + return []KeyRestrictionsServerKeyRestrictions{} + } + + if len(a) == 0 { + return []KeyRestrictionsServerKeyRestrictions{} + } + + items := make([]KeyRestrictionsServerKeyRestrictions, 0, len(a)) + for _, item := range a { + items = append(items, *flattenKeyRestrictionsServerKeyRestrictions(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandKeyRestrictionsServerKeyRestrictions expands an instance of KeyRestrictionsServerKeyRestrictions into a JSON +// request object. 
+func expandKeyRestrictionsServerKeyRestrictions(c *Client, f *KeyRestrictionsServerKeyRestrictions, res *Key) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.AllowedIps; v != nil { + m["allowedIps"] = v + } + + return m, nil +} + +// flattenKeyRestrictionsServerKeyRestrictions flattens an instance of KeyRestrictionsServerKeyRestrictions from a JSON +// response object. +func flattenKeyRestrictionsServerKeyRestrictions(c *Client, i interface{}, res *Key) *KeyRestrictionsServerKeyRestrictions { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &KeyRestrictionsServerKeyRestrictions{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyKeyRestrictionsServerKeyRestrictions + } + r.AllowedIps = dcl.FlattenStringSlice(m["allowedIps"]) + + return r +} + +// expandKeyRestrictionsAndroidKeyRestrictionsMap expands the contents of KeyRestrictionsAndroidKeyRestrictions into a JSON +// request object. +func expandKeyRestrictionsAndroidKeyRestrictionsMap(c *Client, f map[string]KeyRestrictionsAndroidKeyRestrictions, res *Key) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandKeyRestrictionsAndroidKeyRestrictions(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandKeyRestrictionsAndroidKeyRestrictionsSlice expands the contents of KeyRestrictionsAndroidKeyRestrictions into a JSON +// request object. 
+func expandKeyRestrictionsAndroidKeyRestrictionsSlice(c *Client, f []KeyRestrictionsAndroidKeyRestrictions, res *Key) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandKeyRestrictionsAndroidKeyRestrictions(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenKeyRestrictionsAndroidKeyRestrictionsMap flattens the contents of KeyRestrictionsAndroidKeyRestrictions from a JSON +// response object. +func flattenKeyRestrictionsAndroidKeyRestrictionsMap(c *Client, i interface{}, res *Key) map[string]KeyRestrictionsAndroidKeyRestrictions { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]KeyRestrictionsAndroidKeyRestrictions{} + } + + if len(a) == 0 { + return map[string]KeyRestrictionsAndroidKeyRestrictions{} + } + + items := make(map[string]KeyRestrictionsAndroidKeyRestrictions) + for k, item := range a { + items[k] = *flattenKeyRestrictionsAndroidKeyRestrictions(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenKeyRestrictionsAndroidKeyRestrictionsSlice flattens the contents of KeyRestrictionsAndroidKeyRestrictions from a JSON +// response object. +func flattenKeyRestrictionsAndroidKeyRestrictionsSlice(c *Client, i interface{}, res *Key) []KeyRestrictionsAndroidKeyRestrictions { + a, ok := i.([]interface{}) + if !ok { + return []KeyRestrictionsAndroidKeyRestrictions{} + } + + if len(a) == 0 { + return []KeyRestrictionsAndroidKeyRestrictions{} + } + + items := make([]KeyRestrictionsAndroidKeyRestrictions, 0, len(a)) + for _, item := range a { + items = append(items, *flattenKeyRestrictionsAndroidKeyRestrictions(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandKeyRestrictionsAndroidKeyRestrictions expands an instance of KeyRestrictionsAndroidKeyRestrictions into a JSON +// request object. 
+func expandKeyRestrictionsAndroidKeyRestrictions(c *Client, f *KeyRestrictionsAndroidKeyRestrictions, res *Key) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v, err := expandKeyRestrictionsAndroidKeyRestrictionsAllowedApplicationsSlice(c, f.AllowedApplications, res); err != nil { + return nil, fmt.Errorf("error expanding AllowedApplications into allowedApplications: %w", err) + } else if v != nil { + m["allowedApplications"] = v + } + + return m, nil +} + +// flattenKeyRestrictionsAndroidKeyRestrictions flattens an instance of KeyRestrictionsAndroidKeyRestrictions from a JSON +// response object. +func flattenKeyRestrictionsAndroidKeyRestrictions(c *Client, i interface{}, res *Key) *KeyRestrictionsAndroidKeyRestrictions { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &KeyRestrictionsAndroidKeyRestrictions{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyKeyRestrictionsAndroidKeyRestrictions + } + r.AllowedApplications = flattenKeyRestrictionsAndroidKeyRestrictionsAllowedApplicationsSlice(c, m["allowedApplications"], res) + + return r +} + +// expandKeyRestrictionsAndroidKeyRestrictionsAllowedApplicationsMap expands the contents of KeyRestrictionsAndroidKeyRestrictionsAllowedApplications into a JSON +// request object. 
+func expandKeyRestrictionsAndroidKeyRestrictionsAllowedApplicationsMap(c *Client, f map[string]KeyRestrictionsAndroidKeyRestrictionsAllowedApplications, res *Key) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandKeyRestrictionsAndroidKeyRestrictionsAllowedApplications(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandKeyRestrictionsAndroidKeyRestrictionsAllowedApplicationsSlice expands the contents of KeyRestrictionsAndroidKeyRestrictionsAllowedApplications into a JSON +// request object. +func expandKeyRestrictionsAndroidKeyRestrictionsAllowedApplicationsSlice(c *Client, f []KeyRestrictionsAndroidKeyRestrictionsAllowedApplications, res *Key) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandKeyRestrictionsAndroidKeyRestrictionsAllowedApplications(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenKeyRestrictionsAndroidKeyRestrictionsAllowedApplicationsMap flattens the contents of KeyRestrictionsAndroidKeyRestrictionsAllowedApplications from a JSON +// response object. 
+func flattenKeyRestrictionsAndroidKeyRestrictionsAllowedApplicationsMap(c *Client, i interface{}, res *Key) map[string]KeyRestrictionsAndroidKeyRestrictionsAllowedApplications { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]KeyRestrictionsAndroidKeyRestrictionsAllowedApplications{} + } + + if len(a) == 0 { + return map[string]KeyRestrictionsAndroidKeyRestrictionsAllowedApplications{} + } + + items := make(map[string]KeyRestrictionsAndroidKeyRestrictionsAllowedApplications) + for k, item := range a { + items[k] = *flattenKeyRestrictionsAndroidKeyRestrictionsAllowedApplications(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenKeyRestrictionsAndroidKeyRestrictionsAllowedApplicationsSlice flattens the contents of KeyRestrictionsAndroidKeyRestrictionsAllowedApplications from a JSON +// response object. +func flattenKeyRestrictionsAndroidKeyRestrictionsAllowedApplicationsSlice(c *Client, i interface{}, res *Key) []KeyRestrictionsAndroidKeyRestrictionsAllowedApplications { + a, ok := i.([]interface{}) + if !ok { + return []KeyRestrictionsAndroidKeyRestrictionsAllowedApplications{} + } + + if len(a) == 0 { + return []KeyRestrictionsAndroidKeyRestrictionsAllowedApplications{} + } + + items := make([]KeyRestrictionsAndroidKeyRestrictionsAllowedApplications, 0, len(a)) + for _, item := range a { + items = append(items, *flattenKeyRestrictionsAndroidKeyRestrictionsAllowedApplications(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandKeyRestrictionsAndroidKeyRestrictionsAllowedApplications expands an instance of KeyRestrictionsAndroidKeyRestrictionsAllowedApplications into a JSON +// request object. 
+func expandKeyRestrictionsAndroidKeyRestrictionsAllowedApplications(c *Client, f *KeyRestrictionsAndroidKeyRestrictionsAllowedApplications, res *Key) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Sha1Fingerprint; !dcl.IsEmptyValueIndirect(v) { + m["sha1Fingerprint"] = v + } + if v := f.PackageName; !dcl.IsEmptyValueIndirect(v) { + m["packageName"] = v + } + + return m, nil +} + +// flattenKeyRestrictionsAndroidKeyRestrictionsAllowedApplications flattens an instance of KeyRestrictionsAndroidKeyRestrictionsAllowedApplications from a JSON +// response object. +func flattenKeyRestrictionsAndroidKeyRestrictionsAllowedApplications(c *Client, i interface{}, res *Key) *KeyRestrictionsAndroidKeyRestrictionsAllowedApplications { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &KeyRestrictionsAndroidKeyRestrictionsAllowedApplications{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyKeyRestrictionsAndroidKeyRestrictionsAllowedApplications + } + r.Sha1Fingerprint = dcl.FlattenString(m["sha1Fingerprint"]) + r.PackageName = dcl.FlattenString(m["packageName"]) + + return r +} + +// expandKeyRestrictionsIosKeyRestrictionsMap expands the contents of KeyRestrictionsIosKeyRestrictions into a JSON +// request object. +func expandKeyRestrictionsIosKeyRestrictionsMap(c *Client, f map[string]KeyRestrictionsIosKeyRestrictions, res *Key) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandKeyRestrictionsIosKeyRestrictions(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandKeyRestrictionsIosKeyRestrictionsSlice expands the contents of KeyRestrictionsIosKeyRestrictions into a JSON +// request object. 
+func expandKeyRestrictionsIosKeyRestrictionsSlice(c *Client, f []KeyRestrictionsIosKeyRestrictions, res *Key) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandKeyRestrictionsIosKeyRestrictions(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenKeyRestrictionsIosKeyRestrictionsMap flattens the contents of KeyRestrictionsIosKeyRestrictions from a JSON +// response object. +func flattenKeyRestrictionsIosKeyRestrictionsMap(c *Client, i interface{}, res *Key) map[string]KeyRestrictionsIosKeyRestrictions { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]KeyRestrictionsIosKeyRestrictions{} + } + + if len(a) == 0 { + return map[string]KeyRestrictionsIosKeyRestrictions{} + } + + items := make(map[string]KeyRestrictionsIosKeyRestrictions) + for k, item := range a { + items[k] = *flattenKeyRestrictionsIosKeyRestrictions(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenKeyRestrictionsIosKeyRestrictionsSlice flattens the contents of KeyRestrictionsIosKeyRestrictions from a JSON +// response object. +func flattenKeyRestrictionsIosKeyRestrictionsSlice(c *Client, i interface{}, res *Key) []KeyRestrictionsIosKeyRestrictions { + a, ok := i.([]interface{}) + if !ok { + return []KeyRestrictionsIosKeyRestrictions{} + } + + if len(a) == 0 { + return []KeyRestrictionsIosKeyRestrictions{} + } + + items := make([]KeyRestrictionsIosKeyRestrictions, 0, len(a)) + for _, item := range a { + items = append(items, *flattenKeyRestrictionsIosKeyRestrictions(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandKeyRestrictionsIosKeyRestrictions expands an instance of KeyRestrictionsIosKeyRestrictions into a JSON +// request object. 
+func expandKeyRestrictionsIosKeyRestrictions(c *Client, f *KeyRestrictionsIosKeyRestrictions, res *Key) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.AllowedBundleIds; v != nil { + m["allowedBundleIds"] = v + } + + return m, nil +} + +// flattenKeyRestrictionsIosKeyRestrictions flattens an instance of KeyRestrictionsIosKeyRestrictions from a JSON +// response object. +func flattenKeyRestrictionsIosKeyRestrictions(c *Client, i interface{}, res *Key) *KeyRestrictionsIosKeyRestrictions { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &KeyRestrictionsIosKeyRestrictions{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyKeyRestrictionsIosKeyRestrictions + } + r.AllowedBundleIds = dcl.FlattenStringSlice(m["allowedBundleIds"]) + + return r +} + +// expandKeyRestrictionsApiTargetsMap expands the contents of KeyRestrictionsApiTargets into a JSON +// request object. +func expandKeyRestrictionsApiTargetsMap(c *Client, f map[string]KeyRestrictionsApiTargets, res *Key) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandKeyRestrictionsApiTargets(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandKeyRestrictionsApiTargetsSlice expands the contents of KeyRestrictionsApiTargets into a JSON +// request object. 
+func expandKeyRestrictionsApiTargetsSlice(c *Client, f []KeyRestrictionsApiTargets, res *Key) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandKeyRestrictionsApiTargets(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenKeyRestrictionsApiTargetsMap flattens the contents of KeyRestrictionsApiTargets from a JSON +// response object. +func flattenKeyRestrictionsApiTargetsMap(c *Client, i interface{}, res *Key) map[string]KeyRestrictionsApiTargets { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]KeyRestrictionsApiTargets{} + } + + if len(a) == 0 { + return map[string]KeyRestrictionsApiTargets{} + } + + items := make(map[string]KeyRestrictionsApiTargets) + for k, item := range a { + items[k] = *flattenKeyRestrictionsApiTargets(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenKeyRestrictionsApiTargetsSlice flattens the contents of KeyRestrictionsApiTargets from a JSON +// response object. +func flattenKeyRestrictionsApiTargetsSlice(c *Client, i interface{}, res *Key) []KeyRestrictionsApiTargets { + a, ok := i.([]interface{}) + if !ok { + return []KeyRestrictionsApiTargets{} + } + + if len(a) == 0 { + return []KeyRestrictionsApiTargets{} + } + + items := make([]KeyRestrictionsApiTargets, 0, len(a)) + for _, item := range a { + items = append(items, *flattenKeyRestrictionsApiTargets(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandKeyRestrictionsApiTargets expands an instance of KeyRestrictionsApiTargets into a JSON +// request object. 
+func expandKeyRestrictionsApiTargets(c *Client, f *KeyRestrictionsApiTargets, res *Key) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Service; !dcl.IsEmptyValueIndirect(v) { + m["service"] = v + } + if v := f.Methods; v != nil { + m["methods"] = v + } + + return m, nil +} + +// flattenKeyRestrictionsApiTargets flattens an instance of KeyRestrictionsApiTargets from a JSON +// response object. +func flattenKeyRestrictionsApiTargets(c *Client, i interface{}, res *Key) *KeyRestrictionsApiTargets { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &KeyRestrictionsApiTargets{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyKeyRestrictionsApiTargets + } + r.Service = dcl.FlattenString(m["service"]) + r.Methods = dcl.FlattenStringSlice(m["methods"]) + + return r +} + +// This function returns a matcher that checks whether a serialized resource matches this resource +// in its parameters (as defined by the fields in a Get, which definitionally define resource +// identity). This is useful in extracting the element from a List call. 
+func (r *Key) matcher(c *Client) func([]byte) bool { + return func(b []byte) bool { + cr, err := unmarshalKey(b, c, r) + if err != nil { + c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.") + return false + } + nr := r.urlNormalized() + ncr := cr.urlNormalized() + c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr) + + if nr.Project == nil && ncr.Project == nil { + c.Config.Logger.Info("Both Project fields null - considering equal.") + } else if nr.Project == nil || ncr.Project == nil { + c.Config.Logger.Info("Only one Project field is null - considering unequal.") + return false + } else if *nr.Project != *ncr.Project { + return false + } + if nr.Name == nil && ncr.Name == nil { + c.Config.Logger.Info("Both Name fields null - considering equal.") + } else if nr.Name == nil || ncr.Name == nil { + c.Config.Logger.Info("Only one Name field is null - considering unequal.") + return false + } else if *nr.Name != *ncr.Name { + return false + } + return true + } +} + +type keyDiff struct { + // The diff should include one or the other of RequiresRecreate or UpdateOp. + RequiresRecreate bool + UpdateOp keyApiOperation + FieldName string // used for error logging +} + +func convertFieldDiffsToKeyDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]keyDiff, error) { + opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff) + // Map each operation name to the field diffs associated with it. + for _, fd := range fds { + for _, ro := range fd.ResultingOperation { + if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok { + fieldDiffs = append(fieldDiffs, fd) + opNamesToFieldDiffs[ro] = fieldDiffs + } else { + config.Logger.Infof("%s required due to diff: %v", ro, fd) + opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd} + } + } + } + var diffs []keyDiff + // For each operation name, create a keyDiff which contains the operation. 
+ for opName, fieldDiffs := range opNamesToFieldDiffs { + // Use the first field diff's field name for logging required recreate error. + diff := keyDiff{FieldName: fieldDiffs[0].FieldName} + if opName == "Recreate" { + diff.RequiresRecreate = true + } else { + apiOp, err := convertOpNameToKeyApiOperation(opName, fieldDiffs, opts...) + if err != nil { + return diffs, err + } + diff.UpdateOp = apiOp + } + diffs = append(diffs, diff) + } + return diffs, nil +} + +func convertOpNameToKeyApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (keyApiOperation, error) { + switch opName { + + case "updateKeyUpdateKeyOperation": + return &updateKeyUpdateKeyOperation{FieldDiffs: fieldDiffs}, nil + + default: + return nil, fmt.Errorf("no such operation with name: %v", opName) + } +} + +func extractKeyFields(r *Key) error { + vRestrictions := r.Restrictions + if vRestrictions == nil { + // note: explicitly not the empty object. + vRestrictions = &KeyRestrictions{} + } + if err := extractKeyRestrictionsFields(r, vRestrictions); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vRestrictions) { + r.Restrictions = vRestrictions + } + return nil +} +func extractKeyRestrictionsFields(r *Key, o *KeyRestrictions) error { + vBrowserKeyRestrictions := o.BrowserKeyRestrictions + if vBrowserKeyRestrictions == nil { + // note: explicitly not the empty object. + vBrowserKeyRestrictions = &KeyRestrictionsBrowserKeyRestrictions{} + } + if err := extractKeyRestrictionsBrowserKeyRestrictionsFields(r, vBrowserKeyRestrictions); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vBrowserKeyRestrictions) { + o.BrowserKeyRestrictions = vBrowserKeyRestrictions + } + vServerKeyRestrictions := o.ServerKeyRestrictions + if vServerKeyRestrictions == nil { + // note: explicitly not the empty object. 
+ vServerKeyRestrictions = &KeyRestrictionsServerKeyRestrictions{} + } + if err := extractKeyRestrictionsServerKeyRestrictionsFields(r, vServerKeyRestrictions); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vServerKeyRestrictions) { + o.ServerKeyRestrictions = vServerKeyRestrictions + } + vAndroidKeyRestrictions := o.AndroidKeyRestrictions + if vAndroidKeyRestrictions == nil { + // note: explicitly not the empty object. + vAndroidKeyRestrictions = &KeyRestrictionsAndroidKeyRestrictions{} + } + if err := extractKeyRestrictionsAndroidKeyRestrictionsFields(r, vAndroidKeyRestrictions); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAndroidKeyRestrictions) { + o.AndroidKeyRestrictions = vAndroidKeyRestrictions + } + vIosKeyRestrictions := o.IosKeyRestrictions + if vIosKeyRestrictions == nil { + // note: explicitly not the empty object. + vIosKeyRestrictions = &KeyRestrictionsIosKeyRestrictions{} + } + if err := extractKeyRestrictionsIosKeyRestrictionsFields(r, vIosKeyRestrictions); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vIosKeyRestrictions) { + o.IosKeyRestrictions = vIosKeyRestrictions + } + return nil +} +func extractKeyRestrictionsBrowserKeyRestrictionsFields(r *Key, o *KeyRestrictionsBrowserKeyRestrictions) error { + return nil +} +func extractKeyRestrictionsServerKeyRestrictionsFields(r *Key, o *KeyRestrictionsServerKeyRestrictions) error { + return nil +} +func extractKeyRestrictionsAndroidKeyRestrictionsFields(r *Key, o *KeyRestrictionsAndroidKeyRestrictions) error { + return nil +} +func extractKeyRestrictionsAndroidKeyRestrictionsAllowedApplicationsFields(r *Key, o *KeyRestrictionsAndroidKeyRestrictionsAllowedApplications) error { + return nil +} +func extractKeyRestrictionsIosKeyRestrictionsFields(r *Key, o *KeyRestrictionsIosKeyRestrictions) error { + return nil +} +func extractKeyRestrictionsApiTargetsFields(r *Key, o *KeyRestrictionsApiTargets) error { + return nil +} + +func postReadExtractKeyFields(r 
*Key) error { + vRestrictions := r.Restrictions + if vRestrictions == nil { + // note: explicitly not the empty object. + vRestrictions = &KeyRestrictions{} + } + if err := postReadExtractKeyRestrictionsFields(r, vRestrictions); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vRestrictions) { + r.Restrictions = vRestrictions + } + return nil +} +func postReadExtractKeyRestrictionsFields(r *Key, o *KeyRestrictions) error { + vBrowserKeyRestrictions := o.BrowserKeyRestrictions + if vBrowserKeyRestrictions == nil { + // note: explicitly not the empty object. + vBrowserKeyRestrictions = &KeyRestrictionsBrowserKeyRestrictions{} + } + if err := extractKeyRestrictionsBrowserKeyRestrictionsFields(r, vBrowserKeyRestrictions); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vBrowserKeyRestrictions) { + o.BrowserKeyRestrictions = vBrowserKeyRestrictions + } + vServerKeyRestrictions := o.ServerKeyRestrictions + if vServerKeyRestrictions == nil { + // note: explicitly not the empty object. + vServerKeyRestrictions = &KeyRestrictionsServerKeyRestrictions{} + } + if err := extractKeyRestrictionsServerKeyRestrictionsFields(r, vServerKeyRestrictions); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vServerKeyRestrictions) { + o.ServerKeyRestrictions = vServerKeyRestrictions + } + vAndroidKeyRestrictions := o.AndroidKeyRestrictions + if vAndroidKeyRestrictions == nil { + // note: explicitly not the empty object. + vAndroidKeyRestrictions = &KeyRestrictionsAndroidKeyRestrictions{} + } + if err := extractKeyRestrictionsAndroidKeyRestrictionsFields(r, vAndroidKeyRestrictions); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAndroidKeyRestrictions) { + o.AndroidKeyRestrictions = vAndroidKeyRestrictions + } + vIosKeyRestrictions := o.IosKeyRestrictions + if vIosKeyRestrictions == nil { + // note: explicitly not the empty object. 
+ vIosKeyRestrictions = &KeyRestrictionsIosKeyRestrictions{} + } + if err := extractKeyRestrictionsIosKeyRestrictionsFields(r, vIosKeyRestrictions); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vIosKeyRestrictions) { + o.IosKeyRestrictions = vIosKeyRestrictions + } + return nil +} +func postReadExtractKeyRestrictionsBrowserKeyRestrictionsFields(r *Key, o *KeyRestrictionsBrowserKeyRestrictions) error { + return nil +} +func postReadExtractKeyRestrictionsServerKeyRestrictionsFields(r *Key, o *KeyRestrictionsServerKeyRestrictions) error { + return nil +} +func postReadExtractKeyRestrictionsAndroidKeyRestrictionsFields(r *Key, o *KeyRestrictionsAndroidKeyRestrictions) error { + return nil +} +func postReadExtractKeyRestrictionsAndroidKeyRestrictionsAllowedApplicationsFields(r *Key, o *KeyRestrictionsAndroidKeyRestrictionsAllowedApplications) error { + return nil +} +func postReadExtractKeyRestrictionsIosKeyRestrictionsFields(r *Key, o *KeyRestrictionsIosKeyRestrictions) error { + return nil +} +func postReadExtractKeyRestrictionsApiTargetsFields(r *Key, o *KeyRestrictionsApiTargets) error { + return nil +} diff --git a/mmv1/third_party/terraform/services/apikeys/key_schema.go.tmpl b/mmv1/third_party/terraform/services/apikeys/key_schema.go.tmpl new file mode 100644 index 000000000000..d342aca956d3 --- /dev/null +++ b/mmv1/third_party/terraform/services/apikeys/key_schema.go.tmpl @@ -0,0 +1,310 @@ +package apikeys + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +func DCLKeySchema() *dcl.Schema { + return &dcl.Schema{ + Info: &dcl.Info{ + Title: "Apikeys/Key", + Description: "The Apikeys Key resource", + StructName: "Key", + }, + Paths: &dcl.Paths{ + Get: &dcl.Path{ + Description: "The function used to get information about a Key", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "key", + Required: true, + Description: "A full instance of a Key", + }, + }, + }, + Apply: &dcl.Path{ + 
Description: "The function used to apply information about a Key", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "key", + Required: true, + Description: "A full instance of a Key", + }, + }, + }, + Delete: &dcl.Path{ + Description: "The function used to delete a Key", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "key", + Required: true, + Description: "A full instance of a Key", + }, + }, + }, + DeleteAll: &dcl.Path{ + Description: "The function used to delete all Key", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "project", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + }, + }, + List: &dcl.Path{ + Description: "The function used to list information about many Key", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "project", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + }, + }, + }, + Components: &dcl.Components{ + Schemas: map[string]*dcl.Component{ + "Key": &dcl.Component{ + Title: "Key", + ID: "projects/{{ "{{" }}project{{ "}}" }}/locations/global/keys/{{ "{{" }}name{{ "}}" }}", + Locations: []string{ + "global", + }, + ParentContainer: "project", + HasCreate: true, + SchemaProperty: dcl.Property{ + Type: "object", + Required: []string{ + "name", + "project", + }, + Properties: map[string]*dcl.Property{ + "displayName": &dcl.Property{ + Type: "string", + GoName: "DisplayName", + Description: "Human-readable display name of this API key. Modifiable by user.", + }, + "keyString": &dcl.Property{ + Type: "string", + GoName: "KeyString", + ReadOnly: true, + Description: "Output only. An encrypted and signed value held by this key. This field can be accessed only through the `GetKeyString` method.", + Immutable: true, + Sensitive: true, + }, + "name": &dcl.Property{ + Type: "string", + GoName: "Name", + Description: "The resource name of the key. 
The name must be unique within the project, must conform with RFC-1034, is restricted to lower-cased letters, and has a maximum length of 63 characters. In another word, the name must match the regular expression: `[a-z]([a-z0-9-]{0,61}[a-z0-9])?`.", + Immutable: true, + Parameter: true, + }, + "project": &dcl.Property{ + Type: "string", + GoName: "Project", + Description: "The project for the resource", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Cloudresourcemanager/Project", + Field: "name", + Parent: true, + }, + }, + Parameter: true, + }, + "restrictions": &dcl.Property{ + Type: "object", + GoName: "Restrictions", + GoType: "KeyRestrictions", + Description: "Key restrictions.", + Properties: map[string]*dcl.Property{ + "androidKeyRestrictions": &dcl.Property{ + Type: "object", + GoName: "AndroidKeyRestrictions", + GoType: "KeyRestrictionsAndroidKeyRestrictions", + Description: "The Android apps that are allowed to use the key.", + Conflicts: []string{ + "browserKeyRestrictions", + "serverKeyRestrictions", + "iosKeyRestrictions", + }, + Required: []string{ + "allowedApplications", + }, + Properties: map[string]*dcl.Property{ + "allowedApplications": &dcl.Property{ + Type: "array", + GoName: "AllowedApplications", + Description: "A list of Android applications that are allowed to make API calls with this key.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "object", + GoType: "KeyRestrictionsAndroidKeyRestrictionsAllowedApplications", + Required: []string{ + "sha1Fingerprint", + "packageName", + }, + Properties: map[string]*dcl.Property{ + "packageName": &dcl.Property{ + Type: "string", + GoName: "PackageName", + Description: "The package name of the application.", + }, + "sha1Fingerprint": &dcl.Property{ + Type: "string", + GoName: "Sha1Fingerprint", + Description: "The SHA1 fingerprint of the application. 
For example, both sha1 formats are acceptable : DA:39:A3:EE:5E:6B:4B:0D:32:55:BF:EF:95:60:18:90:AF:D8:07:09 or DA39A3EE5E6B4B0D3255BFEF95601890AFD80709. Output format is the latter.", + }, + }, + }, + }, + }, + }, + "apiTargets": &dcl.Property{ + Type: "array", + GoName: "ApiTargets", + Description: "A restriction for a specific service and optionally one or more specific methods. Requests are allowed if they match any of these restrictions. If no restrictions are specified, all targets are allowed.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "object", + GoType: "KeyRestrictionsApiTargets", + Required: []string{ + "service", + }, + Properties: map[string]*dcl.Property{ + "methods": &dcl.Property{ + Type: "array", + GoName: "Methods", + Description: "Optional. List of one or more methods that can be called. If empty, all methods for the service are allowed. A wildcard (*) can be used as the last symbol. Valid examples: `google.cloud.translate.v2.TranslateService.GetSupportedLanguage` `TranslateText` `Get*` `translate.googleapis.com.Get*`", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + "service": &dcl.Property{ + Type: "string", + GoName: "Service", + Description: "The service for this restriction. It should be the canonical service name, for example: `translate.googleapis.com`. 
You can use `gcloud services list` to get a list of services that are enabled in the project.", + }, + }, + }, + }, + "browserKeyRestrictions": &dcl.Property{ + Type: "object", + GoName: "BrowserKeyRestrictions", + GoType: "KeyRestrictionsBrowserKeyRestrictions", + Description: "The HTTP referrers (websites) that are allowed to use the key.", + Conflicts: []string{ + "serverKeyRestrictions", + "androidKeyRestrictions", + "iosKeyRestrictions", + }, + Required: []string{ + "allowedReferrers", + }, + Properties: map[string]*dcl.Property{ + "allowedReferrers": &dcl.Property{ + Type: "array", + GoName: "AllowedReferrers", + Description: "A list of regular expressions for the referrer URLs that are allowed to make API calls with this key.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + }, + }, + "iosKeyRestrictions": &dcl.Property{ + Type: "object", + GoName: "IosKeyRestrictions", + GoType: "KeyRestrictionsIosKeyRestrictions", + Description: "The iOS apps that are allowed to use the key.", + Conflicts: []string{ + "browserKeyRestrictions", + "serverKeyRestrictions", + "androidKeyRestrictions", + }, + Required: []string{ + "allowedBundleIds", + }, + Properties: map[string]*dcl.Property{ + "allowedBundleIds": &dcl.Property{ + Type: "array", + GoName: "AllowedBundleIds", + Description: "A list of bundle IDs that are allowed when making API calls with this key.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + }, + }, + "serverKeyRestrictions": &dcl.Property{ + Type: "object", + GoName: "ServerKeyRestrictions", + GoType: "KeyRestrictionsServerKeyRestrictions", + Description: "The IP addresses of callers that are allowed to use the key.", + Conflicts: []string{ + "browserKeyRestrictions", + "androidKeyRestrictions", + "iosKeyRestrictions", + }, + Required: []string{ + "allowedIps", + }, + Properties: map[string]*dcl.Property{ + "allowedIps": 
&dcl.Property{ + Type: "array", + GoName: "AllowedIps", + Description: "A list of the caller IP addresses that are allowed to make API calls with this key.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + }, + }, + }, + }, + "serviceAccountEmail": &dcl.Property{ + Type: "string", + GoName: "ServiceAccountEmail", + Description: "The email of the service account the key is bound to. If this field is specified, the key is a service account bound key and auth enabled. See [Documentation](https://cloud.google.com/docs/authentication/api-keys?#api-keys-bound-sa) for more details.", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Iam/ServiceAccount", + Field: "email", + }, + }, + }, + "uid": &dcl.Property{ + Type: "string", + GoName: "Uid", + ReadOnly: true, + Description: "Output only. Unique id in UUID4 format.", + Immutable: true, + }, + }, + }, + }, + }, + }, + } +} diff --git a/mmv1/third_party/terraform/services/apikeys/provider_dcl_client_creation.go b/mmv1/third_party/terraform/services/apikeys/provider_dcl_client_creation.go new file mode 100644 index 000000000000..e67cfff3948d --- /dev/null +++ b/mmv1/third_party/terraform/services/apikeys/provider_dcl_client_creation.go @@ -0,0 +1,30 @@ +package apikeys + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "time" +) + +func NewDCLApikeysClient(config *transport_tpg.Config, userAgent, billingProject string, timeout time.Duration) *Client { + configOptions := []dcl.ConfigOption{ + dcl.WithHTTPClient(config.Client), + dcl.WithUserAgent(userAgent), + dcl.WithLogger(dcl.DCLLogger{}), + dcl.WithBasePath(config.ApikeysBasePath), + } + + if timeout != 0 { + configOptions = append(configOptions, dcl.WithTimeout(timeout)) + } + + if config.UserProjectOverride { + 
configOptions = append(configOptions, dcl.WithUserProjectOverride()) + if billingProject != "" { + configOptions = append(configOptions, dcl.WithBillingProject(billingProject)) + } + } + + dclConfig := dcl.NewConfig(configOptions...) + return NewClient(dclConfig) +} diff --git a/mmv1/third_party/terraform/services/apikeys/resource_apikeys_key.go b/mmv1/third_party/terraform/services/apikeys/resource_apikeys_key.go new file mode 100644 index 000000000000..9360cf572789 --- /dev/null +++ b/mmv1/third_party/terraform/services/apikeys/resource_apikeys_key.go @@ -0,0 +1,697 @@ +package apikeys + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceApikeysKey() *schema.Resource { + return &schema.Resource{ + Create: resourceApikeysKeyCreate, + Read: resourceApikeysKeyRead, + Update: resourceApikeysKeyUpdate, + Delete: resourceApikeysKeyDelete, + + Importer: &schema.ResourceImporter{ + State: resourceApikeysKeyImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + ), + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The resource name of the key. The name must be unique within the project, must conform with RFC-1034, is restricted to lower-cased letters, and has a maximum length of 63 characters. 
In another word, the name must match the regular expression: `[a-z]([a-z0-9-]{0,61}[a-z0-9])?`.", + }, + + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: "Human-readable display name of this API key. Modifiable by user.", + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The project for the resource", + }, + + "restrictions": { + Type: schema.TypeList, + Optional: true, + Description: "Key restrictions.", + MaxItems: 1, + Elem: ApikeysKeyRestrictionsSchema(), + }, + + "service_account_email": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The email of the service account the key is bound to. If this field is specified, the key is a service account bound key and auth enabled. See [Documentation](https://cloud.google.com/docs/authentication/api-keys?#api-keys-bound-sa) for more details.", + }, + + "key_string": { + Type: schema.TypeString, + Computed: true, + Sensitive: true, + Description: "Output only. An encrypted and signed value held by this key. This field can be accessed only through the `GetKeyString` method.", + }, + + "uid": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. Unique id in UUID4 format.", + }, + }, + } +} + +func ApikeysKeyRestrictionsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "android_key_restrictions": { + Type: schema.TypeList, + Optional: true, + Description: "The Android apps that are allowed to use the key.", + MaxItems: 1, + Elem: ApikeysKeyRestrictionsAndroidKeyRestrictionsSchema(), + }, + + "api_targets": { + Type: schema.TypeList, + Optional: true, + Description: "A restriction for a specific service and optionally one or more specific methods. 
Requests are allowed if they match any of these restrictions. If no restrictions are specified, all targets are allowed.", + Elem: ApikeysKeyRestrictionsApiTargetsSchema(), + }, + + "browser_key_restrictions": { + Type: schema.TypeList, + Optional: true, + Description: "The HTTP referrers (websites) that are allowed to use the key.", + MaxItems: 1, + Elem: ApikeysKeyRestrictionsBrowserKeyRestrictionsSchema(), + }, + + "ios_key_restrictions": { + Type: schema.TypeList, + Optional: true, + Description: "The iOS apps that are allowed to use the key.", + MaxItems: 1, + Elem: ApikeysKeyRestrictionsIosKeyRestrictionsSchema(), + }, + + "server_key_restrictions": { + Type: schema.TypeList, + Optional: true, + Description: "The IP addresses of callers that are allowed to use the key.", + MaxItems: 1, + Elem: ApikeysKeyRestrictionsServerKeyRestrictionsSchema(), + }, + }, + } +} + +func ApikeysKeyRestrictionsAndroidKeyRestrictionsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allowed_applications": { + Type: schema.TypeList, + Required: true, + Description: "A list of Android applications that are allowed to make API calls with this key.", + Elem: ApikeysKeyRestrictionsAndroidKeyRestrictionsAllowedApplicationsSchema(), + }, + }, + } +} + +func ApikeysKeyRestrictionsAndroidKeyRestrictionsAllowedApplicationsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "package_name": { + Type: schema.TypeString, + Required: true, + Description: "The package name of the application.", + }, + + "sha1_fingerprint": { + Type: schema.TypeString, + Required: true, + Description: "The SHA1 fingerprint of the application. For example, both sha1 formats are acceptable : DA:39:A3:EE:5E:6B:4B:0D:32:55:BF:EF:95:60:18:90:AF:D8:07:09 or DA39A3EE5E6B4B0D3255BFEF95601890AFD80709. 
Output format is the latter.", + }, + }, + } +} + +func ApikeysKeyRestrictionsApiTargetsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "service": { + Type: schema.TypeString, + Required: true, + Description: "The service for this restriction. It should be the canonical service name, for example: `translate.googleapis.com`. You can use `gcloud services list` to get a list of services that are enabled in the project.", + }, + + "methods": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. List of one or more methods that can be called. If empty, all methods for the service are allowed. A wildcard (*) can be used as the last symbol. Valid examples: `google.cloud.translate.v2.TranslateService.GetSupportedLanguage` `TranslateText` `Get*` `translate.googleapis.com.Get*`", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func ApikeysKeyRestrictionsBrowserKeyRestrictionsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allowed_referrers": { + Type: schema.TypeList, + Required: true, + Description: "A list of regular expressions for the referrer URLs that are allowed to make API calls with this key.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func ApikeysKeyRestrictionsIosKeyRestrictionsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allowed_bundle_ids": { + Type: schema.TypeList, + Required: true, + Description: "A list of bundle IDs that are allowed when making API calls with this key.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func ApikeysKeyRestrictionsServerKeyRestrictionsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allowed_ips": { + Type: schema.TypeList, + Required: true, + Description: "A list of the caller IP addresses that are allowed to make API calls with this key.", + Elem: 
&schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func resourceApikeysKeyCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Key{ + Name: dcl.String(d.Get("name").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Project: dcl.String(project), + Restrictions: expandApikeysKeyRestrictions(d.Get("restrictions")), + ServiceAccountEmail: dcl.String(d.Get("service_account_email").(string)), + } + + id, err := obj.ID() + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLApikeysClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyKey(context.Background(), obj, directive...) 
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating Key: %s", err) + } + + log.Printf("[DEBUG] Finished creating Key %q: %#v", d.Id(), res) + + return resourceApikeysKeyRead(d, meta) +} + +func resourceApikeysKeyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Key{ + Name: dcl.String(d.Get("name").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Project: dcl.String(project), + Restrictions: expandApikeysKeyRestrictions(d.Get("restrictions")), + ServiceAccountEmail: dcl.String(d.Get("service_account_email").(string)), + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLApikeysClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetKey(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("ApikeysKey %q", d.Id()) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + if err = d.Set("display_name", res.DisplayName); err != nil { + return fmt.Errorf("error setting display_name in state: %s", err) + } + if err = d.Set("project", 
res.Project); err != nil { + return fmt.Errorf("error setting project in state: %s", err) + } + if err = d.Set("restrictions", flattenApikeysKeyRestrictions(res.Restrictions)); err != nil { + return fmt.Errorf("error setting restrictions in state: %s", err) + } + if err = d.Set("service_account_email", res.ServiceAccountEmail); err != nil { + return fmt.Errorf("error setting service_account_email in state: %s", err) + } + if err = d.Set("key_string", res.KeyString); err != nil { + return fmt.Errorf("error setting key_string in state: %s", err) + } + if err = d.Set("uid", res.Uid); err != nil { + return fmt.Errorf("error setting uid in state: %s", err) + } + + return nil +} +func resourceApikeysKeyUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Key{ + Name: dcl.String(d.Get("name").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Project: dcl.String(project), + Restrictions: expandApikeysKeyRestrictions(d.Get("restrictions")), + ServiceAccountEmail: dcl.String(d.Get("service_account_email").(string)), + } + directive := tpgdclresource.UpdateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLApikeysClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyKey(context.Background(), obj, directive...) 
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error updating Key: %s", err) + } + + log.Printf("[DEBUG] Finished creating Key %q: %#v", d.Id(), res) + + return resourceApikeysKeyRead(d, meta) +} + +func resourceApikeysKeyDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Key{ + Name: dcl.String(d.Get("name").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Project: dcl.String(project), + Restrictions: expandApikeysKeyRestrictions(d.Get("restrictions")), + ServiceAccountEmail: dcl.String(d.Get("service_account_email").(string)), + } + + log.Printf("[DEBUG] Deleting Key %q", d.Id()) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLApikeysClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + if err := client.DeleteKey(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting Key: %s", err) + } + + log.Printf("[DEBUG] Finished deleting Key %q", d.Id()) + return nil +} + +func resourceApikeysKeyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + if err := tpgresource.ParseImportId([]string{ + 
"projects/(?P[^/]+)/locations/global/keys/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/locations/global/keys/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func expandApikeysKeyRestrictions(o interface{}) *KeyRestrictions { + if o == nil { + return EmptyKeyRestrictions + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyKeyRestrictions + } + obj := objArr[0].(map[string]interface{}) + return &KeyRestrictions{ + AndroidKeyRestrictions: expandApikeysKeyRestrictionsAndroidKeyRestrictions(obj["android_key_restrictions"]), + ApiTargets: expandApikeysKeyRestrictionsApiTargetsArray(obj["api_targets"]), + BrowserKeyRestrictions: expandApikeysKeyRestrictionsBrowserKeyRestrictions(obj["browser_key_restrictions"]), + IosKeyRestrictions: expandApikeysKeyRestrictionsIosKeyRestrictions(obj["ios_key_restrictions"]), + ServerKeyRestrictions: expandApikeysKeyRestrictionsServerKeyRestrictions(obj["server_key_restrictions"]), + } +} + +func flattenApikeysKeyRestrictions(obj *KeyRestrictions) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "android_key_restrictions": flattenApikeysKeyRestrictionsAndroidKeyRestrictions(obj.AndroidKeyRestrictions), + "api_targets": flattenApikeysKeyRestrictionsApiTargetsArray(obj.ApiTargets), + "browser_key_restrictions": flattenApikeysKeyRestrictionsBrowserKeyRestrictions(obj.BrowserKeyRestrictions), + "ios_key_restrictions": flattenApikeysKeyRestrictionsIosKeyRestrictions(obj.IosKeyRestrictions), + "server_key_restrictions": flattenApikeysKeyRestrictionsServerKeyRestrictions(obj.ServerKeyRestrictions), + } + + return []interface{}{transformed} + +} + +func 
expandApikeysKeyRestrictionsAndroidKeyRestrictions(o interface{}) *KeyRestrictionsAndroidKeyRestrictions { + if o == nil { + return EmptyKeyRestrictionsAndroidKeyRestrictions + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyKeyRestrictionsAndroidKeyRestrictions + } + obj := objArr[0].(map[string]interface{}) + return &KeyRestrictionsAndroidKeyRestrictions{ + AllowedApplications: expandApikeysKeyRestrictionsAndroidKeyRestrictionsAllowedApplicationsArray(obj["allowed_applications"]), + } +} + +func flattenApikeysKeyRestrictionsAndroidKeyRestrictions(obj *KeyRestrictionsAndroidKeyRestrictions) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "allowed_applications": flattenApikeysKeyRestrictionsAndroidKeyRestrictionsAllowedApplicationsArray(obj.AllowedApplications), + } + + return []interface{}{transformed} + +} +func expandApikeysKeyRestrictionsAndroidKeyRestrictionsAllowedApplicationsArray(o interface{}) []KeyRestrictionsAndroidKeyRestrictionsAllowedApplications { + if o == nil { + return make([]KeyRestrictionsAndroidKeyRestrictionsAllowedApplications, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return make([]KeyRestrictionsAndroidKeyRestrictionsAllowedApplications, 0) + } + + items := make([]KeyRestrictionsAndroidKeyRestrictionsAllowedApplications, 0, len(objs)) + for _, item := range objs { + i := expandApikeysKeyRestrictionsAndroidKeyRestrictionsAllowedApplications(item) + items = append(items, *i) + } + + return items +} + +func expandApikeysKeyRestrictionsAndroidKeyRestrictionsAllowedApplications(o interface{}) *KeyRestrictionsAndroidKeyRestrictionsAllowedApplications { + if o == nil { + return EmptyKeyRestrictionsAndroidKeyRestrictionsAllowedApplications + } + + obj := o.(map[string]interface{}) + return &KeyRestrictionsAndroidKeyRestrictionsAllowedApplications{ + PackageName: dcl.String(obj["package_name"].(string)), + 
Sha1Fingerprint: dcl.String(obj["sha1_fingerprint"].(string)), + } +} + +func flattenApikeysKeyRestrictionsAndroidKeyRestrictionsAllowedApplicationsArray(objs []KeyRestrictionsAndroidKeyRestrictionsAllowedApplications) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenApikeysKeyRestrictionsAndroidKeyRestrictionsAllowedApplications(&item) + items = append(items, i) + } + + return items +} + +func flattenApikeysKeyRestrictionsAndroidKeyRestrictionsAllowedApplications(obj *KeyRestrictionsAndroidKeyRestrictionsAllowedApplications) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "package_name": obj.PackageName, + "sha1_fingerprint": obj.Sha1Fingerprint, + } + + return transformed + +} +func expandApikeysKeyRestrictionsApiTargetsArray(o interface{}) []KeyRestrictionsApiTargets { + if o == nil { + return make([]KeyRestrictionsApiTargets, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return make([]KeyRestrictionsApiTargets, 0) + } + + items := make([]KeyRestrictionsApiTargets, 0, len(objs)) + for _, item := range objs { + i := expandApikeysKeyRestrictionsApiTargets(item) + items = append(items, *i) + } + + return items +} + +func expandApikeysKeyRestrictionsApiTargets(o interface{}) *KeyRestrictionsApiTargets { + if o == nil { + return EmptyKeyRestrictionsApiTargets + } + + obj := o.(map[string]interface{}) + return &KeyRestrictionsApiTargets{ + Service: dcl.String(obj["service"].(string)), + Methods: tpgdclresource.ExpandStringArray(obj["methods"]), + } +} + +func flattenApikeysKeyRestrictionsApiTargetsArray(objs []KeyRestrictionsApiTargets) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenApikeysKeyRestrictionsApiTargets(&item) + items = append(items, i) + } + + return items +} + +func flattenApikeysKeyRestrictionsApiTargets(obj 
*KeyRestrictionsApiTargets) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "service": obj.Service, + "methods": obj.Methods, + } + + return transformed + +} + +func expandApikeysKeyRestrictionsBrowserKeyRestrictions(o interface{}) *KeyRestrictionsBrowserKeyRestrictions { + if o == nil { + return EmptyKeyRestrictionsBrowserKeyRestrictions + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyKeyRestrictionsBrowserKeyRestrictions + } + obj := objArr[0].(map[string]interface{}) + return &KeyRestrictionsBrowserKeyRestrictions{ + AllowedReferrers: tpgdclresource.ExpandStringArray(obj["allowed_referrers"]), + } +} + +func flattenApikeysKeyRestrictionsBrowserKeyRestrictions(obj *KeyRestrictionsBrowserKeyRestrictions) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "allowed_referrers": obj.AllowedReferrers, + } + + return []interface{}{transformed} + +} + +func expandApikeysKeyRestrictionsIosKeyRestrictions(o interface{}) *KeyRestrictionsIosKeyRestrictions { + if o == nil { + return EmptyKeyRestrictionsIosKeyRestrictions + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyKeyRestrictionsIosKeyRestrictions + } + obj := objArr[0].(map[string]interface{}) + return &KeyRestrictionsIosKeyRestrictions{ + AllowedBundleIds: tpgdclresource.ExpandStringArray(obj["allowed_bundle_ids"]), + } +} + +func flattenApikeysKeyRestrictionsIosKeyRestrictions(obj *KeyRestrictionsIosKeyRestrictions) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "allowed_bundle_ids": obj.AllowedBundleIds, + } + + return []interface{}{transformed} + +} + +func expandApikeysKeyRestrictionsServerKeyRestrictions(o interface{}) *KeyRestrictionsServerKeyRestrictions { + if o == nil { + return EmptyKeyRestrictionsServerKeyRestrictions + } + objArr := 
o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyKeyRestrictionsServerKeyRestrictions + } + obj := objArr[0].(map[string]interface{}) + return &KeyRestrictionsServerKeyRestrictions{ + AllowedIps: tpgdclresource.ExpandStringArray(obj["allowed_ips"]), + } +} + +func flattenApikeysKeyRestrictionsServerKeyRestrictions(obj *KeyRestrictionsServerKeyRestrictions) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "allowed_ips": obj.AllowedIps, + } + + return []interface{}{transformed} + +} diff --git a/mmv1/third_party/terraform/services/apikeys/resource_apikeys_key_generated_test.go.tmpl b/mmv1/third_party/terraform/services/apikeys/resource_apikeys_key_generated_test.go.tmpl new file mode 100644 index 000000000000..1368e684c5ce --- /dev/null +++ b/mmv1/third_party/terraform/services/apikeys/resource_apikeys_key_generated_test.go.tmpl @@ -0,0 +1,452 @@ +package apikeys_test + +import ( + "context" + "fmt" + "strings" + "testing" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func TestAccApikeysKey_AndroidKey(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "billing_acct": envvar.GetTestBillingAccountFromEnv(t), + "org_id": envvar.GetTestOrgFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckApikeysKeyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccApikeysKey_AndroidKey(context), + 
}, + { + ResourceName: "google_apikeys_key.primary", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccApikeysKey_AndroidKeyUpdate0(context), + }, + { + ResourceName: "google_apikeys_key.primary", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} +func TestAccApikeysKey_BasicKey(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "billing_acct": envvar.GetTestBillingAccountFromEnv(t), + "org_id": envvar.GetTestOrgFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckApikeysKeyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccApikeysKey_BasicKey(context), + }, + { + ResourceName: "google_apikeys_key.primary", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccApikeysKey_BasicKeyUpdate0(context), + }, + { + ResourceName: "google_apikeys_key.primary", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} +func TestAccApikeysKey_IosKey(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "billing_acct": envvar.GetTestBillingAccountFromEnv(t), + "org_id": envvar.GetTestOrgFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckApikeysKeyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccApikeysKey_IosKey(context), + }, + { + ResourceName: "google_apikeys_key.primary", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccApikeysKey_IosKeyUpdate0(context), + }, + { + ResourceName: "google_apikeys_key.primary", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} +func TestAccApikeysKey_MinimalKey(t 
*testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "billing_acct": envvar.GetTestBillingAccountFromEnv(t), + "org_id": envvar.GetTestOrgFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckApikeysKeyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccApikeysKey_MinimalKey(context), + }, + { + ResourceName: "google_apikeys_key.primary", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} +func TestAccApikeysKey_ServerKey(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "billing_acct": envvar.GetTestBillingAccountFromEnv(t), + "org_id": envvar.GetTestOrgFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckApikeysKeyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccApikeysKey_ServerKey(context), + }, + { + ResourceName: "google_apikeys_key.primary", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccApikeysKey_ServerKeyUpdate0(context), + }, + { + ResourceName: "google_apikeys_key.primary", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} +func TestAccApikeysKey_ServiceAccountKeyHandWritten(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "billing_acct": envvar.GetTestBillingAccountFromEnv(t), + "org_id": envvar.GetTestOrgFromEnv(t), + "project_name": envvar.GetTestProjectFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: 
testAccCheckApikeysKeyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccApikeysKey_ServiceAccountKeyHandWritten(context), + }, + { + ResourceName: "google_apikeys_key.primary", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccApikeysKey_AndroidKey(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_apikeys_key" "primary" { + name = "tf-test-key%{random_suffix}" + display_name = "sample-key" + + restrictions { + android_key_restrictions { + allowed_applications { + package_name = "com.example.app123" + sha1_fingerprint = "1699466a142d4682a5f91b50fdf400f2358e2b0b" + } + } + + api_targets { + service = "translate.googleapis.com" + methods = ["GET*"] + } + } +} + + +`, context) +} + +func testAccApikeysKey_AndroidKeyUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_apikeys_key" "primary" { + name = "tf-test-key%{random_suffix}" + display_name = "sample-key" + + restrictions { + android_key_restrictions { + allowed_applications { + package_name = "com.example.app124" + sha1_fingerprint = "1cf89aa28625da86a7e5a7550cf7fd33d611f6fd" + } + } + + api_targets { + service = "translate.googleapis.com" + methods = ["GET*"] + } + } +} + + +`, context) +} + +func testAccApikeysKey_BasicKey(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_apikeys_key" "primary" { + name = "tf-test-key%{random_suffix}" + display_name = "sample-key" + + restrictions { + api_targets { + service = "translate.googleapis.com" + methods = ["GET*"] + } + + browser_key_restrictions { + allowed_referrers = [".*"] + } + } +} + + +`, context) +} + +func testAccApikeysKey_BasicKeyUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_apikeys_key" "primary" { + name = "tf-test-key%{random_suffix}" + display_name = "sample-key-update" + + restrictions { + api_targets { + service = "maps.googleapis.com" + 
methods = ["POST*"] + } + + browser_key_restrictions { + allowed_referrers = [".*com"] + } + } +} + + +`, context) +} + +func testAccApikeysKey_IosKey(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_apikeys_key" "primary" { + name = "tf-test-key%{random_suffix}" + display_name = "sample-key" + + restrictions { + api_targets { + service = "translate.googleapis.com" + methods = ["GET*"] + } + + ios_key_restrictions { + allowed_bundle_ids = ["com.google.app.macos"] + } + } +} + + +`, context) +} + +func testAccApikeysKey_IosKeyUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_apikeys_key" "primary" { + name = "tf-test-key%{random_suffix}" + display_name = "sample-key" + + restrictions { + api_targets { + service = "translate.googleapis.com" + methods = ["GET*"] + } + + ios_key_restrictions { + allowed_bundle_ids = ["com.google.alex.ios"] + } + } +} + + +`, context) +} + +func testAccApikeysKey_MinimalKey(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_apikeys_key" "primary" { + name = "tf-test-key%{random_suffix}" + display_name = "sample-key" +} + + +`, context) +} + +func testAccApikeysKey_ServerKey(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_apikeys_key" "primary" { + name = "tf-test-key%{random_suffix}" + display_name = "sample-key" + + restrictions { + api_targets { + service = "translate.googleapis.com" + methods = ["GET*"] + } + + server_key_restrictions { + allowed_ips = ["127.0.0.1"] + } + } +} + + +`, context) +} + +func testAccApikeysKey_ServerKeyUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_apikeys_key" "primary" { + name = "tf-test-key%{random_suffix}" + display_name = "sample-key" + + restrictions { + api_targets { + service = "translate.googleapis.com" + methods = ["GET*"] + } + + server_key_restrictions { + allowed_ips = ["127.0.0.2", 
"192.168.1.1"] + } + } +} + + +`, context) +} + +func testAccApikeysKey_ServiceAccountKeyHandWritten(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_apikeys_key" "primary" { + name = "tf-test-key%{random_suffix}" + display_name = "sample-key" + project = google_project.project.project_id + service_account_email = google_service_account.key_service_account.email +} + +resource "google_project" "project" { + project_id = "tf-test-app%{random_suffix}" + name = "tf-test-app%{random_suffix}" + org_id = "%{org_id}" + deletion_policy = "DELETE" +} + +resource "google_service_account" "key_service_account" { + account_id = "tf-test-app%{random_suffix}" + project = google_project.project.project_id + display_name = "Test Service Account" +} +`, context) +} + +func testAccCheckApikeysKeyDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "rs.google_apikeys_key" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + billingProject := "" + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + obj := &Key{ + Name: dcl.String(rs.Primary.Attributes["name"]), + DisplayName: dcl.String(rs.Primary.Attributes["display_name"]), + Project: dcl.StringOrNil(rs.Primary.Attributes["project"]), + ServiceAccountEmail: dcl.String(rs.Primary.Attributes["service_account_email"]), + KeyString: dcl.StringOrNil(rs.Primary.Attributes["key_string"]), + Uid: dcl.StringOrNil(rs.Primary.Attributes["uid"]), + } + + client := transport_tpg.NewDCLApikeysClient(config, config.UserAgent, billingProject, 0) + _, err := client.GetKey(context.Background(), obj) + if err == nil { + return fmt.Errorf("google_apikeys_key still exists %v", obj) + } + } + return nil + } +} diff --git a/mmv1/third_party/terraform/services/apikeys/resource_apikeys_key_sweeper.go 
b/mmv1/third_party/terraform/services/apikeys/resource_apikeys_key_sweeper.go new file mode 100644 index 000000000000..87ab4feb93c4 --- /dev/null +++ b/mmv1/third_party/terraform/services/apikeys/resource_apikeys_key_sweeper.go @@ -0,0 +1,53 @@ +package apikeys + +import ( + "context" + "log" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" +) + +func init() { + sweeper.AddTestSweepersLegacy("ApikeysKey", testSweepApikeysKey) +} + +func testSweepApikeysKey(region string) error { + log.Print("[INFO][SWEEPER_LOG] Starting sweeper for ApikeysKey") + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to be used for Delete arguments. 
+ d := map[string]string{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + } + + client := NewDCLApikeysClient(config, config.UserAgent, "", 0) + err = client.DeleteAllKey(context.Background(), d["project"], isDeletableApikeysKey) + if err != nil { + return err + } + return nil +} + +func isDeletableApikeysKey(r *Key) bool { + return sweeper.IsSweepableTestResource(*r.Name) +} diff --git a/mmv1/third_party/terraform/services/assuredworkloads/assuredworkloads_utils.go.tmpl b/mmv1/third_party/terraform/services/assuredworkloads/assuredworkloads_utils.go.tmpl new file mode 100644 index 000000000000..8331dd43c3b4 --- /dev/null +++ b/mmv1/third_party/terraform/services/assuredworkloads/assuredworkloads_utils.go.tmpl @@ -0,0 +1,137 @@ +package assuredworkloads + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "regexp" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +var universeDomainRegex = regexp.MustCompile(`https://[^/.]+([^/]+)/`) + +// Returns the URL of the project resource with the given index in the workload. +func (r *Workload) projectURL(userBasePath string, index int) (string, error) { + params := map[string]any{ + "project": dcl.ValueOrEmptyString(r.Resources[index].ResourceId), + } + // This is a hack to support universe domains & custom endpoints. This should really be + // handled by using a properly-configured cloud resource manager client to delete the + // project, but that's not available in this context. We will want to fix this when migrating + // to MMv1. 
+ if userBasePath != "" { + matches := universeDomainRegex.FindStringSubmatch(userBasePath) + if len(matches) > 0 { + userBasePath = fmt.Sprintf("https://cloudresourcemanager%s/v1/", matches[1]) + } + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}", "https://cloudresourcemanager.googleapis.com/v1/", userBasePath, params), nil +} + +// Returns the URL of the folder resource with the given index in the workload. +func (r *Workload) folderURL(userBasePath string, index int) (string, error) { + params := map[string]any{ + "folder": dcl.ValueOrEmptyString(r.Resources[index].ResourceId), + } + // This is a hack to support universe domains & custom endpoints. This should really be + // handled by using a properly-configured cloud resource manager client to delete the + // folder, but that's not available in this context. We will want to fix this when migrating + // to MMv1. + if userBasePath != "" { + matches := universeDomainRegex.FindStringSubmatch(userBasePath) + if len(matches) > 0 { + userBasePath = fmt.Sprintf("https://cloudresourcemanager%s/v2/", matches[1]) + } + } + return dcl.URL("folders/{{ "{{" }}folder{{ "}}" }}", "https://cloudresourcemanager.googleapis.com/v2/", userBasePath, params), nil +} + +// Returns the lifecycle state of the project or folder resource with the given url. +func lifecycleState(ctx context.Context, client *Client, url string) (string, error) { + resp, err := dcl.SendRequest(ctx, client.Config, "GET", url, &bytes.Buffer{}, client.Config.RetryProvider) + if err != nil { + return "", err + } + defer resp.Response.Body.Close() + b, err := io.ReadAll(resp.Response.Body) + if err != nil { + return "", err + } + var m map[string]any + if err := json.Unmarshal(b, &m); err != nil { + return "", err + } + state, ok := m["lifecycleState"].(string) + if !ok { + return "", fmt.Errorf("no lifecycle state for resource at %q", url) + } + return state, nil +} + +// Deletes the resource with the given URL. 
Returns true if it is already in DELETE_REQUESTED state, +// otherwise returns false. +func deleteResource(ctx context.Context, client *Client, url string) (bool, error) { + state, err := lifecycleState(ctx, client, url) + if err != nil { + return false, err + } + if state == "DELETE_REQUESTED" { + // Do not delete an already deleted resource. + return true, nil + } + // Send delete request for resources not already deleted. + _, err = dcl.SendRequest(ctx, client.Config, "DELETE", url, &bytes.Buffer{}, client.Config.RetryProvider) + if err != nil { + return false, fmt.Errorf("failed to delete resource at %s: %w", url, err) + } + return false, nil +} + +// Deletes projects and folders owned by the workload prior to workload deletion. +func (r *Workload) deleteResources(ctx context.Context, client *Client) error { + nr := r.urlNormalized() + return dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + // First, delete projects + for i, resource := range nr.Resources { + if resource.ResourceType == nil { + return nil, fmt.Errorf("nil resource type in workload %q", dcl.ValueOrEmptyString(nr.Name)) + } + if *resource.ResourceType == WorkloadResourcesResourceTypeEnum("CONSUMER_PROJECT") || *resource.ResourceType == WorkloadResourcesResourceTypeEnum("ENCRYPTION_KEYS_PROJECT") { + u, err := nr.projectURL(client.Config.BasePath, i) + if err != nil { + return nil, err + } + deleted, err := deleteResource(ctx, client, u) + if err != nil { + return nil, err + } + if !deleted { + // Retry until all resources are being deleted. 
+ return &dcl.RetryDetails{}, dcl.OperationNotDone{} + } + } + } + // Then, delete folders + for i, resource := range nr.Resources { + if *resource.ResourceType == WorkloadResourcesResourceTypeEnum("CONSUMER_FOLDER") { + u, err := nr.folderURL(client.Config.BasePath, i) + if err != nil { + return nil, err + } + deleted, err := deleteResource(ctx, client, u) + if err != nil { + return nil, err + } + if !deleted { + // Retry until all resources are being deleted. + return &dcl.RetryDetails{}, dcl.OperationNotDone{} + } + } + } + // All project and folder resources are in DELETE_REQUESTED state. + return nil, nil + }, client.Config.RetryProvider) +} diff --git a/mmv1/third_party/terraform/services/assuredworkloads/client.go.tmpl b/mmv1/third_party/terraform/services/assuredworkloads/client.go.tmpl new file mode 100644 index 000000000000..6f3d2273c526 --- /dev/null +++ b/mmv1/third_party/terraform/services/assuredworkloads/client.go.tmpl @@ -0,0 +1,18 @@ +package assuredworkloads + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +// The Client is the base struct of all operations. This will receive the +// Get, Delete, List, and Apply operations on all resources. +type Client struct { + Config *dcl.Config +} + +// NewClient creates a client that retries all operations a few times each. 
+func NewClient(c *dcl.Config) *Client { + return &Client{ + Config: c, + } +} diff --git a/mmv1/third_party/terraform/services/assuredworkloads/provider_dcl_client_creation.go b/mmv1/third_party/terraform/services/assuredworkloads/provider_dcl_client_creation.go new file mode 100644 index 000000000000..e240c1b3e039 --- /dev/null +++ b/mmv1/third_party/terraform/services/assuredworkloads/provider_dcl_client_creation.go @@ -0,0 +1,30 @@ +package assuredworkloads + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "time" +) + +func NewDCLAssuredWorkloadsClient(config *transport_tpg.Config, userAgent, billingProject string, timeout time.Duration) *Client { + configOptions := []dcl.ConfigOption{ + dcl.WithHTTPClient(config.Client), + dcl.WithUserAgent(userAgent), + dcl.WithLogger(dcl.DCLLogger{}), + dcl.WithBasePath(config.AssuredWorkloadsBasePath), + } + + if timeout != 0 { + configOptions = append(configOptions, dcl.WithTimeout(timeout)) + } + + if config.UserProjectOverride { + configOptions = append(configOptions, dcl.WithUserProjectOverride()) + if billingProject != "" { + configOptions = append(configOptions, dcl.WithBillingProject(billingProject)) + } + } + + dclConfig := dcl.NewConfig(configOptions...) 
+ return NewClient(dclConfig) +} diff --git a/mmv1/third_party/terraform/services/assuredworkloads/resource_assured_workloads_workload.go.tmpl b/mmv1/third_party/terraform/services/assuredworkloads/resource_assured_workloads_workload.go.tmpl new file mode 100644 index 000000000000..629256b2857b --- /dev/null +++ b/mmv1/third_party/terraform/services/assuredworkloads/resource_assured_workloads_workload.go.tmpl @@ -0,0 +1,971 @@ +package assuredworkloads + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceAssuredWorkloadsWorkload() *schema.Resource { + return &schema.Resource{ + Create: resourceAssuredWorkloadsWorkloadCreate, + Read: resourceAssuredWorkloadsWorkloadRead, + Update: resourceAssuredWorkloadsWorkloadUpdate, + Delete: resourceAssuredWorkloadsWorkloadDelete, + + Importer: &schema.ResourceImporter{ + State: resourceAssuredWorkloadsWorkloadImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + CustomizeDiff: customdiff.All( + tpgresource.SetLabelsDiff, + ), + + Schema: map[string]*schema.Schema{ + "compliance_regime": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. Immutable. Compliance Regime associated with this workload. 
Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT, KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS, HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS, HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS_US_SUPPORT, IRS_1075", + }, + + "display_name": { + Type: schema.TypeString, + Required: true, + Description: "Required. The user-assigned display name of the Workload. When present it must be between 4 to 30 characters. Allowed characters are: lowercase and uppercase letters, numbers, hyphen, and spaces. Example: My Workload", + }, + + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The location for the resource", + }, + + "organization": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The organization for the resource", + }, + + "billing_account": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Optional. Input only. The billing account used for the resources which are direct children of workload. This billing account is initially associated with the resources created as part of Workload creation. After the initial creation of these resources, the customer can change the assigned billing account. The resource name has the form `billingAccounts/{billing_account_id}`. 
For example, `billingAccounts/012345-567890-ABCDEF`.", + }, + + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + Description: "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + }, + + "enable_sovereign_controls": { + Type: schema.TypeBool, +{{- if ne $.TargetVersionName "ga" }} + Computed: true, +{{- end }} + Optional: true, + ForceNew: true, + Description: "Optional. Indicates the sovereignty status of the given workload. Currently meant to be used by Europe/Canada customers.", + }, + + "kms_settings": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "**DEPRECATED** Input only. Settings used to create a CMEK crypto key. When set, a project with a KMS CMEK key is provisioned. This field is deprecated as of Feb 28, 2022. In order to create a Keyring, callers should specify, ENCRYPTION_KEYS_PROJECT or KEYRING in ResourceSettings.resource_type field.", + MaxItems: 1, + Elem: AssuredWorkloadsWorkloadKmsSettingsSchema(), + }, + + "partner": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN, SOVEREIGN_CONTROLS_BY_CNTXT, SOVEREIGN_CONTROLS_BY_CNTXT_NO_EKM", + }, + + "partner_permissions": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Permissions granted to the AW Partner SA account for the customer workload", + MaxItems: 1, + Elem: AssuredWorkloadsWorkloadPartnerPermissionsSchema(), + }, + + "partner_services_billing_account": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. Input only. Billing account necessary for purchasing services from Sovereign Partners. 
This field is required for creating SIA/PSN/CNTXT partner workloads. The caller should have 'billing.resourceAssociations.create' IAM permission on this billing-account. The format of this string is billingAccounts/AAAAAA-BBBBBB-CCCCCC.", + }, + + "provisioned_resources_parent": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Input only. The parent resource for the resources managed by this Assured Workload. May be either empty or a folder resource which is a child of the Workload parent. If not specified all resources are created under the parent organization. Format: folders/{folder_id}", + }, + + "resource_settings": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Input only. Resource properties that are used to customize workload resources. These properties (such as custom project id) will be used to create workload resources if possible. This field is optional.", + Elem: AssuredWorkloadsWorkloadResourceSettingsSchema(), + }, + + "violation_notifications_enabled": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. Indicates whether the e-mail notification for a violation is enabled for a workload. This value will be by default True, and if not present will be considered as true. This should only be updated via updateWorkload call. Any Changes to this field during the createWorkload call will not be honored. This will always be true while creating the workload.", + }, + + "workload_options": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Used to specify certain options for a workload during workload creation - currently only supporting KAT Optionality for Regional Controls workloads.", + MaxItems: 1, + Elem: AssuredWorkloadsWorkloadWorkloadOptionsSchema(), + }, + + "compliance_status": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. 
Count of active Violations in the Workload.", + Elem: AssuredWorkloadsWorkloadComplianceStatusSchema(), + }, + + "compliant_but_disallowed_services": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke workloads.restrictAllowedResources endpoint to allow your project developers to use these services in their environment.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. Immutable. The Workload creation timestamp.", + }, + + "ekm_provisioning_response": { + Type: schema.TypeList, + Computed: true, + Description: "Optional. Represents the Ekm Provisioning State of the given workload.", + Elem: AssuredWorkloadsWorkloadEkmProvisioningResponseSchema(), + }, + + "kaj_enrollment_state": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. Represents the KAJ enrollment state of the given workload. Possible values: KAJ_ENROLLMENT_STATE_UNSPECIFIED, KAJ_ENROLLMENT_STATE_PENDING, KAJ_ENROLLMENT_STATE_COMPLETE", + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: "Optional. Labels applied to the workload.\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field `effective_labels` for all of the labels present on the resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "name": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The resource name of the workload.", + }, + + "resources": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. The resources associated with this workload. These resources will be created when creating the workload. If any of the projects already exist, the workload creation will fail. 
Always read only.", + Elem: AssuredWorkloadsWorkloadResourcesSchema(), + }, + + "saa_enrollment_response": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. Represents the SAA enrollment response of the given workload. SAA enrollment response is queried during workloads.get call. In failure cases, user friendly error message is shown in SAA details page.", + Elem: AssuredWorkloadsWorkloadSaaEnrollmentResponseSchema(), + }, + + "terraform_labels": { + Type: schema.TypeMap, + Computed: true, + Description: "The combination of labels configured directly on the resource and default labels configured on the provider.", + }, + }, + } +} + +func AssuredWorkloadsWorkloadKmsSettingsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "next_rotation_time": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. Input only. Immutable. The time at which the Key Management Service will automatically create a new version of the crypto key and mark it as the primary.", + }, + + "rotation_period": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. Input only. Immutable. will be advanced by this period when the Key Management Service automatically rotates a key. Must be at least 24 hours and at most 876,000 hours.", + }, + }, + } +} + +func AssuredWorkloadsWorkloadPartnerPermissionsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "assured_workloads_monitoring": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: "Optional. Allow partner to view violation alerts.", + }, + + "data_logs_viewer": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: "Allow the partner to view inspectability logs and monitoring violations.", + }, + + "service_access_approver": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: "Optional. 
Allow partner to view access approval logs.",
+			},
+		},
+	}
+}
+
+// AssuredWorkloadsWorkloadResourceSettingsSchema describes the input-only
+// resource_settings list entry used to customize workload-created resources.
+func AssuredWorkloadsWorkloadResourceSettingsSchema() *schema.Resource {
+	return &schema.Resource{
+		Schema: map[string]*schema.Schema{
+			"display_name": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				ForceNew:    true,
+				Description: "User-assigned resource display name. If not empty it will be used to create a resource with the specified name.",
+			},
+
+			"resource_id": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				ForceNew:    true,
+				Description: "Resource identifier. For a project this represents projectId. If the project is already taken, the workload creation will fail. For KeyRing, this represents the keyring_id. For a folder, don't set this value as folder_id is assigned by Google.",
+			},
+
+			"resource_type": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				ForceNew:    true,
+				Description: "Indicates the type of resource. This field should be specified to correspond the id to the right project type (CONSUMER_PROJECT or ENCRYPTION_KEYS_PROJECT) Possible values: RESOURCE_TYPE_UNSPECIFIED, CONSUMER_PROJECT, ENCRYPTION_KEYS_PROJECT, KEYRING, CONSUMER_FOLDER",
+			},
+		},
+	}
+}
+
+// AssuredWorkloadsWorkloadWorkloadOptionsSchema describes the workload_options
+// block (currently only KAT optionality for Regional Controls workloads).
+func AssuredWorkloadsWorkloadWorkloadOptionsSchema() *schema.Resource {
+	return &schema.Resource{
+		Schema: map[string]*schema.Schema{
+			"kaj_enrollment_type": {
+				Type:     schema.TypeString,
+				Optional: true,
+				ForceNew: true,
+				Description: "Indicates type of KAJ enrollment for the workload. Currently, only specifying KEY_ACCESS_TRANSPARENCY_OFF is implemented to not enroll in KAT-level KAJ enrollment for Regional Controls workloads.
Possible values: KAJ_ENROLLMENT_TYPE_UNSPECIFIED, FULL_KAJ, EKM_ONLY, KEY_ACCESS_TRANSPARENCY_OFF", + }, + }, + } +} + +func AssuredWorkloadsWorkloadComplianceStatusSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "acknowledged_violation_count": { + Type: schema.TypeList, + Computed: true, + Description: "Number of current orgPolicy violations which are acknowledged.", + Elem: &schema.Schema{Type: schema.TypeInt}, + }, + + "active_violation_count": { + Type: schema.TypeList, + Computed: true, + Description: "Number of current orgPolicy violations which are not acknowledged.", + Elem: &schema.Schema{Type: schema.TypeInt}, + }, + }, + } +} + +func AssuredWorkloadsWorkloadEkmProvisioningResponseSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ekm_provisioning_error_domain": { + Type: schema.TypeString, + Computed: true, + Description: "Indicates Ekm provisioning error if any. Possible values: EKM_PROVISIONING_ERROR_DOMAIN_UNSPECIFIED, UNSPECIFIED_ERROR, GOOGLE_SERVER_ERROR, EXTERNAL_USER_ERROR, EXTERNAL_PARTNER_ERROR, TIMEOUT_ERROR", + }, + + "ekm_provisioning_error_mapping": { + Type: schema.TypeString, + Computed: true, + Description: "Detailed error message if Ekm provisioning fails Possible values: EKM_PROVISIONING_ERROR_MAPPING_UNSPECIFIED, INVALID_SERVICE_ACCOUNT, MISSING_METRICS_SCOPE_ADMIN_PERMISSION, MISSING_EKM_CONNECTION_ADMIN_PERMISSION", + }, + + "ekm_provisioning_state": { + Type: schema.TypeString, + Computed: true, + Description: "Indicates Ekm enrollment Provisioning of a given workload. 
Possible values: EKM_PROVISIONING_STATE_UNSPECIFIED, EKM_PROVISIONING_STATE_PENDING, EKM_PROVISIONING_STATE_FAILED, EKM_PROVISIONING_STATE_COMPLETED", + }, + }, + } +} + +func AssuredWorkloadsWorkloadResourcesSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "resource_id": { + Type: schema.TypeInt, + Computed: true, + Description: "Resource identifier. For a project this represents project_number.", + }, + + "resource_type": { + Type: schema.TypeString, + Computed: true, + Description: "Indicates the type of resource. Possible values: RESOURCE_TYPE_UNSPECIFIED, CONSUMER_PROJECT, ENCRYPTION_KEYS_PROJECT, KEYRING, CONSUMER_FOLDER", + }, + }, + } +} + +func AssuredWorkloadsWorkloadSaaEnrollmentResponseSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "setup_errors": { + Type: schema.TypeList, + Computed: true, + Description: "Indicates SAA enrollment setup error if any.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "setup_status": { + Type: schema.TypeString, + Computed: true, + Description: "Indicates SAA enrollment status of a given workload. 
Possible values: SETUP_STATE_UNSPECIFIED, STATUS_PENDING, STATUS_COMPLETE", + }, + }, + } +} + +func resourceAssuredWorkloadsWorkloadCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + obj := &Workload{ + ComplianceRegime: WorkloadComplianceRegimeEnumRef(d.Get("compliance_regime").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Location: dcl.String(d.Get("location").(string)), + Organization: dcl.String(d.Get("organization").(string)), + BillingAccount: dcl.String(d.Get("billing_account").(string)), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + EnableSovereignControls: dcl.Bool(d.Get("enable_sovereign_controls").(bool)), + KmsSettings: expandAssuredWorkloadsWorkloadKmsSettings(d.Get("kms_settings")), + Partner: WorkloadPartnerEnumRef(d.Get("partner").(string)), + PartnerPermissions: expandAssuredWorkloadsWorkloadPartnerPermissions(d.Get("partner_permissions")), + PartnerServicesBillingAccount: dcl.String(d.Get("partner_services_billing_account").(string)), + ProvisionedResourcesParent: dcl.String(d.Get("provisioned_resources_parent").(string)), + ResourceSettings: expandAssuredWorkloadsWorkloadResourceSettingsArray(d.Get("resource_settings")), + ViolationNotificationsEnabled: dcl.Bool(d.Get("violation_notifications_enabled").(bool)), + WorkloadOptions: expandAssuredWorkloadsWorkloadWorkloadOptions(d.Get("workload_options")), + } + + id, err := obj.ID() + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + directive := dcl.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := transport_tpg.NewDCLAssuredWorkloadsClient(config, userAgent, billingProject, 
d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyWorkload(context.Background(), obj, directive...) + + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating Workload: %s", err) + } + + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + // ID has a server-generated value, set again after creation. + + id, err = res.ID() + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Workload %q: %#v", d.Id(), res) + + return resourceAssuredWorkloadsWorkloadRead(d, meta) +} + +func resourceAssuredWorkloadsWorkloadRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + obj := &Workload{ + ComplianceRegime: WorkloadComplianceRegimeEnumRef(d.Get("compliance_regime").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Location: dcl.String(d.Get("location").(string)), + Organization: dcl.String(d.Get("organization").(string)), + BillingAccount: dcl.String(d.Get("billing_account").(string)), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + EnableSovereignControls: dcl.Bool(d.Get("enable_sovereign_controls").(bool)), + KmsSettings: expandAssuredWorkloadsWorkloadKmsSettings(d.Get("kms_settings")), + Partner: WorkloadPartnerEnumRef(d.Get("partner").(string)), + PartnerPermissions: expandAssuredWorkloadsWorkloadPartnerPermissions(d.Get("partner_permissions")), + PartnerServicesBillingAccount: dcl.String(d.Get("partner_services_billing_account").(string)), + 
ProvisionedResourcesParent: dcl.String(d.Get("provisioned_resources_parent").(string)), + ResourceSettings: expandAssuredWorkloadsWorkloadResourceSettingsArray(d.Get("resource_settings")), + ViolationNotificationsEnabled: dcl.Bool(d.Get("violation_notifications_enabled").(bool)), + WorkloadOptions: expandAssuredWorkloadsWorkloadWorkloadOptions(d.Get("workload_options")), + Name: dcl.StringOrNil(d.Get("name").(string)), + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := transport_tpg.NewDCLAssuredWorkloadsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetWorkload(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("AssuredWorkloadsWorkload %q", d.Id()) + return dcl.HandleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("compliance_regime", res.ComplianceRegime); err != nil { + return fmt.Errorf("error setting compliance_regime in state: %s", err) + } + if err = d.Set("display_name", res.DisplayName); err != nil { + return fmt.Errorf("error setting display_name in state: %s", err) + } + if err = d.Set("location", res.Location); err != nil { + return fmt.Errorf("error setting location in state: %s", err) + } + if err = d.Set("organization", res.Organization); err != nil { + return fmt.Errorf("error setting organization in state: %s", err) + } + if err = d.Set("billing_account", res.BillingAccount); err != nil { + return fmt.Errorf("error setting billing_account in state: %s", err) + } + if err = 
d.Set("effective_labels", res.Labels); err != nil { + return fmt.Errorf("error setting effective_labels in state: %s", err) + } + if err = d.Set("enable_sovereign_controls", res.EnableSovereignControls); err != nil { + return fmt.Errorf("error setting enable_sovereign_controls in state: %s", err) + } + if err = d.Set("kms_settings", flattenAssuredWorkloadsWorkloadKmsSettings(res.KmsSettings)); err != nil { + return fmt.Errorf("error setting kms_settings in state: %s", err) + } + if err = d.Set("partner", res.Partner); err != nil { + return fmt.Errorf("error setting partner in state: %s", err) + } + if err = d.Set("partner_permissions", flattenAssuredWorkloadsWorkloadPartnerPermissions(res.PartnerPermissions)); err != nil { + return fmt.Errorf("error setting partner_permissions in state: %s", err) + } + if err = d.Set("partner_services_billing_account", res.PartnerServicesBillingAccount); err != nil { + return fmt.Errorf("error setting partner_services_billing_account in state: %s", err) + } + if err = d.Set("provisioned_resources_parent", res.ProvisionedResourcesParent); err != nil { + return fmt.Errorf("error setting provisioned_resources_parent in state: %s", err) + } + if err = d.Set("resource_settings", flattenAssuredWorkloadsWorkloadResourceSettingsArray(res.ResourceSettings)); err != nil { + return fmt.Errorf("error setting resource_settings in state: %s", err) + } + if err = d.Set("violation_notifications_enabled", res.ViolationNotificationsEnabled); err != nil { + return fmt.Errorf("error setting violation_notifications_enabled in state: %s", err) + } + if err = d.Set("workload_options", flattenAssuredWorkloadsWorkloadWorkloadOptions(res.WorkloadOptions)); err != nil { + return fmt.Errorf("error setting workload_options in state: %s", err) + } + if err = d.Set("compliance_status", flattenAssuredWorkloadsWorkloadComplianceStatus(res.ComplianceStatus)); err != nil { + return fmt.Errorf("error setting compliance_status in state: %s", err) + } + if err = 
d.Set("compliant_but_disallowed_services", res.CompliantButDisallowedServices); err != nil { + return fmt.Errorf("error setting compliant_but_disallowed_services in state: %s", err) + } + if err = d.Set("create_time", res.CreateTime); err != nil { + return fmt.Errorf("error setting create_time in state: %s", err) + } + if err = d.Set("ekm_provisioning_response", flattenAssuredWorkloadsWorkloadEkmProvisioningResponse(res.EkmProvisioningResponse)); err != nil { + return fmt.Errorf("error setting ekm_provisioning_response in state: %s", err) + } + if err = d.Set("kaj_enrollment_state", res.KajEnrollmentState); err != nil { + return fmt.Errorf("error setting kaj_enrollment_state in state: %s", err) + } + if err = d.Set("labels", flattenAssuredWorkloadsWorkloadLabels(res.Labels, d)); err != nil { + return fmt.Errorf("error setting labels in state: %s", err) + } + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + if err = d.Set("resources", flattenAssuredWorkloadsWorkloadResourcesArray(res.Resources)); err != nil { + return fmt.Errorf("error setting resources in state: %s", err) + } + if err = d.Set("saa_enrollment_response", flattenAssuredWorkloadsWorkloadSaaEnrollmentResponse(res.SaaEnrollmentResponse)); err != nil { + return fmt.Errorf("error setting saa_enrollment_response in state: %s", err) + } + if err = d.Set("terraform_labels", flattenAssuredWorkloadsWorkloadTerraformLabels(res.Labels, d)); err != nil { + return fmt.Errorf("error setting terraform_labels in state: %s", err) + } + + return nil +} +func resourceAssuredWorkloadsWorkloadUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + obj := &Workload{ + ComplianceRegime: WorkloadComplianceRegimeEnumRef(d.Get("compliance_regime").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Location: dcl.String(d.Get("location").(string)), + Organization: 
dcl.String(d.Get("organization").(string)), + BillingAccount: dcl.String(d.Get("billing_account").(string)), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + EnableSovereignControls: dcl.Bool(d.Get("enable_sovereign_controls").(bool)), + KmsSettings: expandAssuredWorkloadsWorkloadKmsSettings(d.Get("kms_settings")), + Partner: WorkloadPartnerEnumRef(d.Get("partner").(string)), + PartnerPermissions: expandAssuredWorkloadsWorkloadPartnerPermissions(d.Get("partner_permissions")), + PartnerServicesBillingAccount: dcl.String(d.Get("partner_services_billing_account").(string)), + ProvisionedResourcesParent: dcl.String(d.Get("provisioned_resources_parent").(string)), + ResourceSettings: expandAssuredWorkloadsWorkloadResourceSettingsArray(d.Get("resource_settings")), + ViolationNotificationsEnabled: dcl.Bool(d.Get("violation_notifications_enabled").(bool)), + WorkloadOptions: expandAssuredWorkloadsWorkloadWorkloadOptions(d.Get("workload_options")), + Name: dcl.StringOrNil(d.Get("name").(string)), + } + // Construct state hint from old values + old := &Workload{ + ComplianceRegime: WorkloadComplianceRegimeEnumRef(dcl.OldValue(d.GetChange("compliance_regime")).(string)), + DisplayName: dcl.String(dcl.OldValue(d.GetChange("display_name")).(string)), + Location: dcl.String(dcl.OldValue(d.GetChange("location")).(string)), + Organization: dcl.String(dcl.OldValue(d.GetChange("organization")).(string)), + BillingAccount: dcl.String(dcl.OldValue(d.GetChange("billing_account")).(string)), + Labels: tpgresource.CheckStringMap(dcl.OldValue(d.GetChange("effective_labels"))), + EnableSovereignControls: dcl.Bool(dcl.OldValue(d.GetChange("enable_sovereign_controls")).(bool)), + KmsSettings: expandAssuredWorkloadsWorkloadKmsSettings(dcl.OldValue(d.GetChange("kms_settings"))), + Partner: WorkloadPartnerEnumRef(dcl.OldValue(d.GetChange("partner")).(string)), + PartnerPermissions: 
expandAssuredWorkloadsWorkloadPartnerPermissions(dcl.OldValue(d.GetChange("partner_permissions"))), + PartnerServicesBillingAccount: dcl.String(dcl.OldValue(d.GetChange("partner_services_billing_account")).(string)), + ProvisionedResourcesParent: dcl.String(dcl.OldValue(d.GetChange("provisioned_resources_parent")).(string)), + ResourceSettings: expandAssuredWorkloadsWorkloadResourceSettingsArray(dcl.OldValue(d.GetChange("resource_settings"))), + ViolationNotificationsEnabled: dcl.Bool(dcl.OldValue(d.GetChange("violation_notifications_enabled")).(bool)), + WorkloadOptions: expandAssuredWorkloadsWorkloadWorkloadOptions(dcl.OldValue(d.GetChange("workload_options"))), + Name: dcl.StringOrNil(dcl.OldValue(d.GetChange("name")).(string)), + } + directive := dcl.UpdateDirective + directive = append(directive, dcl.WithStateHint(old)) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := transport_tpg.NewDCLAssuredWorkloadsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyWorkload(context.Background(), obj, directive...) 
+
+	if _, ok := err.(dcl.DiffAfterApplyError); ok {
+		log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err)
+	} else if err != nil {
+		// The resource didn't actually update. Unlike the create path we must
+		// NOT clear the ID here: the workload still exists remotely, and
+		// dropping it from state would orphan it. Leave state intact so a
+		// refresh/retry can reconcile.
+		return fmt.Errorf("Error updating Workload: %s", err)
+	}
+
+	log.Printf("[DEBUG] Finished updating Workload %q: %#v", d.Id(), res)
+
+	return resourceAssuredWorkloadsWorkloadRead(d, meta)
+}
+
+func resourceAssuredWorkloadsWorkloadDelete(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*transport_tpg.Config)
+
+	obj := &Workload{
+		ComplianceRegime:              WorkloadComplianceRegimeEnumRef(d.Get("compliance_regime").(string)),
+		DisplayName:                   dcl.String(d.Get("display_name").(string)),
+		Location:                      dcl.String(d.Get("location").(string)),
+		Organization:                  dcl.String(d.Get("organization").(string)),
+		BillingAccount:                dcl.String(d.Get("billing_account").(string)),
+		Labels:                        tpgresource.CheckStringMap(d.Get("effective_labels")),
+		EnableSovereignControls:       dcl.Bool(d.Get("enable_sovereign_controls").(bool)),
+		KmsSettings:                   expandAssuredWorkloadsWorkloadKmsSettings(d.Get("kms_settings")),
+		Partner:                       WorkloadPartnerEnumRef(d.Get("partner").(string)),
+		PartnerPermissions:            expandAssuredWorkloadsWorkloadPartnerPermissions(d.Get("partner_permissions")),
+		PartnerServicesBillingAccount: dcl.String(d.Get("partner_services_billing_account").(string)),
+		ProvisionedResourcesParent:    dcl.String(d.Get("provisioned_resources_parent").(string)),
+		ResourceSettings:              expandAssuredWorkloadsWorkloadResourceSettingsArray(d.Get("resource_settings")),
+		ViolationNotificationsEnabled: dcl.Bool(d.Get("violation_notifications_enabled").(bool)),
+		WorkloadOptions:               expandAssuredWorkloadsWorkloadWorkloadOptions(d.Get("workload_options")),
+		Name:                          dcl.StringOrNil(d.Get("name").(string)),
+	}
+
+	log.Printf("[DEBUG] Deleting Workload %q", d.Id())
+	userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent)
+	if err != nil {
+		return err
+	}
+	billingProject := ""
+	// err
== nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := transport_tpg.NewDCLAssuredWorkloadsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + if err := client.DeleteWorkload(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting Workload: %s", err) + } + + log.Printf("[DEBUG] Finished deleting Workload %q", d.Id()) + return nil +} + +func resourceAssuredWorkloadsWorkloadImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + // Fixed: the named-capture names were missing ("(?P[^/]+)" is invalid Go + // regexp syntax); restored the canonical <organization>/<location>/<name> groups. + if err := tpgresource.ParseImportId([]string{ + "organizations/(?P<organization>[^/]+)/locations/(?P<location>[^/]+)/workloads/(?P<name>[^/]+)", + "(?P<organization>[^/]+)/(?P<location>[^/]+)/(?P<name>[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVarsForId(d, config, "organizations/{{ "{{" }}organization{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/workloads/{{ "{{" }}name{{ "}}" }}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func expandAssuredWorkloadsWorkloadKmsSettings(o interface{}) *WorkloadKmsSettings { + if o == nil { + return EmptyWorkloadKmsSettings + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkloadKmsSettings + } + obj := objArr[0].(map[string]interface{}) + return &WorkloadKmsSettings{ + NextRotationTime: dcl.String(obj["next_rotation_time"].(string)), + RotationPeriod: dcl.String(obj["rotation_period"].(string)), + } +} + +func flattenAssuredWorkloadsWorkloadKmsSettings(obj *WorkloadKmsSettings)
interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "next_rotation_time": obj.NextRotationTime, + "rotation_period": obj.RotationPeriod, + } + + return []interface{}{transformed} + +} + +func expandAssuredWorkloadsWorkloadPartnerPermissions(o interface{}) *WorkloadPartnerPermissions { + if o == nil { + return EmptyWorkloadPartnerPermissions + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkloadPartnerPermissions + } + obj := objArr[0].(map[string]interface{}) + return &WorkloadPartnerPermissions{ + AssuredWorkloadsMonitoring: dcl.Bool(obj["assured_workloads_monitoring"].(bool)), + DataLogsViewer: dcl.Bool(obj["data_logs_viewer"].(bool)), + ServiceAccessApprover: dcl.Bool(obj["service_access_approver"].(bool)), + } +} + +func flattenAssuredWorkloadsWorkloadPartnerPermissions(obj *WorkloadPartnerPermissions) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "assured_workloads_monitoring": obj.AssuredWorkloadsMonitoring, + "data_logs_viewer": obj.DataLogsViewer, + "service_access_approver": obj.ServiceAccessApprover, + } + + return []interface{}{transformed} + +} +func expandAssuredWorkloadsWorkloadResourceSettingsArray(o interface{}) []WorkloadResourceSettings { + if o == nil { + return make([]WorkloadResourceSettings, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return make([]WorkloadResourceSettings, 0) + } + + items := make([]WorkloadResourceSettings, 0, len(objs)) + for _, item := range objs { + i := expandAssuredWorkloadsWorkloadResourceSettings(item) + items = append(items, *i) + } + + return items +} + +func expandAssuredWorkloadsWorkloadResourceSettings(o interface{}) *WorkloadResourceSettings { + if o == nil { + return EmptyWorkloadResourceSettings + } + + obj := o.(map[string]interface{}) + return &WorkloadResourceSettings{ + DisplayName: 
dcl.String(obj["display_name"].(string)), + ResourceId: dcl.String(obj["resource_id"].(string)), + ResourceType: WorkloadResourceSettingsResourceTypeEnumRef(obj["resource_type"].(string)), + } +} + +func flattenAssuredWorkloadsWorkloadResourceSettingsArray(objs []WorkloadResourceSettings) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenAssuredWorkloadsWorkloadResourceSettings(&item) + items = append(items, i) + } + + return items +} + +func flattenAssuredWorkloadsWorkloadResourceSettings(obj *WorkloadResourceSettings) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "display_name": obj.DisplayName, + "resource_id": obj.ResourceId, + "resource_type": obj.ResourceType, + } + + return transformed + +} + +func expandAssuredWorkloadsWorkloadWorkloadOptions(o interface{}) *WorkloadWorkloadOptions { + if o == nil { + return EmptyWorkloadWorkloadOptions + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkloadWorkloadOptions + } + obj := objArr[0].(map[string]interface{}) + return &WorkloadWorkloadOptions{ + KajEnrollmentType: WorkloadWorkloadOptionsKajEnrollmentTypeEnumRef(obj["kaj_enrollment_type"].(string)), + } +} + +func flattenAssuredWorkloadsWorkloadWorkloadOptions(obj *WorkloadWorkloadOptions) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "kaj_enrollment_type": obj.KajEnrollmentType, + } + + return []interface{}{transformed} + +} + +func flattenAssuredWorkloadsWorkloadComplianceStatus(obj *WorkloadComplianceStatus) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "acknowledged_violation_count": obj.AcknowledgedViolationCount, + "active_violation_count": obj.ActiveViolationCount, + } + + return []interface{}{transformed} + +} + +func 
flattenAssuredWorkloadsWorkloadEkmProvisioningResponse(obj *WorkloadEkmProvisioningResponse) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "ekm_provisioning_error_domain": obj.EkmProvisioningErrorDomain, + "ekm_provisioning_error_mapping": obj.EkmProvisioningErrorMapping, + "ekm_provisioning_state": obj.EkmProvisioningState, + } + + return []interface{}{transformed} + +} + +func flattenAssuredWorkloadsWorkloadResourcesArray(objs []WorkloadResources) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenAssuredWorkloadsWorkloadResources(&item) + items = append(items, i) + } + + return items +} + +func flattenAssuredWorkloadsWorkloadResources(obj *WorkloadResources) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "resource_id": obj.ResourceId, + "resource_type": obj.ResourceType, + } + + return transformed + +} + +func flattenAssuredWorkloadsWorkloadSaaEnrollmentResponse(obj *WorkloadSaaEnrollmentResponse) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "setup_errors": flattenAssuredWorkloadsWorkloadSaaEnrollmentResponseSetupErrorsArray(obj.SetupErrors), + "setup_status": obj.SetupStatus, + } + + return []interface{}{transformed} + +} + +func flattenAssuredWorkloadsWorkloadLabels(v map[string]string, d *schema.ResourceData) interface{} { + if v == nil { + return nil + } + + transformed := make(map[string]interface{}) + if l, ok := d.Get("labels").(map[string]interface{}); ok { + for k := range l { + transformed[k] = v[k] + } + } + + return transformed +} + +func flattenAssuredWorkloadsWorkloadTerraformLabels(v map[string]string, d *schema.ResourceData) interface{} { + if v == nil { + return nil + } + + transformed := make(map[string]interface{}) + if l, ok := d.Get("terraform_labels").(map[string]interface{}); ok { + for 
k := range l { + transformed[k] = v[k] + } + } + + return transformed +} + +func flattenAssuredWorkloadsWorkloadSaaEnrollmentResponseSetupErrorsArray(obj []WorkloadSaaEnrollmentResponseSetupErrorsEnum) interface{} { + if obj == nil { + return nil + } + items := []string{} + for _, item := range obj { + items = append(items, string(item)) + } + return items +} diff --git a/mmv1/third_party/terraform/services/assuredworkloads/resource_assured_workloads_workload_generated_test.go.tmpl b/mmv1/third_party/terraform/services/assuredworkloads/resource_assured_workloads_workload_generated_test.go.tmpl new file mode 100644 index 000000000000..793ec01a19c2 --- /dev/null +++ b/mmv1/third_party/terraform/services/assuredworkloads/resource_assured_workloads_workload_generated_test.go.tmpl @@ -0,0 +1,392 @@ +package assuredworkloads_test + +import ( + "context" + "fmt" + "strings" + "testing" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +{{- if ne $.TargetVersionName "ga" }} +func TestAccAssuredWorkloadsWorkload_SovereignControlsWorkload(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "billing_acct": envvar.GetTestBillingAccountFromEnv(t), + "org_id": envvar.GetTestOrgFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckAssuredWorkloadsWorkloadDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccAssuredWorkloadsWorkload_SovereignControlsWorkload(context), + }, + { + 
ResourceName: "google_assured_workloads_workload.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"billing_account", "kms_settings", "resource_settings", "workload_options", "provisioned_resources_parent", "partner_services_billing_account", "labels", "terraform_labels"}, + }, + { + Config: testAccAssuredWorkloadsWorkload_SovereignControlsWorkloadUpdate0(context), + }, + { + ResourceName: "google_assured_workloads_workload.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"billing_account", "kms_settings", "resource_settings", "workload_options", "provisioned_resources_parent", "partner_services_billing_account", "labels", "terraform_labels"}, + }, + }, + }) +} +func TestAccAssuredWorkloadsWorkload_SplitBillingPartnerWorkload(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "billing_acct": envvar.GetTestBillingAccountFromEnv(t), + "org_id": envvar.GetTestOrgFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckAssuredWorkloadsWorkloadDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccAssuredWorkloadsWorkload_SplitBillingPartnerWorkload(context), + }, + { + ResourceName: "google_assured_workloads_workload.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"billing_account", "kms_settings", "resource_settings", "workload_options", "provisioned_resources_parent", "partner_services_billing_account", "labels", "terraform_labels"}, + }, + }, + }) +} +{{- end }} +func TestAccAssuredWorkloadsWorkload_BasicHandWritten(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "billing_acct": envvar.GetTestBillingAccountFromEnv(t), + "org_id": envvar.GetTestOrgFromEnv(t), + "region": 
envvar.GetTestRegionFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + ExternalProviders: map[string]resource.ExternalProvider{ + "time": {}, + }, + CheckDestroy: testAccCheckAssuredWorkloadsWorkloadDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccAssuredWorkloadsWorkload_BasicHandWritten(context), + }, + { + ResourceName: "google_assured_workloads_workload.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"billing_account", "kms_settings", "resource_settings", "workload_options", "provisioned_resources_parent", "partner_services_billing_account", "labels", "terraform_labels"}, + }, + { + Config: testAccAssuredWorkloadsWorkload_BasicHandWrittenUpdate0(context), + }, + { + ResourceName: "google_assured_workloads_workload.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"billing_account", "kms_settings", "resource_settings", "workload_options", "provisioned_resources_parent", "partner_services_billing_account", "labels", "terraform_labels"}, + }, + }, + }) +} +func TestAccAssuredWorkloadsWorkload_FullHandWritten(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "billing_acct": envvar.GetTestBillingAccountFromEnv(t), + "org_id": envvar.GetTestOrgFromEnv(t), + "region": envvar.GetTestRegionFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + ExternalProviders: map[string]resource.ExternalProvider{ + "time": {}, + }, + CheckDestroy: testAccCheckAssuredWorkloadsWorkloadDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccAssuredWorkloadsWorkload_FullHandWritten(context), + }, + { + 
ResourceName: "google_assured_workloads_workload.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"billing_account", "kms_settings", "resource_settings", "workload_options", "provisioned_resources_parent", "partner_services_billing_account", "labels", "terraform_labels"}, + }, + }, + }) +{{- if ne $.TargetVersionName "ga" }} +} + +func testAccAssuredWorkloadsWorkload_SovereignControlsWorkload(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_assured_workloads_workload" "primary" { + compliance_regime = "EU_REGIONS_AND_SUPPORT" + display_name = "tf-test-display%{random_suffix}" + location = "europe-west9" + organization = "%{org_id}" + billing_account = "billingAccounts/%{billing_acct}" + enable_sovereign_controls = true + + kms_settings { + next_rotation_time = "9999-10-02T15:01:23Z" + rotation_period = "10368000s" + } + + resource_settings { + resource_type = "CONSUMER_FOLDER" + } + + resource_settings { + resource_type = "ENCRYPTION_KEYS_PROJECT" + } + + resource_settings { + resource_id = "tf-test-ring%{random_suffix}" + resource_type = "KEYRING" + } + + labels = { + label-one = "value-one" + } + provider = google-beta +} + +`, context) +} + +func testAccAssuredWorkloadsWorkload_SovereignControlsWorkloadUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_assured_workloads_workload" "primary" { + compliance_regime = "EU_REGIONS_AND_SUPPORT" + display_name = "updated-example" + location = "europe-west9" + organization = "%{org_id}" + billing_account = "billingAccounts/%{billing_acct}" + + labels = { + label-two = "value-two-eu-regions-and-support" + } + provider = google-beta +} + +`, context) +} + +func testAccAssuredWorkloadsWorkload_SplitBillingPartnerWorkload(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_assured_workloads_workload" "primary" { + compliance_regime = "ASSURED_WORKLOADS_FOR_PARTNERS" + 
display_name = "tf-test-display%{random_suffix}" + location = "europe-west8" + organization = "%{org_id}" + billing_account = "billingAccounts/%{billing_acct}" + partner = "SOVEREIGN_CONTROLS_BY_PSN" + + partner_permissions { + assured_workloads_monitoring = true + data_logs_viewer = true + service_access_approver = true + } + + partner_services_billing_account = "billingAccounts/01BF3F-2C6DE5-30C607" + + resource_settings { + resource_type = "CONSUMER_FOLDER" + } + + resource_settings { + resource_type = "ENCRYPTION_KEYS_PROJECT" + } + + resource_settings { + resource_id = "tf-test-ring%{random_suffix}" + resource_type = "KEYRING" + } + + violation_notifications_enabled = true + + labels = { + label-one = "value-one" + } + provider = google-beta +} + +`, context) +{{- end }} +} + +func testAccAssuredWorkloadsWorkload_BasicHandWritten(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_assured_workloads_workload" "primary" { + display_name = "tf-test-name%{random_suffix}" + labels = { + a = "a" + } + billing_account = "billingAccounts/%{billing_acct}" + compliance_regime = "FEDRAMP_MODERATE" + provisioned_resources_parent = google_folder.folder1.name + organization = "%{org_id}" + location = "us-central1" + workload_options { + kaj_enrollment_type = "KEY_ACCESS_TRANSPARENCY_OFF" + } + resource_settings { + resource_type = "CONSUMER_FOLDER" + display_name = "folder-display-name" + } + violation_notifications_enabled = true + depends_on = [time_sleep.wait_120_seconds] +} + +resource "time_sleep" "wait_120_seconds" { + create_duration = "120s" + depends_on = [google_folder.folder1] +} + +resource "google_folder" "folder1" { + display_name = "tf-test-name%{random_suffix}" + parent = "organizations/%{org_id}" + deletion_protection = false +} +`, context) +} + +func testAccAssuredWorkloadsWorkload_BasicHandWrittenUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_assured_workloads_workload" 
"primary" { + display_name = "tf-test-name%{random_suffix}" + labels = { + a = "b" + } + billing_account = "billingAccounts/%{billing_acct}" + compliance_regime = "FEDRAMP_MODERATE" + provisioned_resources_parent = google_folder.folder1.name + organization = "%{org_id}" + location = "us-central1" + resource_settings { + resource_type = "CONSUMER_FOLDER" + display_name = "folder-display-name" + } + violation_notifications_enabled = true + depends_on = [time_sleep.wait_120_seconds] +} + +resource "time_sleep" "wait_120_seconds" { + create_duration = "120s" + depends_on = [google_folder.folder1] +} + +resource "google_folder" "folder1" { + display_name = "tf-test-name%{random_suffix}" + parent = "organizations/%{org_id}" + deletion_protection = false +} +`, context) +} + +func testAccAssuredWorkloadsWorkload_FullHandWritten(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_assured_workloads_workload" "primary" { + display_name = "tf-test-name%{random_suffix}" + billing_account = "billingAccounts/%{billing_acct}" + compliance_regime = "FEDRAMP_MODERATE" + organization = "%{org_id}" + location = "us-central1" + kms_settings { + next_rotation_time = "2022-10-02T15:01:23Z" + rotation_period = "864000s" + } + provisioned_resources_parent = google_folder.folder1.name + depends_on = [time_sleep.wait_120_seconds] +} + +resource "time_sleep" "wait_120_seconds" { + create_duration = "120s" + depends_on = [google_folder.folder1] +} + + +resource "google_folder" "folder1" { + display_name = "tf-test-name%{random_suffix}" + parent = "organizations/%{org_id}" + deletion_protection = false +} + +`, context) +} + +func testAccCheckAssuredWorkloadsWorkloadDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + // Fixed: the generated guard compared against "rs.google_assured_workloads_workload", + // which never matches any resource type, so CheckDestroy skipped every resource. + if rs.Type != "google_assured_workloads_workload" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := 
acctest.GoogleProviderConfig(t) + + billingProject := "" + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + obj := &Workload{ + ComplianceRegime: WorkloadComplianceRegimeEnumRef(rs.Primary.Attributes["compliance_regime"]), + DisplayName: dcl.String(rs.Primary.Attributes["display_name"]), + Location: dcl.String(rs.Primary.Attributes["location"]), + Organization: dcl.String(rs.Primary.Attributes["organization"]), + BillingAccount: dcl.String(rs.Primary.Attributes["billing_account"]), + EnableSovereignControls: dcl.Bool(rs.Primary.Attributes["enable_sovereign_controls"] == "true"), + Partner: WorkloadPartnerEnumRef(rs.Primary.Attributes["partner"]), + PartnerServicesBillingAccount: dcl.String(rs.Primary.Attributes["partner_services_billing_account"]), + ProvisionedResourcesParent: dcl.String(rs.Primary.Attributes["provisioned_resources_parent"]), + ViolationNotificationsEnabled: dcl.Bool(rs.Primary.Attributes["violation_notifications_enabled"] == "true"), + CreateTime: dcl.StringOrNil(rs.Primary.Attributes["create_time"]), + KajEnrollmentState: WorkloadKajEnrollmentStateEnumRef(rs.Primary.Attributes["kaj_enrollment_state"]), + Name: dcl.StringOrNil(rs.Primary.Attributes["name"]), + } + + client := transport_tpg.NewDCLAssuredWorkloadsClient(config, config.UserAgent, billingProject, 0) + _, err := client.GetWorkload(context.Background(), obj) + if err == nil { + return fmt.Errorf("google_assured_workloads_workload still exists %v", obj) + } + } + return nil + } +} diff --git a/mmv1/third_party/terraform/services/assuredworkloads/workload.go.tmpl b/mmv1/third_party/terraform/services/assuredworkloads/workload.go.tmpl new file mode 100644 index 000000000000..96a1b33b391c --- /dev/null +++ b/mmv1/third_party/terraform/services/assuredworkloads/workload.go.tmpl @@ -0,0 +1,1121 @@ +package assuredworkloads + +import ( + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "time" + + dcl 
"github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "google.golang.org/api/googleapi" +) + +type Workload struct { + Name *string `json:"name"` + DisplayName *string `json:"displayName"` + Resources []WorkloadResources `json:"resources"` + ComplianceRegime *WorkloadComplianceRegimeEnum `json:"complianceRegime"` + CreateTime *string `json:"createTime"` + BillingAccount *string `json:"billingAccount"` + PartnerServicesBillingAccount *string `json:"partnerServicesBillingAccount"` + Labels map[string]string `json:"labels"` + ProvisionedResourcesParent *string `json:"provisionedResourcesParent"` + KmsSettings *WorkloadKmsSettings `json:"kmsSettings"` + ResourceSettings []WorkloadResourceSettings `json:"resourceSettings"` + KajEnrollmentState *WorkloadKajEnrollmentStateEnum `json:"kajEnrollmentState"` + EnableSovereignControls *bool `json:"enableSovereignControls"` + SaaEnrollmentResponse *WorkloadSaaEnrollmentResponse `json:"saaEnrollmentResponse"` + ComplianceStatus *WorkloadComplianceStatus `json:"complianceStatus"` + CompliantButDisallowedServices []string `json:"compliantButDisallowedServices"` + Partner *WorkloadPartnerEnum `json:"partner"` + PartnerPermissions *WorkloadPartnerPermissions `json:"partnerPermissions"` + WorkloadOptions *WorkloadWorkloadOptions `json:"workloadOptions"` + EkmProvisioningResponse *WorkloadEkmProvisioningResponse `json:"ekmProvisioningResponse"` + ViolationNotificationsEnabled *bool `json:"violationNotificationsEnabled"` + Organization *string `json:"organization"` + Location *string `json:"location"` +} + +func (r *Workload) String() string { + return dcl.SprintResource(r) +} + +// The enum WorkloadResourcesResourceTypeEnum. +type WorkloadResourcesResourceTypeEnum string + +// WorkloadResourcesResourceTypeEnumRef returns a *WorkloadResourcesResourceTypeEnum with the value of string s +// If the empty string is provided, nil is returned. 
+func WorkloadResourcesResourceTypeEnumRef(s string) *WorkloadResourcesResourceTypeEnum { + v := WorkloadResourcesResourceTypeEnum(s) + return &v +} + +func (v WorkloadResourcesResourceTypeEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"RESOURCE_TYPE_UNSPECIFIED", "CONSUMER_PROJECT", "ENCRYPTION_KEYS_PROJECT", "KEYRING", "CONSUMER_FOLDER"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "WorkloadResourcesResourceTypeEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum WorkloadComplianceRegimeEnum. +type WorkloadComplianceRegimeEnum string + +// WorkloadComplianceRegimeEnumRef returns a *WorkloadComplianceRegimeEnum with the value of string s +// If the empty string is provided, nil is returned. +func WorkloadComplianceRegimeEnumRef(s string) *WorkloadComplianceRegimeEnum { + v := WorkloadComplianceRegimeEnum(s) + return &v +} + +func (v WorkloadComplianceRegimeEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"COMPLIANCE_REGIME_UNSPECIFIED", "IL4", "CJIS", "FEDRAMP_HIGH", "FEDRAMP_MODERATE", "US_REGIONAL_ACCESS", "HIPAA", "HITRUST", "EU_REGIONS_AND_SUPPORT", "CA_REGIONS_AND_SUPPORT", "ITAR", "AU_REGIONS_AND_US_SUPPORT", "ASSURED_WORKLOADS_FOR_PARTNERS", "ISR_REGIONS", "ISR_REGIONS_AND_SUPPORT", "CA_PROTECTED_B", "IL5", "IL2", "JP_REGIONS_AND_SUPPORT", "KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS", "REGIONAL_CONTROLS", "HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS", "HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS_US_SUPPORT", "IRS_1075"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "WorkloadComplianceRegimeEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum WorkloadResourceSettingsResourceTypeEnum. 
+type WorkloadResourceSettingsResourceTypeEnum string + +// WorkloadResourceSettingsResourceTypeEnumRef returns a *WorkloadResourceSettingsResourceTypeEnum with the value of string s +// If the empty string is provided, nil is returned. +func WorkloadResourceSettingsResourceTypeEnumRef(s string) *WorkloadResourceSettingsResourceTypeEnum { + v := WorkloadResourceSettingsResourceTypeEnum(s) + return &v +} + +func (v WorkloadResourceSettingsResourceTypeEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"RESOURCE_TYPE_UNSPECIFIED", "CONSUMER_PROJECT", "ENCRYPTION_KEYS_PROJECT", "KEYRING", "CONSUMER_FOLDER"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "WorkloadResourceSettingsResourceTypeEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum WorkloadKajEnrollmentStateEnum. +type WorkloadKajEnrollmentStateEnum string + +// WorkloadKajEnrollmentStateEnumRef returns a *WorkloadKajEnrollmentStateEnum with the value of string s +// If the empty string is provided, nil is returned. +func WorkloadKajEnrollmentStateEnumRef(s string) *WorkloadKajEnrollmentStateEnum { + v := WorkloadKajEnrollmentStateEnum(s) + return &v +} + +func (v WorkloadKajEnrollmentStateEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"KAJ_ENROLLMENT_STATE_UNSPECIFIED", "KAJ_ENROLLMENT_STATE_PENDING", "KAJ_ENROLLMENT_STATE_COMPLETE"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "WorkloadKajEnrollmentStateEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum WorkloadSaaEnrollmentResponseSetupErrorsEnum. +type WorkloadSaaEnrollmentResponseSetupErrorsEnum string + +// WorkloadSaaEnrollmentResponseSetupErrorsEnumRef returns a *WorkloadSaaEnrollmentResponseSetupErrorsEnum with the value of string s +// If the empty string is provided, nil is returned. 
+func WorkloadSaaEnrollmentResponseSetupErrorsEnumRef(s string) *WorkloadSaaEnrollmentResponseSetupErrorsEnum { + v := WorkloadSaaEnrollmentResponseSetupErrorsEnum(s) + return &v +} + +func (v WorkloadSaaEnrollmentResponseSetupErrorsEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"SETUP_ERROR_UNSPECIFIED", "ERROR_INVALID_BASE_SETUP", "ERROR_MISSING_EXTERNAL_SIGNING_KEY", "ERROR_NOT_ALL_SERVICES_ENROLLED", "ERROR_SETUP_CHECK_FAILED"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "WorkloadSaaEnrollmentResponseSetupErrorsEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum WorkloadSaaEnrollmentResponseSetupStatusEnum. +type WorkloadSaaEnrollmentResponseSetupStatusEnum string + +// WorkloadSaaEnrollmentResponseSetupStatusEnumRef returns a *WorkloadSaaEnrollmentResponseSetupStatusEnum with the value of string s +// If the empty string is provided, nil is returned. +func WorkloadSaaEnrollmentResponseSetupStatusEnumRef(s string) *WorkloadSaaEnrollmentResponseSetupStatusEnum { + v := WorkloadSaaEnrollmentResponseSetupStatusEnum(s) + return &v +} + +func (v WorkloadSaaEnrollmentResponseSetupStatusEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"SETUP_STATE_UNSPECIFIED", "STATUS_PENDING", "STATUS_COMPLETE"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "WorkloadSaaEnrollmentResponseSetupStatusEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum WorkloadPartnerEnum. +type WorkloadPartnerEnum string + +// WorkloadPartnerEnumRef returns a *WorkloadPartnerEnum with the value of string s +// If the empty string is provided, nil is returned. 
+func WorkloadPartnerEnumRef(s string) *WorkloadPartnerEnum { + v := WorkloadPartnerEnum(s) + return &v +} + +func (v WorkloadPartnerEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"PARTNER_UNSPECIFIED", "LOCAL_CONTROLS_BY_S3NS", "SOVEREIGN_CONTROLS_BY_T_SYSTEMS", "SOVEREIGN_CONTROLS_BY_SIA_MINSAIT", "SOVEREIGN_CONTROLS_BY_PSN", "SOVEREIGN_CONTROLS_BY_CNTXT", "SOVEREIGN_CONTROLS_BY_CNTXT_NO_EKM"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "WorkloadPartnerEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum WorkloadWorkloadOptionsKajEnrollmentTypeEnum. +type WorkloadWorkloadOptionsKajEnrollmentTypeEnum string + +// WorkloadWorkloadOptionsKajEnrollmentTypeEnumRef returns a *WorkloadWorkloadOptionsKajEnrollmentTypeEnum with the value of string s +// If the empty string is provided, nil is returned. +func WorkloadWorkloadOptionsKajEnrollmentTypeEnumRef(s string) *WorkloadWorkloadOptionsKajEnrollmentTypeEnum { + v := WorkloadWorkloadOptionsKajEnrollmentTypeEnum(s) + return &v +} + +func (v WorkloadWorkloadOptionsKajEnrollmentTypeEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"KAJ_ENROLLMENT_TYPE_UNSPECIFIED", "FULL_KAJ", "EKM_ONLY", "KEY_ACCESS_TRANSPARENCY_OFF"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "WorkloadWorkloadOptionsKajEnrollmentTypeEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum WorkloadEkmProvisioningResponseEkmProvisioningStateEnum. +type WorkloadEkmProvisioningResponseEkmProvisioningStateEnum string + +// WorkloadEkmProvisioningResponseEkmProvisioningStateEnumRef returns a *WorkloadEkmProvisioningResponseEkmProvisioningStateEnum with the value of string s +// If the empty string is provided, nil is returned. 
+func WorkloadEkmProvisioningResponseEkmProvisioningStateEnumRef(s string) *WorkloadEkmProvisioningResponseEkmProvisioningStateEnum { + v := WorkloadEkmProvisioningResponseEkmProvisioningStateEnum(s) + return &v +} + +func (v WorkloadEkmProvisioningResponseEkmProvisioningStateEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"EKM_PROVISIONING_STATE_UNSPECIFIED", "EKM_PROVISIONING_STATE_PENDING", "EKM_PROVISIONING_STATE_FAILED", "EKM_PROVISIONING_STATE_COMPLETED"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "WorkloadEkmProvisioningResponseEkmProvisioningStateEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum WorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnum. +type WorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnum string + +// WorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnumRef returns a *WorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnum with the value of string s +// If the empty string is provided, nil is returned. +func WorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnumRef(s string) *WorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnum { + v := WorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnum(s) + return &v +} + +func (v WorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"EKM_PROVISIONING_ERROR_DOMAIN_UNSPECIFIED", "UNSPECIFIED_ERROR", "GOOGLE_SERVER_ERROR", "EXTERNAL_USER_ERROR", "EXTERNAL_PARTNER_ERROR", "TIMEOUT_ERROR"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "WorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum WorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnum. 
+type WorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnum string
+
+// WorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnumRef returns a *WorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnum with the value of string s.
+// A non-nil pointer is always returned, even when s is the empty string.
+func WorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnumRef(s string) *WorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnum {
+	v := WorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnum(s)
+	return &v
+}
+
+func (v WorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnum) Validate() error {
+	if string(v) == "" {
+		// Empty enum is okay.
+		return nil
+	}
+	for _, s := range []string{"EKM_PROVISIONING_ERROR_MAPPING_UNSPECIFIED", "INVALID_SERVICE_ACCOUNT", "MISSING_METRICS_SCOPE_ADMIN_PERMISSION", "MISSING_EKM_CONNECTION_ADMIN_PERMISSION"} {
+		if string(v) == s {
+			return nil
+		}
+	}
+	return &dcl.EnumInvalidError{
+		Enum:  "WorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnum",
+		Value: string(v),
+		Valid: []string{}, // note: the generator leaves the valid-values list empty
+	}
+}
+
+type WorkloadResources struct {
+	empty        bool                               `json:"-"` // sentinel flag; true only on the EmptyWorkloadResources object
+	ResourceId   *int64                             `json:"resourceId"`
+	ResourceType *WorkloadResourcesResourceTypeEnum `json:"resourceType"`
+}
+
+type jsonWorkloadResources WorkloadResources
+
+func (r *WorkloadResources) UnmarshalJSON(data []byte) error {
+	var res jsonWorkloadResources
+	if err := json.Unmarshal(data, &res); err != nil {
+		return err
+	}
+
+	var m map[string]interface{}
+	json.Unmarshal(data, &m) // error deliberately ignored: data was already parsed successfully above
+
+	if len(m) == 0 {
+		*r = *EmptyWorkloadResources // treat "{}" as the canonical empty sentinel
+	} else {
+
+		r.ResourceId = res.ResourceId
+
+		r.ResourceType = res.ResourceType
+
+	}
+	return nil
+}
+
+// This object is used to assert a desired state where this WorkloadResources is
+// empty. Go lacks global const objects, but this object should be treated
+// as one. Modifying this object will have undesirable results.
+var EmptyWorkloadResources *WorkloadResources = &WorkloadResources{empty: true}
+
+func (r *WorkloadResources) Empty() bool {
+	return r.empty
+}
+
+func (r *WorkloadResources) String() string {
+	return dcl.SprintResource(r)
+}
+
+func (r *WorkloadResources) HashCode() string {
+	// Placeholder for a more complex hash method that handles ordering, etc
+	// Hash resource body for easy comparison later
+	hash := sha256.Sum256([]byte(r.String()))
+	return fmt.Sprintf("%x", hash)
+}
+
+type WorkloadKmsSettings struct {
+	empty            bool    `json:"-"` // sentinel flag; true only on the EmptyWorkloadKmsSettings object
+	NextRotationTime *string `json:"nextRotationTime"`
+	RotationPeriod   *string `json:"rotationPeriod"`
+}
+
+type jsonWorkloadKmsSettings WorkloadKmsSettings
+
+func (r *WorkloadKmsSettings) UnmarshalJSON(data []byte) error {
+	var res jsonWorkloadKmsSettings
+	if err := json.Unmarshal(data, &res); err != nil {
+		return err
+	}
+
+	var m map[string]interface{}
+	json.Unmarshal(data, &m) // error deliberately ignored: data was already parsed successfully above
+
+	if len(m) == 0 {
+		*r = *EmptyWorkloadKmsSettings // treat "{}" as the canonical empty sentinel
+	} else {
+
+		r.NextRotationTime = res.NextRotationTime
+
+		r.RotationPeriod = res.RotationPeriod
+
+	}
+	return nil
+}
+
+// This object is used to assert a desired state where this WorkloadKmsSettings is
+// empty. Go lacks global const objects, but this object should be treated
+// as one. Modifying this object will have undesirable results.
+var EmptyWorkloadKmsSettings *WorkloadKmsSettings = &WorkloadKmsSettings{empty: true}
+
+func (r *WorkloadKmsSettings) Empty() bool {
+	return r.empty
+}
+
+func (r *WorkloadKmsSettings) String() string {
+	return dcl.SprintResource(r)
+}
+
+func (r *WorkloadKmsSettings) HashCode() string {
+	// Placeholder for a more complex hash method that handles ordering, etc
+	// Hash resource body for easy comparison later
+	hash := sha256.Sum256([]byte(r.String()))
+	return fmt.Sprintf("%x", hash)
+}
+
+type WorkloadResourceSettings struct {
+	empty        bool                                      `json:"-"` // sentinel flag; true only on the EmptyWorkloadResourceSettings object
+	ResourceId   *string                                   `json:"resourceId"`
+	ResourceType *WorkloadResourceSettingsResourceTypeEnum `json:"resourceType"`
+	DisplayName  *string                                   `json:"displayName"`
+}
+
+type jsonWorkloadResourceSettings WorkloadResourceSettings
+
+func (r *WorkloadResourceSettings) UnmarshalJSON(data []byte) error {
+	var res jsonWorkloadResourceSettings
+	if err := json.Unmarshal(data, &res); err != nil {
+		return err
+	}
+
+	var m map[string]interface{}
+	json.Unmarshal(data, &m) // error deliberately ignored: data was already parsed successfully above
+
+	if len(m) == 0 {
+		*r = *EmptyWorkloadResourceSettings // treat "{}" as the canonical empty sentinel
+	} else {
+
+		r.ResourceId = res.ResourceId
+
+		r.ResourceType = res.ResourceType
+
+		r.DisplayName = res.DisplayName
+
+	}
+	return nil
+}
+
+// This object is used to assert a desired state where this WorkloadResourceSettings is
+// empty. Go lacks global const objects, but this object should be treated
+// as one. Modifying this object will have undesirable results.
+var EmptyWorkloadResourceSettings *WorkloadResourceSettings = &WorkloadResourceSettings{empty: true}
+
+func (r *WorkloadResourceSettings) Empty() bool {
+	return r.empty
+}
+
+func (r *WorkloadResourceSettings) String() string {
+	return dcl.SprintResource(r)
+}
+
+func (r *WorkloadResourceSettings) HashCode() string {
+	// Placeholder for a more complex hash method that handles ordering, etc
+	// Hash resource body for easy comparison later
+	hash := sha256.Sum256([]byte(r.String()))
+	return fmt.Sprintf("%x", hash)
+}
+
+type WorkloadSaaEnrollmentResponse struct {
+	empty       bool                                           `json:"-"` // sentinel flag; true only on the EmptyWorkloadSaaEnrollmentResponse object
+	SetupErrors []WorkloadSaaEnrollmentResponseSetupErrorsEnum `json:"setupErrors"`
+	SetupStatus *WorkloadSaaEnrollmentResponseSetupStatusEnum  `json:"setupStatus"`
+}
+
+type jsonWorkloadSaaEnrollmentResponse WorkloadSaaEnrollmentResponse
+
+func (r *WorkloadSaaEnrollmentResponse) UnmarshalJSON(data []byte) error {
+	var res jsonWorkloadSaaEnrollmentResponse
+	if err := json.Unmarshal(data, &res); err != nil {
+		return err
+	}
+
+	var m map[string]interface{}
+	json.Unmarshal(data, &m) // error deliberately ignored: data was already parsed successfully above
+
+	if len(m) == 0 {
+		*r = *EmptyWorkloadSaaEnrollmentResponse // treat "{}" as the canonical empty sentinel
+	} else {
+
+		r.SetupErrors = res.SetupErrors
+
+		r.SetupStatus = res.SetupStatus
+
+	}
+	return nil
+}
+
+// This object is used to assert a desired state where this WorkloadSaaEnrollmentResponse is
+// empty. Go lacks global const objects, but this object should be treated
+// as one. Modifying this object will have undesirable results.
+var EmptyWorkloadSaaEnrollmentResponse *WorkloadSaaEnrollmentResponse = &WorkloadSaaEnrollmentResponse{empty: true}
+
+func (r *WorkloadSaaEnrollmentResponse) Empty() bool {
+	return r.empty
+}
+
+func (r *WorkloadSaaEnrollmentResponse) String() string {
+	return dcl.SprintResource(r)
+}
+
+func (r *WorkloadSaaEnrollmentResponse) HashCode() string {
+	// Placeholder for a more complex hash method that handles ordering, etc
+	// Hash resource body for easy comparison later
+	hash := sha256.Sum256([]byte(r.String()))
+	return fmt.Sprintf("%x", hash)
+}
+
+type WorkloadComplianceStatus struct {
+	empty                      bool    `json:"-"` // sentinel flag; true only on the EmptyWorkloadComplianceStatus object
+	ActiveViolationCount       []int64 `json:"activeViolationCount"` // NOTE(review): counts modeled as repeated ints by the generator — confirm against the API schema
+	AcknowledgedViolationCount []int64 `json:"acknowledgedViolationCount"`
+}
+
+type jsonWorkloadComplianceStatus WorkloadComplianceStatus
+
+func (r *WorkloadComplianceStatus) UnmarshalJSON(data []byte) error {
+	var res jsonWorkloadComplianceStatus
+	if err := json.Unmarshal(data, &res); err != nil {
+		return err
+	}
+
+	var m map[string]interface{}
+	json.Unmarshal(data, &m) // error deliberately ignored: data was already parsed successfully above
+
+	if len(m) == 0 {
+		*r = *EmptyWorkloadComplianceStatus // treat "{}" as the canonical empty sentinel
+	} else {
+
+		r.ActiveViolationCount = res.ActiveViolationCount
+
+		r.AcknowledgedViolationCount = res.AcknowledgedViolationCount
+
+	}
+	return nil
+}
+
+// This object is used to assert a desired state where this WorkloadComplianceStatus is
+// empty. Go lacks global const objects, but this object should be treated
+// as one. Modifying this object will have undesirable results.
+var EmptyWorkloadComplianceStatus *WorkloadComplianceStatus = &WorkloadComplianceStatus{empty: true}
+
+func (r *WorkloadComplianceStatus) Empty() bool {
+	return r.empty
+}
+
+func (r *WorkloadComplianceStatus) String() string {
+	return dcl.SprintResource(r)
+}
+
+func (r *WorkloadComplianceStatus) HashCode() string {
+	// Placeholder for a more complex hash method that handles ordering, etc
+	// Hash resource body for easy comparison later
+	hash := sha256.Sum256([]byte(r.String()))
+	return fmt.Sprintf("%x", hash)
+}
+
+type WorkloadPartnerPermissions struct {
+	empty                      bool  `json:"-"` // sentinel flag; true only on the EmptyWorkloadPartnerPermissions object
+	DataLogsViewer             *bool `json:"dataLogsViewer"`
+	ServiceAccessApprover      *bool `json:"serviceAccessApprover"`
+	AssuredWorkloadsMonitoring *bool `json:"assuredWorkloadsMonitoring"`
+}
+
+type jsonWorkloadPartnerPermissions WorkloadPartnerPermissions
+
+func (r *WorkloadPartnerPermissions) UnmarshalJSON(data []byte) error {
+	var res jsonWorkloadPartnerPermissions
+	if err := json.Unmarshal(data, &res); err != nil {
+		return err
+	}
+
+	var m map[string]interface{}
+	json.Unmarshal(data, &m) // error deliberately ignored: data was already parsed successfully above
+
+	if len(m) == 0 {
+		*r = *EmptyWorkloadPartnerPermissions // treat "{}" as the canonical empty sentinel
+	} else {
+
+		r.DataLogsViewer = res.DataLogsViewer
+
+		r.ServiceAccessApprover = res.ServiceAccessApprover
+
+		r.AssuredWorkloadsMonitoring = res.AssuredWorkloadsMonitoring
+
+	}
+	return nil
+}
+
+// This object is used to assert a desired state where this WorkloadPartnerPermissions is
+// empty. Go lacks global const objects, but this object should be treated
+// as one. Modifying this object will have undesirable results.
+var EmptyWorkloadPartnerPermissions *WorkloadPartnerPermissions = &WorkloadPartnerPermissions{empty: true}
+
+func (r *WorkloadPartnerPermissions) Empty() bool {
+	return r.empty
+}
+
+func (r *WorkloadPartnerPermissions) String() string {
+	return dcl.SprintResource(r)
+}
+
+func (r *WorkloadPartnerPermissions) HashCode() string {
+	// Placeholder for a more complex hash method that handles ordering, etc
+	// Hash resource body for easy comparison later
+	hash := sha256.Sum256([]byte(r.String()))
+	return fmt.Sprintf("%x", hash)
+}
+
+type WorkloadWorkloadOptions struct {
+	empty             bool                                          `json:"-"` // sentinel flag; true only on the EmptyWorkloadWorkloadOptions object
+	KajEnrollmentType *WorkloadWorkloadOptionsKajEnrollmentTypeEnum `json:"kajEnrollmentType"`
+}
+
+type jsonWorkloadWorkloadOptions WorkloadWorkloadOptions
+
+func (r *WorkloadWorkloadOptions) UnmarshalJSON(data []byte) error {
+	var res jsonWorkloadWorkloadOptions
+	if err := json.Unmarshal(data, &res); err != nil {
+		return err
+	}
+
+	var m map[string]interface{}
+	json.Unmarshal(data, &m) // error deliberately ignored: data was already parsed successfully above
+
+	if len(m) == 0 {
+		*r = *EmptyWorkloadWorkloadOptions // treat "{}" as the canonical empty sentinel
+	} else {
+
+		r.KajEnrollmentType = res.KajEnrollmentType
+
+	}
+	return nil
+}
+
+// This object is used to assert a desired state where this WorkloadWorkloadOptions is
+// empty. Go lacks global const objects, but this object should be treated
+// as one. Modifying this object will have undesirable results.
+var EmptyWorkloadWorkloadOptions *WorkloadWorkloadOptions = &WorkloadWorkloadOptions{empty: true}
+
+func (r *WorkloadWorkloadOptions) Empty() bool {
+	return r.empty
+}
+
+func (r *WorkloadWorkloadOptions) String() string {
+	return dcl.SprintResource(r)
+}
+
+func (r *WorkloadWorkloadOptions) HashCode() string {
+	// Placeholder for a more complex hash method that handles ordering, etc
+	// Hash resource body for easy comparison later
+	hash := sha256.Sum256([]byte(r.String()))
+	return fmt.Sprintf("%x", hash)
+}
+
+type WorkloadEkmProvisioningResponse struct {
+	empty                       bool                                                            `json:"-"` // sentinel flag; true only on the EmptyWorkloadEkmProvisioningResponse object
+	EkmProvisioningState        *WorkloadEkmProvisioningResponseEkmProvisioningStateEnum        `json:"ekmProvisioningState"`
+	EkmProvisioningErrorDomain  *WorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnum  `json:"ekmProvisioningErrorDomain"`
+	EkmProvisioningErrorMapping *WorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnum `json:"ekmProvisioningErrorMapping"`
+}
+
+type jsonWorkloadEkmProvisioningResponse WorkloadEkmProvisioningResponse
+
+func (r *WorkloadEkmProvisioningResponse) UnmarshalJSON(data []byte) error {
+	var res jsonWorkloadEkmProvisioningResponse
+	if err := json.Unmarshal(data, &res); err != nil {
+		return err
+	}
+
+	var m map[string]interface{}
+	json.Unmarshal(data, &m) // error deliberately ignored: data was already parsed successfully above
+
+	if len(m) == 0 {
+		*r = *EmptyWorkloadEkmProvisioningResponse // treat "{}" as the canonical empty sentinel
+	} else {
+
+		r.EkmProvisioningState = res.EkmProvisioningState
+
+		r.EkmProvisioningErrorDomain = res.EkmProvisioningErrorDomain
+
+		r.EkmProvisioningErrorMapping = res.EkmProvisioningErrorMapping
+
+	}
+	return nil
+}
+
+// This object is used to assert a desired state where this WorkloadEkmProvisioningResponse is
+// empty. Go lacks global const objects, but this object should be treated
+// as one. Modifying this object will have undesirable results.
+var EmptyWorkloadEkmProvisioningResponse *WorkloadEkmProvisioningResponse = &WorkloadEkmProvisioningResponse{empty: true}
+
+func (r *WorkloadEkmProvisioningResponse) Empty() bool {
+	return r.empty
+}
+
+func (r *WorkloadEkmProvisioningResponse) String() string {
+	return dcl.SprintResource(r)
+}
+
+func (r *WorkloadEkmProvisioningResponse) HashCode() string {
+	// Placeholder for a more complex hash method that handles ordering, etc
+	// Hash resource body for easy comparison later
+	hash := sha256.Sum256([]byte(r.String()))
+	return fmt.Sprintf("%x", hash)
+}
+
+// Describe returns a simple description of this resource to ensure that automated tools
+// can identify it.
+func (r *Workload) Describe() dcl.ServiceTypeVersion {
+	return dcl.ServiceTypeVersion{
+		Service: "assured_workloads",
+		Type:    "Workload",
{{- if ne $.TargetVersionName "ga" }}
+		Version: "beta",
{{- else }}
+		Version: "assuredworkloads",
{{- end }}
+	}
+}
+
+func (r *Workload) ID() (string, error) {
+	if err := extractWorkloadFields(r); err != nil {
+		return "", err
+	}
+	nr := r.urlNormalized()
+	params := map[string]interface{}{
+		"name":                              dcl.ValueOrEmptyString(nr.Name),
+		"display_name":                      dcl.ValueOrEmptyString(nr.DisplayName),
+		"resources":                         dcl.ValueOrEmptyString(nr.Resources),
+		"compliance_regime":                 dcl.ValueOrEmptyString(nr.ComplianceRegime),
+		"create_time":                       dcl.ValueOrEmptyString(nr.CreateTime),
+		"billing_account":                   dcl.ValueOrEmptyString(nr.BillingAccount),
+		"partner_services_billing_account":  dcl.ValueOrEmptyString(nr.PartnerServicesBillingAccount),
+		"labels":                            dcl.ValueOrEmptyString(nr.Labels),
+		"provisioned_resources_parent":      dcl.ValueOrEmptyString(nr.ProvisionedResourcesParent),
+		"kms_settings":                      dcl.ValueOrEmptyString(nr.KmsSettings),
+		"resource_settings":                 dcl.ValueOrEmptyString(nr.ResourceSettings),
+		"kaj_enrollment_state":              dcl.ValueOrEmptyString(nr.KajEnrollmentState),
+		"enable_sovereign_controls":         dcl.ValueOrEmptyString(nr.EnableSovereignControls),
+		"saa_enrollment_response":           dcl.ValueOrEmptyString(nr.SaaEnrollmentResponse),
+		"compliance_status":                 dcl.ValueOrEmptyString(nr.ComplianceStatus),
+		"compliant_but_disallowed_services": dcl.ValueOrEmptyString(nr.CompliantButDisallowedServices),
+		"partner":                           dcl.ValueOrEmptyString(nr.Partner),
+		"partner_permissions":               dcl.ValueOrEmptyString(nr.PartnerPermissions),
+		"workload_options":                  dcl.ValueOrEmptyString(nr.WorkloadOptions),
+		"ekm_provisioning_response":         dcl.ValueOrEmptyString(nr.EkmProvisioningResponse),
+		"violation_notifications_enabled":   dcl.ValueOrEmptyString(nr.ViolationNotificationsEnabled),
+		"organization":                      dcl.ValueOrEmptyString(nr.Organization),
+		"location":                          dcl.ValueOrEmptyString(nr.Location),
+	}
+	return dcl.Nprintf("organizations/{{ "{{" }}organization{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/workloads/{{ "{{" }}name{{ "}}" }}", params), nil
+}
+
+const WorkloadMaxPage = -1
+
+type WorkloadList struct {
+	Items []*Workload
+
+	nextToken string
+
+	pageSize int32
+
+	resource *Workload
+}
+
+func (l *WorkloadList) HasNext() bool {
+	return l.nextToken != ""
+}
+
+func (l *WorkloadList) Next(ctx context.Context, c *Client) error {
+	ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second))
+	defer cancel()
+
+	if !l.HasNext() {
+		return fmt.Errorf("no next page")
+	}
+	items, token, err := c.listWorkload(ctx, l.resource, l.nextToken, l.pageSize)
+	if err != nil {
+		return err
+	}
+	l.Items = items
+	l.nextToken = token
+	return err
+}
+
+func (c *Client) ListWorkload(ctx context.Context, organization, location string) (*WorkloadList, error) {
+	ctx = dcl.ContextWithRequestID(ctx)
+	c = NewClient(c.Config.Clone(dcl.WithCodeRetryability(map[int]dcl.Retryability{
+		400: dcl.Retryability{
+			Retryable: true,
+			Pattern:   "contains projects or other resources that are not deleted",
+			Timeout:   300000000000, // 300 s, expressed in nanoseconds
+		},
+	})))
+	ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second))
+	defer cancel()
+
+	return c.ListWorkloadWithMaxResults(ctx, organization, location, WorkloadMaxPage)
+
+}
+
+func (c *Client) ListWorkloadWithMaxResults(ctx context.Context, organization, location string, pageSize int32) (*WorkloadList, error) {
+	ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second))
+	defer cancel()
+
+	// Create a resource object so that we can use proper url normalization methods.
+	r := &Workload{
+		Organization: &organization,
+		Location:     &location,
+	}
+	items, token, err := c.listWorkload(ctx, r, "", pageSize)
+	if err != nil {
+		return nil, err
+	}
+	return &WorkloadList{
+		Items:     items,
+		nextToken: token,
+		pageSize:  pageSize,
+		resource:  r,
+	}, nil
+}
+
+func (c *Client) GetWorkload(ctx context.Context, r *Workload) (*Workload, error) {
+	ctx = dcl.ContextWithRequestID(ctx)
+	c = NewClient(c.Config.Clone(dcl.WithCodeRetryability(map[int]dcl.Retryability{
+		400: dcl.Retryability{
+			Retryable: true,
+			Pattern:   "contains projects or other resources that are not deleted",
+			Timeout:   300000000000, // 300 s, expressed in nanoseconds
+		},
+	})))
+	ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second))
+	defer cancel()
+
+	// This is *purposefully* suppressing errors.
+	// This function is used with url-normalized values + not URL normalized values.
+	// URL Normalized values will throw unintentional errors, since those values are not of the proper parent form.
+
+	extractWorkloadFields(r)
+
+	b, err := c.getWorkloadRaw(ctx, r)
+	if err != nil {
+		if dcl.IsNotFound(err) {
+			return nil, &googleapi.Error{
+				Code:    404,
+				Message: err.Error(),
+			}
+		}
+		return nil, err
+	}
+	result, err := unmarshalWorkload(b, c, r)
+	if err != nil {
+		return nil, err
+	}
+	result.Organization = r.Organization
+	result.Location = r.Location
+	result.Name = r.Name
+
+	c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result)
+	c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r)
+	result, err = canonicalizeWorkloadNewState(c, result, r)
+	if err != nil {
+		return nil, err
+	}
+	if err := postReadExtractWorkloadFields(result); err != nil {
+		return result, err
+	}
+	c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result)
+
+	return result, nil
+}
+
+func (c *Client) DeleteWorkload(ctx context.Context, r *Workload) error {
+	ctx = dcl.ContextWithRequestID(ctx)
+	c = NewClient(c.Config.Clone(dcl.WithCodeRetryability(map[int]dcl.Retryability{
+		400: dcl.Retryability{
+			Retryable: true,
+			Pattern:   "contains projects or other resources that are not deleted",
+			Timeout:   300000000000, // 300 s, expressed in nanoseconds
+		},
+	})))
+	ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second))
+	defer cancel()
+
+	if r == nil {
+		return fmt.Errorf("Workload resource is nil")
+	}
+	c.Config.Logger.InfoWithContext(ctx, "Deleting Workload...")
+	deleteOp := deleteWorkloadOperation{}
+	return deleteOp.do(ctx, r, c)
+}
+
+// DeleteAllWorkload deletes all resources that the filter function returns true on.
+func (c *Client) DeleteAllWorkload(ctx context.Context, organization, location string, filter func(*Workload) bool) error { + listObj, err := c.ListWorkload(ctx, organization, location) + if err != nil { + return err + } + + err = c.deleteAllWorkload(ctx, filter, listObj.Items) + if err != nil { + return err + } + for listObj.HasNext() { + err = listObj.Next(ctx, c) + if err != nil { + return nil + } + err = c.deleteAllWorkload(ctx, filter, listObj.Items) + if err != nil { + return err + } + } + return nil +} + +func (c *Client) ApplyWorkload(ctx context.Context, rawDesired *Workload, opts ...dcl.ApplyOption) (*Workload, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + ctx = dcl.ContextWithRequestID(ctx) + c = NewClient(c.Config.Clone(dcl.WithCodeRetryability(map[int]dcl.Retryability{ + 400: dcl.Retryability{ + Retryable: true, + Pattern: "contains projects or other resources that are not deleted", + Timeout: 300000000000, + }, + }))) + var resultNewState *Workload + err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + newState, err := applyWorkloadHelper(c, ctx, rawDesired, opts...) + resultNewState = newState + if err != nil { + // If the error is 409, there is conflict in resource update. + // Here we want to apply changes based on latest state. + if dcl.IsConflictError(err) { + return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} + } + return nil, err + } + return nil, nil + }, c.Config.RetryProvider) + return resultNewState, err +} + +func applyWorkloadHelper(c *Client, ctx context.Context, rawDesired *Workload, opts ...dcl.ApplyOption) (*Workload, error) { + c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyWorkload...") + c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) + + // 1.1: Validation of user-specified fields in desired state. 
+
+	if err := rawDesired.validate(); err != nil {
+		return nil, err
+	}
+
+	if err := extractWorkloadFields(rawDesired); err != nil {
+		return nil, err
+	}
+
+	initial, desired, fieldDiffs, err := c.workloadDiffsForRawDesired(ctx, rawDesired, opts...)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create a diff: %w", err)
+	}
+
+	diffs, err := convertFieldDiffsToWorkloadDiffs(c.Config, fieldDiffs, opts)
+	if err != nil {
+		return nil, err
+	}
+
+	// TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far).
+
+	// 2.3: Lifecycle Directive Check
+	var create bool
+	lp := dcl.FetchLifecycleParams(opts)
+	if initial == nil {
+		if dcl.HasLifecycleParam(lp, dcl.BlockCreation) {
+			return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)}
+		}
+		create = true
+	} else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) {
+		return nil, dcl.ApplyInfeasibleError{
+			Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial),
+		}
+	} else {
+		for _, d := range diffs {
+			if d.RequiresRecreate {
+				return nil, dcl.ApplyInfeasibleError{
+					Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d),
+				}
+			}
+			if dcl.HasLifecycleParam(lp, dcl.BlockModification) {
+				return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)}
+			}
+		}
+	}
+
+	// 2.4 Imperative Request Planning
+	var ops []workloadApiOperation
+	if create {
+		ops = append(ops, &createWorkloadOperation{})
+	} else {
+		for _, d := range diffs {
+			ops = append(ops, d.UpdateOp)
+		}
+	}
+	c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops)
+
+	// 2.5 Request Actuation
+	for _, op := range ops {
+		c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op)
+		if err := op.do(ctx, desired, c); err != nil {
+			c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", op, op, err)
+			return nil, err
+		}
+		c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op)
+	}
+	return applyWorkloadDiff(c, ctx, desired, rawDesired, ops, opts...)
+}
+
+func applyWorkloadDiff(c *Client, ctx context.Context, desired *Workload, rawDesired *Workload, ops []workloadApiOperation, opts ...dcl.ApplyOption) (*Workload, error) {
+	// 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state
+	c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...")
+	rawNew, err := c.GetWorkload(ctx, desired)
+	if err != nil {
+		return nil, err
+	}
+	// Get additional values from the first response.
+	// These values should be merged into the newState above.
+	if len(ops) > 0 {
+		lastOp := ops[len(ops)-1] // only a create's first response carries extra fields worth merging
+		if o, ok := lastOp.(*createWorkloadOperation); ok {
+			if r, hasR := o.FirstResponse(); hasR {
+
+				c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...")
+
+				fullResp, err := unmarshalMapWorkload(r, c, rawDesired)
+				if err != nil {
+					return nil, err
+				}
+
+				rawNew, err = canonicalizeWorkloadNewState(c, rawNew, fullResp)
+				if err != nil {
+					return nil, err
+				}
+			}
+		}
+	}
+
+	c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired)
+	// 3.2b Canonicalization of raw new state using raw desired state
+	newState, err := canonicalizeWorkloadNewState(c, rawNew, rawDesired)
+	if err != nil {
+		return rawNew, err
+	}
+
+	c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState)
+	// 3.3 Comparison of the new state and raw desired state.
+	// TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE
+	newDesired, err := canonicalizeWorkloadDesiredState(rawDesired, newState)
+	if err != nil {
+		return newState, err
+	}
+
+	if err := postReadExtractWorkloadFields(newState); err != nil {
+		return newState, err
+	}
+
+	// Need to ensure any transformations made here match acceptably in differ.
+
+	if err := postReadExtractWorkloadFields(newDesired); err != nil {
+		return newState, err
+	}
+
+	c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired)
+	newDiffs, err := diffWorkload(c, newDesired, newState)
+	if err != nil {
+		return newState, err
+	}
+
+	if len(newDiffs) == 0 {
+		c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.")
+	} else {
+		c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs)
+		diffMessages := make([]string, len(newDiffs))
+		for i, d := range newDiffs {
+			diffMessages[i] = fmt.Sprintf("%v", d)
+		}
+		return newState, dcl.DiffAfterApplyError{Diffs: diffMessages}
+	}
+	c.Config.Logger.InfoWithContext(ctx, "Done Apply.")
+	return newState, nil
+}
diff --git a/mmv1/third_party/terraform/services/assuredworkloads/workload_internal.go.tmpl b/mmv1/third_party/terraform/services/assuredworkloads/workload_internal.go.tmpl
new file mode 100644
index 000000000000..bda37fa683ac
--- /dev/null
+++ b/mmv1/third_party/terraform/services/assuredworkloads/workload_internal.go.tmpl
@@ -0,0 +1,4151 @@
+package assuredworkloads
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"io/ioutil" // NOTE(review): io/ioutil is deprecated since Go 1.16 — consider io/os equivalents
+	"strings"
+
+	dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource"
+	"github.com/hashicorp/terraform-provider-google/google/tpgdclresource/operations"
+)
+
+func (r *Workload) validate() error {
+
+	if err := dcl.Required(r, "displayName"); err != nil {
+		return err
+	}
+	if err := dcl.Required(r, "complianceRegime"); err != nil {
+		return err
+	}
+	if err := dcl.RequiredParameter(r.Organization, "Organization"); err != nil {
+		return err
+	}
+	if err := dcl.RequiredParameter(r.Location, "Location"); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(r.KmsSettings) {
+		if err := r.KmsSettings.validate(); err != nil {
+			return err
+		}
+	}
+	if !dcl.IsEmptyValueIndirect(r.SaaEnrollmentResponse) {
+		if err := r.SaaEnrollmentResponse.validate(); err != nil {
+			return err
+		}
+	}
+	if !dcl.IsEmptyValueIndirect(r.ComplianceStatus) {
+		if err := r.ComplianceStatus.validate(); err != nil {
+			return err
+		}
+	}
+	if !dcl.IsEmptyValueIndirect(r.PartnerPermissions) {
+		if err := r.PartnerPermissions.validate(); err != nil {
+			return err
+		}
+	}
+	if !dcl.IsEmptyValueIndirect(r.WorkloadOptions) {
+		if err := r.WorkloadOptions.validate(); err != nil {
+			return err
+		}
+	}
+	if !dcl.IsEmptyValueIndirect(r.EkmProvisioningResponse) {
+		if err := r.EkmProvisioningResponse.validate(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+func (r *WorkloadResources) validate() error {
+	return nil
+}
+func (r *WorkloadKmsSettings) validate() error {
+	if err := dcl.Required(r, "nextRotationTime"); err != nil {
+		return err
+	}
+	if err := dcl.Required(r, "rotationPeriod"); err != nil {
+		return err
+	}
+	return nil
+}
+func (r *WorkloadResourceSettings) validate() error {
+	return nil
+}
+func (r *WorkloadSaaEnrollmentResponse) validate() error {
+	return nil
+}
+func (r *WorkloadComplianceStatus) validate() error {
+	return nil
+}
+func (r *WorkloadPartnerPermissions) validate() error {
+	return nil
+}
+func (r *WorkloadWorkloadOptions) validate() error {
+	return nil
+}
+func (r *WorkloadEkmProvisioningResponse) validate() error {
+	return nil
+}
+func (r *Workload) basePath() string {
+	params := map[string]interface{}{
+		"location": dcl.ValueOrEmptyString(r.Location),
+	}
{{- if ne $.TargetVersionName "ga" }}
+	return dcl.Nprintf("https://{{ "{{" }}location{{ "}}" }}-assuredworkloads.googleapis.com/v1beta1/", params)
{{- else }}
+	return dcl.Nprintf("https://{{ "{{" }}location{{ "}}" }}-assuredworkloads.googleapis.com/v1/", params)
{{- end }}
+}
+
+func (r *Workload) getURL(userBasePath string) (string, error) {
+	nr := r.urlNormalized()
+	params := map[string]interface{}{
+		"organization": dcl.ValueOrEmptyString(nr.Organization),
+		"location":     dcl.ValueOrEmptyString(nr.Location),
+		"name":         dcl.ValueOrEmptyString(nr.Name),
+ } + return dcl.URL("organizations/{{ "{{" }}organization{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/workloads/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +func (r *Workload) listURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "organization": dcl.ValueOrEmptyString(nr.Organization), + "location": dcl.ValueOrEmptyString(nr.Location), + } + return dcl.URL("organizations/{{ "{{" }}organization{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/workloads", nr.basePath(), userBasePath, params), nil + +} + +func (r *Workload) createURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "organization": dcl.ValueOrEmptyString(nr.Organization), + "location": dcl.ValueOrEmptyString(nr.Location), + } + return dcl.URL("organizations/{{ "{{" }}organization{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/workloads", nr.basePath(), userBasePath, params), nil + +} + +func (r *Workload) deleteURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "organization": dcl.ValueOrEmptyString(nr.Organization), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("organizations/{{ "{{" }}organization{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/workloads/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +// workloadApiOperation represents a mutable operation in the underlying REST +// API such as Create, Update, or Delete. +type workloadApiOperation interface { + do(context.Context, *Workload, *Client) error +} + +// newUpdateWorkloadUpdateWorkloadRequest creates a request for an +// Workload resource's UpdateWorkload update type by filling in the update +// fields based on the intended state of the resource. 
+func newUpdateWorkloadUpdateWorkloadRequest(ctx context.Context, f *Workload, c *Client) (map[string]interface{}, error) {
+	req := map[string]interface{}{}
+	res := f
+	_ = res
+
+	if v := f.DisplayName; !dcl.IsEmptyValueIndirect(v) {
+		req["displayName"] = v
+	}
+	if v := f.Labels; !dcl.IsEmptyValueIndirect(v) {
+		req["labels"] = v
+	}
+	b, err := c.getWorkloadRaw(ctx, f)
+	if err != nil {
+		return nil, err
+	}
+	var m map[string]interface{}
+	if err := json.Unmarshal(b, &m); err != nil {
+		return nil, err
+	}
+	rawEtag, err := dcl.GetMapEntry(
+		m,
+		[]string{"etag"},
+	)
+	if err != nil {
+		c.Config.Logger.WarningWithContextf(ctx, "Failed to fetch from JSON Path: %v", err)
+	} else {
+		req["etag"] = rawEtag.(string) // NOTE(review): unchecked type assertion — panics if the server returns a non-string etag
+	}
+	return req, nil
+}
+
+// marshalUpdateWorkloadUpdateWorkloadRequest converts the update into
+// the final JSON request body.
+func marshalUpdateWorkloadUpdateWorkloadRequest(c *Client, m map[string]interface{}) ([]byte, error) {
+
+	return json.Marshal(m)
+}
+
+type updateWorkloadUpdateWorkloadOperation struct {
+	// If the update operation has the REQUIRES_APPLY_OPTIONS trait, this will be populated.
+	// Usually it will be nil - this is to prevent us from accidentally depending on apply
+	// options, which should usually be unnecessary.
+	ApplyOptions []dcl.ApplyOption
+	FieldDiffs   []*dcl.FieldDiff
+}
+
+// do creates a request and sends it to the appropriate URL. In most operations,
+// do will transcribe a subset of the resource into a request object and send a
+// PUT request to a single URL.
+ +func (op *updateWorkloadUpdateWorkloadOperation) do(ctx context.Context, r *Workload, c *Client) error { + _, err := c.GetWorkload(ctx, r) + if err != nil { + return err + } + + u, err := r.updateURL(c.Config.BasePath, "UpdateWorkload") + if err != nil { + return err + } + mask := dcl.UpdateMaskWithPrefix(op.FieldDiffs, "Workload") + u, err = dcl.AddQueryParams(u, map[string]string{"updateMask": mask}) + if err != nil { + return err + } + + req, err := newUpdateWorkloadUpdateWorkloadRequest(ctx, r, c) + if err != nil { + return err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created update: %#v", req) + body, err := marshalUpdateWorkloadUpdateWorkloadRequest(c, req) + if err != nil { + return err + } + _, err = dcl.SendRequest(ctx, c.Config, "PATCH", u, bytes.NewBuffer(body), c.Config.RetryProvider) + if err != nil { + return err + } + + return nil +} + +func (c *Client) listWorkloadRaw(ctx context.Context, r *Workload, pageToken string, pageSize int32) ([]byte, error) { + u, err := r.urlNormalized().listURL(c.Config.BasePath) + if err != nil { + return nil, err + } + + m := make(map[string]string) + if pageToken != "" { + m["pageToken"] = pageToken + } + + if pageSize != WorkloadMaxPage { + m["pageSize"] = fmt.Sprintf("%v", pageSize) + } + + u, err = dcl.AddQueryParams(u, m) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + return ioutil.ReadAll(resp.Response.Body) +} + +type listWorkloadOperation struct { + Workloads []map[string]interface{} `json:"workloads"` + Token string `json:"nextPageToken"` +} + +func (c *Client) listWorkload(ctx context.Context, r *Workload, pageToken string, pageSize int32) ([]*Workload, string, error) { + b, err := c.listWorkloadRaw(ctx, r, pageToken, pageSize) + if err != nil { + return nil, "", err + } + + var m listWorkloadOperation + if err := json.Unmarshal(b, &m); 
err != nil { + return nil, "", err + } + + var l []*Workload + for _, v := range m.Workloads { + res, err := unmarshalMapWorkload(v, c, r) + if err != nil { + return nil, m.Token, err + } + res.Organization = r.Organization + res.Location = r.Location + l = append(l, res) + } + + return l, m.Token, nil +} + +func (c *Client) deleteAllWorkload(ctx context.Context, f func(*Workload) bool, resources []*Workload) error { + var errors []string + for _, res := range resources { + if f(res) { + // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. + err := c.DeleteWorkload(ctx, res) + if err != nil { + errors = append(errors, err.Error()) + } + } + } + if len(errors) > 0 { + return fmt.Errorf("%v", strings.Join(errors, "\n")) + } else { + return nil + } +} + +type deleteWorkloadOperation struct{} + +func (op *deleteWorkloadOperation) do(ctx context.Context, r *Workload, c *Client) error { + r, err := c.GetWorkload(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + c.Config.Logger.InfoWithContextf(ctx, "Workload not found, returning. Original error: %v", err) + return nil + } + c.Config.Logger.WarningWithContextf(ctx, "GetWorkload checking for existence. error: %v", err) + return err + } + + err = r.deleteResources(ctx, c) + if err != nil { + return err + } + u, err := r.deleteURL(c.Config.BasePath) + if err != nil { + return err + } + + // Delete should never have a body + body := &bytes.Buffer{} + _, err = dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider) + if err != nil { + return fmt.Errorf("failed to delete Workload: %w", err) + } + + // We saw a race condition where for some successful delete operation, the Get calls returned resources for a short duration. + // This is the reason we are adding retry to handle that case. 
+ retriesRemaining := 10 + dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + _, err := c.GetWorkload(ctx, r) + if dcl.IsNotFound(err) { + return nil, nil + } + if retriesRemaining > 0 { + retriesRemaining-- + return &dcl.RetryDetails{}, dcl.OperationNotDone{} + } + return nil, dcl.NotDeletedError{ExistingResource: r} + }, c.Config.RetryProvider) + return nil +} + +// Create operations are similar to Update operations, although they do not have +// specific request objects. The Create request object is the json encoding of +// the resource, which is modified by res.marshal to form the base request body. +type createWorkloadOperation struct { + response map[string]interface{} +} + +func (op *createWorkloadOperation) FirstResponse() (map[string]interface{}, bool) { + return op.response, len(op.response) > 0 +} + +func (op *createWorkloadOperation) do(ctx context.Context, r *Workload, c *Client) error { + c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r) + u, err := r.createURL(c.Config.BasePath) + if err != nil { + return err + } + + req, err := r.marshal(c) + if err != nil { + return err + } + if r.Name != nil { + // Allowing creation to continue with Name set could result in a Workload with the wrong Name. + return fmt.Errorf("server-generated parameter Name was specified by user as %v, should be unspecified", dcl.ValueOrEmptyString(r.Name)) + } + resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider) + if err != nil { + return err + } + // wait for object to be created. 
+ var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { + c.Config.Logger.Warningf("Creation failed after waiting for operation: %v", err) + return err + } + c.Config.Logger.InfoWithContextf(ctx, "Successfully waited for operation") + op.response, _ = o.FirstResponse() + + // Include Name in URL substitution for initial GET request. + m := op.response + r.Name = dcl.SelfLinkToName(dcl.FlattenString(m["name"])) + + if _, err := c.GetWorkload(ctx, r); err != nil { + c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) + return err + } + + return nil +} + +func (c *Client) getWorkloadRaw(ctx context.Context, r *Workload) ([]byte, error) { + + u, err := r.getURL(c.Config.BasePath) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + b, err := ioutil.ReadAll(resp.Response.Body) + if err != nil { + return nil, err + } + + return b, nil +} + +func (c *Client) workloadDiffsForRawDesired(ctx context.Context, rawDesired *Workload, opts ...dcl.ApplyOption) (initial, desired *Workload, diffs []*dcl.FieldDiff, err error) { + c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") + // First, let us see if the user provided a state hint. If they did, we will start fetching based on that. + var fetchState *Workload + if sh := dcl.FetchStateHint(opts); sh != nil { + if r, ok := sh.(*Workload); !ok { + c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected Workload, got %T", sh) + } else { + fetchState = r + } + } + if fetchState == nil { + fetchState = rawDesired + } + + if fetchState.Name == nil { + // We cannot perform a get because of lack of information. 
We have to assume + // that this is being created for the first time. + desired, err := canonicalizeWorkloadDesiredState(rawDesired, nil) + return nil, desired, nil, err + } + // 1.2: Retrieval of raw initial state from API + rawInitial, err := c.GetWorkload(ctx, fetchState) + if rawInitial == nil { + if !dcl.IsNotFound(err) { + c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a Workload resource already exists: %s", err) + return nil, nil, nil, fmt.Errorf("failed to retrieve Workload resource: %v", err) + } + c.Config.Logger.InfoWithContext(ctx, "Found that Workload resource did not exist.") + // Perform canonicalization to pick up defaults. + desired, err = canonicalizeWorkloadDesiredState(rawDesired, rawInitial) + return nil, desired, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Found initial state for Workload: %v", rawInitial) + c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for Workload: %v", rawDesired) + + // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. + if err := extractWorkloadFields(rawInitial); err != nil { + return nil, nil, nil, err + } + + // 1.3: Canonicalize raw initial state into initial state. + initial, err = canonicalizeWorkloadInitialState(rawInitial, rawDesired) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for Workload: %v", initial) + + // 1.4: Canonicalize raw desired state into desired state. + desired, err = canonicalizeWorkloadDesiredState(rawDesired, rawInitial, opts...) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for Workload: %v", desired) + + // 2.1: Comparison of initial and desired state. + diffs, err = diffWorkload(c, desired, initial, opts...) 
+ return initial, desired, diffs, err +} + +func canonicalizeWorkloadInitialState(rawInitial, rawDesired *Workload) (*Workload, error) { + // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. + return rawInitial, nil +} + +/* +* Canonicalizers +* +* These are responsible for converting either a user-specified config or a +* GCP API response to a standard format that can be used for difference checking. +* */ + +func canonicalizeWorkloadDesiredState(rawDesired, rawInitial *Workload, opts ...dcl.ApplyOption) (*Workload, error) { + + if rawInitial == nil { + // Since the initial state is empty, the desired state is all we have. + // We canonicalize the remaining nested objects with nil to pick up defaults. + rawDesired.KmsSettings = canonicalizeWorkloadKmsSettings(rawDesired.KmsSettings, nil, opts...) + rawDesired.SaaEnrollmentResponse = canonicalizeWorkloadSaaEnrollmentResponse(rawDesired.SaaEnrollmentResponse, nil, opts...) + rawDesired.ComplianceStatus = canonicalizeWorkloadComplianceStatus(rawDesired.ComplianceStatus, nil, opts...) + rawDesired.PartnerPermissions = canonicalizeWorkloadPartnerPermissions(rawDesired.PartnerPermissions, nil, opts...) + rawDesired.WorkloadOptions = canonicalizeWorkloadWorkloadOptions(rawDesired.WorkloadOptions, nil, opts...) + rawDesired.EkmProvisioningResponse = canonicalizeWorkloadEkmProvisioningResponse(rawDesired.EkmProvisioningResponse, nil, opts...) + + return rawDesired, nil + } + canonicalDesired := &Workload{} + if dcl.IsZeroValue(rawDesired.Name) || (dcl.IsEmptyValueIndirect(rawDesired.Name) && dcl.IsEmptyValueIndirect(rawInitial.Name)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ canonicalDesired.Name = rawInitial.Name + } else { + canonicalDesired.Name = rawDesired.Name + } + if dcl.StringCanonicalize(rawDesired.DisplayName, rawInitial.DisplayName) { + canonicalDesired.DisplayName = rawInitial.DisplayName + } else { + canonicalDesired.DisplayName = rawDesired.DisplayName + } + if dcl.IsZeroValue(rawDesired.ComplianceRegime) || (dcl.IsEmptyValueIndirect(rawDesired.ComplianceRegime) && dcl.IsEmptyValueIndirect(rawInitial.ComplianceRegime)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + canonicalDesired.ComplianceRegime = rawInitial.ComplianceRegime + } else { + canonicalDesired.ComplianceRegime = rawDesired.ComplianceRegime + } + if dcl.StringCanonicalize(rawDesired.BillingAccount, rawInitial.BillingAccount) { + canonicalDesired.BillingAccount = rawInitial.BillingAccount + } else { + canonicalDesired.BillingAccount = rawDesired.BillingAccount + } + if dcl.StringCanonicalize(rawDesired.PartnerServicesBillingAccount, rawInitial.PartnerServicesBillingAccount) { + canonicalDesired.PartnerServicesBillingAccount = rawInitial.PartnerServicesBillingAccount + } else { + canonicalDesired.PartnerServicesBillingAccount = rawDesired.PartnerServicesBillingAccount + } + if dcl.IsZeroValue(rawDesired.Labels) || (dcl.IsEmptyValueIndirect(rawDesired.Labels) && dcl.IsEmptyValueIndirect(rawInitial.Labels)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ canonicalDesired.Labels = rawInitial.Labels + } else { + canonicalDesired.Labels = rawDesired.Labels + } + if dcl.StringCanonicalize(rawDesired.ProvisionedResourcesParent, rawInitial.ProvisionedResourcesParent) { + canonicalDesired.ProvisionedResourcesParent = rawInitial.ProvisionedResourcesParent + } else { + canonicalDesired.ProvisionedResourcesParent = rawDesired.ProvisionedResourcesParent + } + canonicalDesired.KmsSettings = canonicalizeWorkloadKmsSettings(rawDesired.KmsSettings, rawInitial.KmsSettings, opts...) + canonicalDesired.ResourceSettings = canonicalizeWorkloadResourceSettingsSlice(rawDesired.ResourceSettings, rawInitial.ResourceSettings, opts...) + if dcl.BoolCanonicalize(rawDesired.EnableSovereignControls, rawInitial.EnableSovereignControls) { + canonicalDesired.EnableSovereignControls = rawInitial.EnableSovereignControls + } else { + canonicalDesired.EnableSovereignControls = rawDesired.EnableSovereignControls + } + if dcl.IsZeroValue(rawDesired.Partner) || (dcl.IsEmptyValueIndirect(rawDesired.Partner) && dcl.IsEmptyValueIndirect(rawInitial.Partner)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + canonicalDesired.Partner = rawInitial.Partner + } else { + canonicalDesired.Partner = rawDesired.Partner + } + canonicalDesired.PartnerPermissions = canonicalizeWorkloadPartnerPermissions(rawDesired.PartnerPermissions, rawInitial.PartnerPermissions, opts...) + canonicalDesired.WorkloadOptions = canonicalizeWorkloadWorkloadOptions(rawDesired.WorkloadOptions, rawInitial.WorkloadOptions, opts...) 
+ if dcl.BoolCanonicalize(rawDesired.ViolationNotificationsEnabled, rawInitial.ViolationNotificationsEnabled) { + canonicalDesired.ViolationNotificationsEnabled = rawInitial.ViolationNotificationsEnabled + } else { + canonicalDesired.ViolationNotificationsEnabled = rawDesired.ViolationNotificationsEnabled + } + if dcl.NameToSelfLink(rawDesired.Organization, rawInitial.Organization) { + canonicalDesired.Organization = rawInitial.Organization + } else { + canonicalDesired.Organization = rawDesired.Organization + } + if dcl.NameToSelfLink(rawDesired.Location, rawInitial.Location) { + canonicalDesired.Location = rawInitial.Location + } else { + canonicalDesired.Location = rawDesired.Location + } + return canonicalDesired, nil +} + +func canonicalizeWorkloadNewState(c *Client, rawNew, rawDesired *Workload) (*Workload, error) { + + if dcl.IsEmptyValueIndirect(rawNew.Name) && dcl.IsEmptyValueIndirect(rawDesired.Name) { + rawNew.Name = rawDesired.Name + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.DisplayName) && dcl.IsEmptyValueIndirect(rawDesired.DisplayName) { + rawNew.DisplayName = rawDesired.DisplayName + } else { + if dcl.StringCanonicalize(rawDesired.DisplayName, rawNew.DisplayName) { + rawNew.DisplayName = rawDesired.DisplayName + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Resources) && dcl.IsEmptyValueIndirect(rawDesired.Resources) { + rawNew.Resources = rawDesired.Resources + } else { + rawNew.Resources = canonicalizeNewWorkloadResourcesSlice(c, rawDesired.Resources, rawNew.Resources) + } + + if dcl.IsEmptyValueIndirect(rawNew.ComplianceRegime) && dcl.IsEmptyValueIndirect(rawDesired.ComplianceRegime) { + rawNew.ComplianceRegime = rawDesired.ComplianceRegime + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.CreateTime) && dcl.IsEmptyValueIndirect(rawDesired.CreateTime) { + rawNew.CreateTime = rawDesired.CreateTime + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.BillingAccount) && dcl.IsEmptyValueIndirect(rawDesired.BillingAccount) { + 
rawNew.BillingAccount = rawDesired.BillingAccount + } else { + rawNew.BillingAccount = rawDesired.BillingAccount + } + + if dcl.IsEmptyValueIndirect(rawNew.PartnerServicesBillingAccount) && dcl.IsEmptyValueIndirect(rawDesired.PartnerServicesBillingAccount) { + rawNew.PartnerServicesBillingAccount = rawDesired.PartnerServicesBillingAccount + } else { + rawNew.PartnerServicesBillingAccount = rawDesired.PartnerServicesBillingAccount + } + + if dcl.IsEmptyValueIndirect(rawNew.Labels) && dcl.IsEmptyValueIndirect(rawDesired.Labels) { + rawNew.Labels = rawDesired.Labels + } else { + } + + rawNew.ProvisionedResourcesParent = rawDesired.ProvisionedResourcesParent + + rawNew.KmsSettings = rawDesired.KmsSettings + + rawNew.ResourceSettings = rawDesired.ResourceSettings + + if dcl.IsEmptyValueIndirect(rawNew.KajEnrollmentState) && dcl.IsEmptyValueIndirect(rawDesired.KajEnrollmentState) { + rawNew.KajEnrollmentState = rawDesired.KajEnrollmentState + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.EnableSovereignControls) && dcl.IsEmptyValueIndirect(rawDesired.EnableSovereignControls) { + rawNew.EnableSovereignControls = rawDesired.EnableSovereignControls + } else { + if dcl.BoolCanonicalize(rawDesired.EnableSovereignControls, rawNew.EnableSovereignControls) { + rawNew.EnableSovereignControls = rawDesired.EnableSovereignControls + } + } + + if dcl.IsEmptyValueIndirect(rawNew.SaaEnrollmentResponse) && dcl.IsEmptyValueIndirect(rawDesired.SaaEnrollmentResponse) { + rawNew.SaaEnrollmentResponse = rawDesired.SaaEnrollmentResponse + } else { + rawNew.SaaEnrollmentResponse = canonicalizeNewWorkloadSaaEnrollmentResponse(c, rawDesired.SaaEnrollmentResponse, rawNew.SaaEnrollmentResponse) + } + + if dcl.IsEmptyValueIndirect(rawNew.ComplianceStatus) && dcl.IsEmptyValueIndirect(rawDesired.ComplianceStatus) { + rawNew.ComplianceStatus = rawDesired.ComplianceStatus + } else { + rawNew.ComplianceStatus = canonicalizeNewWorkloadComplianceStatus(c, rawDesired.ComplianceStatus, 
rawNew.ComplianceStatus) + } + + if dcl.IsEmptyValueIndirect(rawNew.CompliantButDisallowedServices) && dcl.IsEmptyValueIndirect(rawDesired.CompliantButDisallowedServices) { + rawNew.CompliantButDisallowedServices = rawDesired.CompliantButDisallowedServices + } else { + if dcl.StringArrayCanonicalize(rawDesired.CompliantButDisallowedServices, rawNew.CompliantButDisallowedServices) { + rawNew.CompliantButDisallowedServices = rawDesired.CompliantButDisallowedServices + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Partner) && dcl.IsEmptyValueIndirect(rawDesired.Partner) { + rawNew.Partner = rawDesired.Partner + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.PartnerPermissions) && dcl.IsEmptyValueIndirect(rawDesired.PartnerPermissions) { + rawNew.PartnerPermissions = rawDesired.PartnerPermissions + } else { + rawNew.PartnerPermissions = canonicalizeNewWorkloadPartnerPermissions(c, rawDesired.PartnerPermissions, rawNew.PartnerPermissions) + } + + rawNew.WorkloadOptions = rawDesired.WorkloadOptions + + if dcl.IsEmptyValueIndirect(rawNew.EkmProvisioningResponse) && dcl.IsEmptyValueIndirect(rawDesired.EkmProvisioningResponse) { + rawNew.EkmProvisioningResponse = rawDesired.EkmProvisioningResponse + } else { + rawNew.EkmProvisioningResponse = canonicalizeNewWorkloadEkmProvisioningResponse(c, rawDesired.EkmProvisioningResponse, rawNew.EkmProvisioningResponse) + } + + if dcl.IsEmptyValueIndirect(rawNew.ViolationNotificationsEnabled) && dcl.IsEmptyValueIndirect(rawDesired.ViolationNotificationsEnabled) { + rawNew.ViolationNotificationsEnabled = rawDesired.ViolationNotificationsEnabled + } else { + if dcl.BoolCanonicalize(rawDesired.ViolationNotificationsEnabled, rawNew.ViolationNotificationsEnabled) { + rawNew.ViolationNotificationsEnabled = rawDesired.ViolationNotificationsEnabled + } + } + + rawNew.Organization = rawDesired.Organization + + rawNew.Location = rawDesired.Location + + return rawNew, nil +} + +func canonicalizeWorkloadResources(des, initial 
*WorkloadResources, opts ...dcl.ApplyOption) *WorkloadResources { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkloadResources{} + + if dcl.IsZeroValue(des.ResourceId) || (dcl.IsEmptyValueIndirect(des.ResourceId) && dcl.IsEmptyValueIndirect(initial.ResourceId)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.ResourceId = initial.ResourceId + } else { + cDes.ResourceId = des.ResourceId + } + if dcl.IsZeroValue(des.ResourceType) || (dcl.IsEmptyValueIndirect(des.ResourceType) && dcl.IsEmptyValueIndirect(initial.ResourceType)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.ResourceType = initial.ResourceType + } else { + cDes.ResourceType = des.ResourceType + } + + return cDes +} + +func canonicalizeWorkloadResourcesSlice(des, initial []WorkloadResources, opts ...dcl.ApplyOption) []WorkloadResources { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkloadResources, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkloadResources(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkloadResources, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkloadResources(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkloadResources(c *Client, des, nw *WorkloadResources) *WorkloadResources { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkloadResources while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewWorkloadResourcesSet(c *Client, des, nw []WorkloadResources) []WorkloadResources { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkloadResources + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkloadResourcesNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkloadResources(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkloadResourcesSlice(c *Client, des, nw []WorkloadResources) []WorkloadResources { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []WorkloadResources + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkloadResources(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkloadKmsSettings(des, initial *WorkloadKmsSettings, opts ...dcl.ApplyOption) *WorkloadKmsSettings { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkloadKmsSettings{} + + if dcl.IsZeroValue(des.NextRotationTime) || (dcl.IsEmptyValueIndirect(des.NextRotationTime) && dcl.IsEmptyValueIndirect(initial.NextRotationTime)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.NextRotationTime = initial.NextRotationTime + } else { + cDes.NextRotationTime = des.NextRotationTime + } + if dcl.StringCanonicalize(des.RotationPeriod, initial.RotationPeriod) || dcl.IsZeroValue(des.RotationPeriod) { + cDes.RotationPeriod = initial.RotationPeriod + } else { + cDes.RotationPeriod = des.RotationPeriod + } + + return cDes +} + +func canonicalizeWorkloadKmsSettingsSlice(des, initial []WorkloadKmsSettings, opts ...dcl.ApplyOption) []WorkloadKmsSettings { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkloadKmsSettings, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkloadKmsSettings(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkloadKmsSettings, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkloadKmsSettings(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkloadKmsSettings(c *Client, des, nw *WorkloadKmsSettings) *WorkloadKmsSettings { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkloadKmsSettings while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.RotationPeriod, nw.RotationPeriod) { + nw.RotationPeriod = des.RotationPeriod + } + + return nw +} + +func canonicalizeNewWorkloadKmsSettingsSet(c *Client, des, nw []WorkloadKmsSettings) []WorkloadKmsSettings { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []WorkloadKmsSettings + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkloadKmsSettingsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkloadKmsSettings(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkloadKmsSettingsSlice(c *Client, des, nw []WorkloadKmsSettings) []WorkloadKmsSettings { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []WorkloadKmsSettings + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkloadKmsSettings(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkloadResourceSettings(des, initial *WorkloadResourceSettings, opts ...dcl.ApplyOption) *WorkloadResourceSettings { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkloadResourceSettings{} + + if dcl.StringCanonicalize(des.ResourceId, initial.ResourceId) || dcl.IsZeroValue(des.ResourceId) { + cDes.ResourceId = initial.ResourceId + } else { + cDes.ResourceId = des.ResourceId + } + if dcl.IsZeroValue(des.ResourceType) || (dcl.IsEmptyValueIndirect(des.ResourceType) && dcl.IsEmptyValueIndirect(initial.ResourceType)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.ResourceType = initial.ResourceType + } else { + cDes.ResourceType = des.ResourceType + } + if dcl.StringCanonicalize(des.DisplayName, initial.DisplayName) || dcl.IsZeroValue(des.DisplayName) { + cDes.DisplayName = initial.DisplayName + } else { + cDes.DisplayName = des.DisplayName + } + + return cDes +} + +func canonicalizeWorkloadResourceSettingsSlice(des, initial []WorkloadResourceSettings, opts ...dcl.ApplyOption) []WorkloadResourceSettings { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkloadResourceSettings, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkloadResourceSettings(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkloadResourceSettings, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkloadResourceSettings(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkloadResourceSettings(c *Client, des, nw *WorkloadResourceSettings) *WorkloadResourceSettings { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkloadResourceSettings while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.ResourceId, nw.ResourceId) { + nw.ResourceId = des.ResourceId + } + if dcl.StringCanonicalize(des.DisplayName, nw.DisplayName) { + nw.DisplayName = des.DisplayName + } + + return nw +} + +func canonicalizeNewWorkloadResourceSettingsSet(c *Client, des, nw []WorkloadResourceSettings) []WorkloadResourceSettings { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+	var items []WorkloadResourceSettings
+	for _, d := range des {
+		matchedIndex := -1
+		for i, n := range nw {
+			if diffs, _ := compareWorkloadResourceSettingsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
+				matchedIndex = i
+				break
+			}
+		}
+		if matchedIndex != -1 {
+			items = append(items, *canonicalizeNewWorkloadResourceSettings(c, &d, &nw[matchedIndex]))
+			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
+		}
+	}
+	// Also include elements in nw that are not matched in des.
+	items = append(items, nw...)
+
+	return items
+}
+
+// canonicalizeNewWorkloadResourceSettingsSlice canonicalizes desired vs. actual
+// slices element-wise by index; unequal lengths are returned as-is so the
+// differ can report them.
+func canonicalizeNewWorkloadResourceSettingsSlice(c *Client, des, nw []WorkloadResourceSettings) []WorkloadResourceSettings {
+	if des == nil {
+		return nw
+	}
+
+	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
+	// Return the original array.
+	if len(des) != len(nw) {
+		return nw
+	}
+
+	var items []WorkloadResourceSettings
+	for i, d := range des {
+		n := nw[i]
+		items = append(items, *canonicalizeNewWorkloadResourceSettings(c, &d, &n))
+	}
+
+	return items
+}
+
+// canonicalizeWorkloadSaaEnrollmentResponse merges the desired object with the
+// initial (current) state, preferring the initial value wherever the desired
+// value is zero/empty so equivalent states produce no diff.
+func canonicalizeWorkloadSaaEnrollmentResponse(des, initial *WorkloadSaaEnrollmentResponse, opts ...dcl.ApplyOption) *WorkloadSaaEnrollmentResponse {
+	if des == nil {
+		return initial
+	}
+	if des.empty {
+		return des
+	}
+
+	if initial == nil {
+		return des
+	}
+
+	cDes := &WorkloadSaaEnrollmentResponse{}
+
+	if dcl.IsZeroValue(des.SetupErrors) || (dcl.IsEmptyValueIndirect(des.SetupErrors) && dcl.IsEmptyValueIndirect(initial.SetupErrors)) {
+		// Desired and initial values are equivalent, so set canonical desired value to initial value.
+		cDes.SetupErrors = initial.SetupErrors
+	} else {
+		cDes.SetupErrors = des.SetupErrors
+	}
+	if dcl.IsZeroValue(des.SetupStatus) || (dcl.IsEmptyValueIndirect(des.SetupStatus) && dcl.IsEmptyValueIndirect(initial.SetupStatus)) {
+		// Desired and initial values are equivalent, so set canonical desired value to initial value.
+		cDes.SetupStatus = initial.SetupStatus
+	} else {
+		cDes.SetupStatus = des.SetupStatus
+	}
+
+	return cDes
+}
+
+// canonicalizeWorkloadSaaEnrollmentResponseSlice canonicalizes each desired
+// element against the initial element at the same index; when lengths differ,
+// desired elements are canonicalized against nil.
+func canonicalizeWorkloadSaaEnrollmentResponseSlice(des, initial []WorkloadSaaEnrollmentResponse, opts ...dcl.ApplyOption) []WorkloadSaaEnrollmentResponse {
+	if dcl.IsEmptyValueIndirect(des) {
+		return initial
+	}
+
+	if len(des) != len(initial) {
+
+		items := make([]WorkloadSaaEnrollmentResponse, 0, len(des))
+		for _, d := range des {
+			cd := canonicalizeWorkloadSaaEnrollmentResponse(&d, nil, opts...)
+			if cd != nil {
+				items = append(items, *cd)
+			}
+		}
+		return items
+	}
+
+	items := make([]WorkloadSaaEnrollmentResponse, 0, len(des))
+	for i, d := range des {
+		cd := canonicalizeWorkloadSaaEnrollmentResponse(&d, &initial[i], opts...)
+		if cd != nil {
+			items = append(items, *cd)
+		}
+	}
+	return items
+
+}
+
+// canonicalizeNewWorkloadSaaEnrollmentResponse reconciles the desired object
+// with the post-apply (nw) object, preferring the server value; a nil server
+// value is only overridden by an explicitly empty desired object.
+func canonicalizeNewWorkloadSaaEnrollmentResponse(c *Client, des, nw *WorkloadSaaEnrollmentResponse) *WorkloadSaaEnrollmentResponse {
+
+	if des == nil {
+		return nw
+	}
+
+	if nw == nil {
+		if dcl.IsEmptyValueIndirect(des) {
+			c.Config.Logger.Info("Found explicitly empty value for WorkloadSaaEnrollmentResponse while comparing non-nil desired to nil actual. Returning desired object.")
+			return des
+		}
+		return nil
+	}
+
+	return nw
+}
+
+// canonicalizeNewWorkloadSaaEnrollmentResponseSet applies set semantics:
+// desired elements are matched to actual elements by deep comparison,
+// matched pairs are canonicalized, and unmatched actual elements are kept.
+func canonicalizeNewWorkloadSaaEnrollmentResponseSet(c *Client, des, nw []WorkloadSaaEnrollmentResponse) []WorkloadSaaEnrollmentResponse {
+	if des == nil {
+		return nw
+	}
+
+	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
+	var items []WorkloadSaaEnrollmentResponse
+	for _, d := range des {
+		matchedIndex := -1
+		for i, n := range nw {
+			if diffs, _ := compareWorkloadSaaEnrollmentResponseNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
+				matchedIndex = i
+				break
+			}
+		}
+		if matchedIndex != -1 {
+			items = append(items, *canonicalizeNewWorkloadSaaEnrollmentResponse(c, &d, &nw[matchedIndex]))
+			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
+		}
+	}
+	// Also include elements in nw that are not matched in des.
+	items = append(items, nw...)
+
+	return items
+}
+
+// canonicalizeNewWorkloadSaaEnrollmentResponseSlice canonicalizes desired vs.
+// actual slices element-wise by index; unequal lengths are returned as-is so
+// the differ can report them.
+func canonicalizeNewWorkloadSaaEnrollmentResponseSlice(c *Client, des, nw []WorkloadSaaEnrollmentResponse) []WorkloadSaaEnrollmentResponse {
+	if des == nil {
+		return nw
+	}
+
+	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
+	// Return the original array.
+	if len(des) != len(nw) {
+		return nw
+	}
+
+	var items []WorkloadSaaEnrollmentResponse
+	for i, d := range des {
+		n := nw[i]
+		items = append(items, *canonicalizeNewWorkloadSaaEnrollmentResponse(c, &d, &n))
+	}
+
+	return items
+}
+
+// canonicalizeWorkloadComplianceStatus merges the desired object with the
+// initial (current) state, preferring the initial value wherever the desired
+// value is zero/empty so equivalent states produce no diff.
+func canonicalizeWorkloadComplianceStatus(des, initial *WorkloadComplianceStatus, opts ...dcl.ApplyOption) *WorkloadComplianceStatus {
+	if des == nil {
+		return initial
+	}
+	if des.empty {
+		return des
+	}
+
+	if initial == nil {
+		return des
+	}
+
+	cDes := &WorkloadComplianceStatus{}
+
+	if dcl.IsZeroValue(des.ActiveViolationCount) || (dcl.IsEmptyValueIndirect(des.ActiveViolationCount) && dcl.IsEmptyValueIndirect(initial.ActiveViolationCount)) {
+		// Desired and initial values are equivalent, so set canonical desired value to initial value.
+		cDes.ActiveViolationCount = initial.ActiveViolationCount
+	} else {
+		cDes.ActiveViolationCount = des.ActiveViolationCount
+	}
+	if dcl.IsZeroValue(des.AcknowledgedViolationCount) || (dcl.IsEmptyValueIndirect(des.AcknowledgedViolationCount) && dcl.IsEmptyValueIndirect(initial.AcknowledgedViolationCount)) {
+		// Desired and initial values are equivalent, so set canonical desired value to initial value.
+		cDes.AcknowledgedViolationCount = initial.AcknowledgedViolationCount
+	} else {
+		cDes.AcknowledgedViolationCount = des.AcknowledgedViolationCount
+	}
+
+	return cDes
+}
+
+// canonicalizeWorkloadComplianceStatusSlice canonicalizes each desired element
+// against the initial element at the same index; when lengths differ, desired
+// elements are canonicalized against nil.
+func canonicalizeWorkloadComplianceStatusSlice(des, initial []WorkloadComplianceStatus, opts ...dcl.ApplyOption) []WorkloadComplianceStatus {
+	if dcl.IsEmptyValueIndirect(des) {
+		return initial
+	}
+
+	if len(des) != len(initial) {
+
+		items := make([]WorkloadComplianceStatus, 0, len(des))
+		for _, d := range des {
+			cd := canonicalizeWorkloadComplianceStatus(&d, nil, opts...)
+			if cd != nil {
+				items = append(items, *cd)
+			}
+		}
+		return items
+	}
+
+	items := make([]WorkloadComplianceStatus, 0, len(des))
+	for i, d := range des {
+		cd := canonicalizeWorkloadComplianceStatus(&d, &initial[i], opts...)
+		if cd != nil {
+			items = append(items, *cd)
+		}
+	}
+	return items
+
+}
+
+// canonicalizeNewWorkloadComplianceStatus reconciles the desired object with
+// the post-apply (nw) object, preferring the server value; a nil server value
+// is only overridden by an explicitly empty desired object.
+func canonicalizeNewWorkloadComplianceStatus(c *Client, des, nw *WorkloadComplianceStatus) *WorkloadComplianceStatus {
+
+	if des == nil {
+		return nw
+	}
+
+	if nw == nil {
+		if dcl.IsEmptyValueIndirect(des) {
+			c.Config.Logger.Info("Found explicitly empty value for WorkloadComplianceStatus while comparing non-nil desired to nil actual. Returning desired object.")
+			return des
+		}
+		return nil
+	}
+
+	return nw
+}
+
+// canonicalizeNewWorkloadComplianceStatusSet applies set semantics: desired
+// elements are matched to actual elements by deep comparison, matched pairs
+// are canonicalized, and unmatched actual elements are kept.
+func canonicalizeNewWorkloadComplianceStatusSet(c *Client, des, nw []WorkloadComplianceStatus) []WorkloadComplianceStatus {
+	if des == nil {
+		return nw
+	}
+
+	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
+	var items []WorkloadComplianceStatus
+	for _, d := range des {
+		matchedIndex := -1
+		for i, n := range nw {
+			if diffs, _ := compareWorkloadComplianceStatusNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
+				matchedIndex = i
+				break
+			}
+		}
+		if matchedIndex != -1 {
+			items = append(items, *canonicalizeNewWorkloadComplianceStatus(c, &d, &nw[matchedIndex]))
+			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
+		}
+	}
+	// Also include elements in nw that are not matched in des.
+	items = append(items, nw...)
+
+	return items
+}
+
+// canonicalizeNewWorkloadComplianceStatusSlice canonicalizes desired vs.
+// actual slices element-wise by index; unequal lengths are returned as-is so
+// the differ can report them.
+func canonicalizeNewWorkloadComplianceStatusSlice(c *Client, des, nw []WorkloadComplianceStatus) []WorkloadComplianceStatus {
+	if des == nil {
+		return nw
+	}
+
+	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
+	// Return the original array.
+	if len(des) != len(nw) {
+		return nw
+	}
+
+	var items []WorkloadComplianceStatus
+	for i, d := range des {
+		n := nw[i]
+		items = append(items, *canonicalizeNewWorkloadComplianceStatus(c, &d, &n))
+	}
+
+	return items
+}
+
+// canonicalizeWorkloadPartnerPermissions merges the desired object with the
+// initial (current) state; booleans use dcl.BoolCanonicalize so equivalent
+// values fall back to the initial value and produce no diff.
+func canonicalizeWorkloadPartnerPermissions(des, initial *WorkloadPartnerPermissions, opts ...dcl.ApplyOption) *WorkloadPartnerPermissions {
+	if des == nil {
+		return initial
+	}
+	if des.empty {
+		return des
+	}
+
+	if initial == nil {
+		return des
+	}
+
+	cDes := &WorkloadPartnerPermissions{}
+
+	if dcl.BoolCanonicalize(des.DataLogsViewer, initial.DataLogsViewer) || dcl.IsZeroValue(des.DataLogsViewer) {
+		cDes.DataLogsViewer = initial.DataLogsViewer
+	} else {
+		cDes.DataLogsViewer = des.DataLogsViewer
+	}
+	if dcl.BoolCanonicalize(des.ServiceAccessApprover, initial.ServiceAccessApprover) || dcl.IsZeroValue(des.ServiceAccessApprover) {
+		cDes.ServiceAccessApprover = initial.ServiceAccessApprover
+	} else {
+		cDes.ServiceAccessApprover = des.ServiceAccessApprover
+	}
+	if dcl.BoolCanonicalize(des.AssuredWorkloadsMonitoring, initial.AssuredWorkloadsMonitoring) || dcl.IsZeroValue(des.AssuredWorkloadsMonitoring) {
+		cDes.AssuredWorkloadsMonitoring = initial.AssuredWorkloadsMonitoring
+	} else {
+		cDes.AssuredWorkloadsMonitoring = des.AssuredWorkloadsMonitoring
+	}
+
+	return cDes
+}
+
+// canonicalizeWorkloadPartnerPermissionsSlice canonicalizes each desired
+// element against the initial element at the same index; when lengths differ,
+// desired elements are canonicalized against nil.
+func canonicalizeWorkloadPartnerPermissionsSlice(des, initial []WorkloadPartnerPermissions, opts ...dcl.ApplyOption) []WorkloadPartnerPermissions {
+	if dcl.IsEmptyValueIndirect(des) {
+		return initial
+	}
+
+	if len(des) != len(initial) {
+
+		items := make([]WorkloadPartnerPermissions, 0, len(des))
+		for _, d := range des {
+			cd := canonicalizeWorkloadPartnerPermissions(&d, nil, opts...)
+			if cd != nil {
+				items = append(items, *cd)
+			}
+		}
+		return items
+	}
+
+	items := make([]WorkloadPartnerPermissions, 0, len(des))
+	for i, d := range des {
+		cd := canonicalizeWorkloadPartnerPermissions(&d, &initial[i], opts...)
+		if cd != nil {
+			items = append(items, *cd)
+		}
+	}
+	return items
+
+}
+
+// canonicalizeNewWorkloadPartnerPermissions reconciles the desired object with
+// the post-apply (nw) object; boolean fields that canonicalize equal keep the
+// desired representation.
+func canonicalizeNewWorkloadPartnerPermissions(c *Client, des, nw *WorkloadPartnerPermissions) *WorkloadPartnerPermissions {
+
+	if des == nil {
+		return nw
+	}
+
+	if nw == nil {
+		if dcl.IsEmptyValueIndirect(des) {
+			c.Config.Logger.Info("Found explicitly empty value for WorkloadPartnerPermissions while comparing non-nil desired to nil actual. Returning desired object.")
+			return des
+		}
+		return nil
+	}
+
+	if dcl.BoolCanonicalize(des.DataLogsViewer, nw.DataLogsViewer) {
+		nw.DataLogsViewer = des.DataLogsViewer
+	}
+	if dcl.BoolCanonicalize(des.ServiceAccessApprover, nw.ServiceAccessApprover) {
+		nw.ServiceAccessApprover = des.ServiceAccessApprover
+	}
+	if dcl.BoolCanonicalize(des.AssuredWorkloadsMonitoring, nw.AssuredWorkloadsMonitoring) {
+		nw.AssuredWorkloadsMonitoring = des.AssuredWorkloadsMonitoring
+	}
+
+	return nw
+}
+
+// canonicalizeNewWorkloadPartnerPermissionsSet applies set semantics: desired
+// elements are matched to actual elements by deep comparison, matched pairs
+// are canonicalized, and unmatched actual elements are kept.
+func canonicalizeNewWorkloadPartnerPermissionsSet(c *Client, des, nw []WorkloadPartnerPermissions) []WorkloadPartnerPermissions {
+	if des == nil {
+		return nw
+	}
+
+	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
+	var items []WorkloadPartnerPermissions
+	for _, d := range des {
+		matchedIndex := -1
+		for i, n := range nw {
+			if diffs, _ := compareWorkloadPartnerPermissionsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
+				matchedIndex = i
+				break
+			}
+		}
+		if matchedIndex != -1 {
+			items = append(items, *canonicalizeNewWorkloadPartnerPermissions(c, &d, &nw[matchedIndex]))
+			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
+		}
+	}
+	// Also include elements in nw that are not matched in des.
+	items = append(items, nw...)
+
+	return items
+}
+
+// canonicalizeNewWorkloadPartnerPermissionsSlice canonicalizes desired vs.
+// actual slices element-wise by index; unequal lengths are returned as-is so
+// the differ can report them.
+func canonicalizeNewWorkloadPartnerPermissionsSlice(c *Client, des, nw []WorkloadPartnerPermissions) []WorkloadPartnerPermissions {
+	if des == nil {
+		return nw
+	}
+
+	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
+	// Return the original array.
+	if len(des) != len(nw) {
+		return nw
+	}
+
+	var items []WorkloadPartnerPermissions
+	for i, d := range des {
+		n := nw[i]
+		items = append(items, *canonicalizeNewWorkloadPartnerPermissions(c, &d, &n))
+	}
+
+	return items
+}
+
+// canonicalizeWorkloadWorkloadOptions merges the desired object with the
+// initial (current) state, preferring the initial value wherever the desired
+// value is zero/empty so equivalent states produce no diff.
+func canonicalizeWorkloadWorkloadOptions(des, initial *WorkloadWorkloadOptions, opts ...dcl.ApplyOption) *WorkloadWorkloadOptions {
+	if des == nil {
+		return initial
+	}
+	if des.empty {
+		return des
+	}
+
+	if initial == nil {
+		return des
+	}
+
+	cDes := &WorkloadWorkloadOptions{}
+
+	if dcl.IsZeroValue(des.KajEnrollmentType) || (dcl.IsEmptyValueIndirect(des.KajEnrollmentType) && dcl.IsEmptyValueIndirect(initial.KajEnrollmentType)) {
+		// Desired and initial values are equivalent, so set canonical desired value to initial value.
+		cDes.KajEnrollmentType = initial.KajEnrollmentType
+	} else {
+		cDes.KajEnrollmentType = des.KajEnrollmentType
+	}
+
+	return cDes
+}
+
+// canonicalizeWorkloadWorkloadOptionsSlice canonicalizes each desired element
+// against the initial element at the same index; when lengths differ, desired
+// elements are canonicalized against nil.
+func canonicalizeWorkloadWorkloadOptionsSlice(des, initial []WorkloadWorkloadOptions, opts ...dcl.ApplyOption) []WorkloadWorkloadOptions {
+	if dcl.IsEmptyValueIndirect(des) {
+		return initial
+	}
+
+	if len(des) != len(initial) {
+
+		items := make([]WorkloadWorkloadOptions, 0, len(des))
+		for _, d := range des {
+			cd := canonicalizeWorkloadWorkloadOptions(&d, nil, opts...)
+			if cd != nil {
+				items = append(items, *cd)
+			}
+		}
+		return items
+	}
+
+	items := make([]WorkloadWorkloadOptions, 0, len(des))
+	for i, d := range des {
+		cd := canonicalizeWorkloadWorkloadOptions(&d, &initial[i], opts...)
+		if cd != nil {
+			items = append(items, *cd)
+		}
+	}
+	return items
+
+}
+
+// canonicalizeNewWorkloadWorkloadOptions reconciles the desired object with
+// the post-apply (nw) object, preferring the server value; a nil server value
+// is only overridden by an explicitly empty desired object.
+func canonicalizeNewWorkloadWorkloadOptions(c *Client, des, nw *WorkloadWorkloadOptions) *WorkloadWorkloadOptions {
+
+	if des == nil {
+		return nw
+	}
+
+	if nw == nil {
+		if dcl.IsEmptyValueIndirect(des) {
+			c.Config.Logger.Info("Found explicitly empty value for WorkloadWorkloadOptions while comparing non-nil desired to nil actual. Returning desired object.")
+			return des
+		}
+		return nil
+	}
+
+	return nw
+}
+
+// canonicalizeNewWorkloadWorkloadOptionsSet applies set semantics: desired
+// elements are matched to actual elements by deep comparison, matched pairs
+// are canonicalized, and unmatched actual elements are kept.
+func canonicalizeNewWorkloadWorkloadOptionsSet(c *Client, des, nw []WorkloadWorkloadOptions) []WorkloadWorkloadOptions {
+	if des == nil {
+		return nw
+	}
+
+	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
+	var items []WorkloadWorkloadOptions
+	for _, d := range des {
+		matchedIndex := -1
+		for i, n := range nw {
+			if diffs, _ := compareWorkloadWorkloadOptionsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
+				matchedIndex = i
+				break
+			}
+		}
+		if matchedIndex != -1 {
+			items = append(items, *canonicalizeNewWorkloadWorkloadOptions(c, &d, &nw[matchedIndex]))
+			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
+		}
+	}
+	// Also include elements in nw that are not matched in des.
+	items = append(items, nw...)
+
+	return items
+}
+
+// canonicalizeNewWorkloadWorkloadOptionsSlice canonicalizes desired vs. actual
+// slices element-wise by index; unequal lengths are returned as-is so the
+// differ can report them.
+func canonicalizeNewWorkloadWorkloadOptionsSlice(c *Client, des, nw []WorkloadWorkloadOptions) []WorkloadWorkloadOptions {
+	if des == nil {
+		return nw
+	}
+
+	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
+	// Return the original array.
+	if len(des) != len(nw) {
+		return nw
+	}
+
+	var items []WorkloadWorkloadOptions
+	for i, d := range des {
+		n := nw[i]
+		items = append(items, *canonicalizeNewWorkloadWorkloadOptions(c, &d, &n))
+	}
+
+	return items
+}
+
+// canonicalizeWorkloadEkmProvisioningResponse merges the desired object with
+// the initial (current) state, preferring the initial value wherever the
+// desired value is zero/empty so equivalent states produce no diff.
+func canonicalizeWorkloadEkmProvisioningResponse(des, initial *WorkloadEkmProvisioningResponse, opts ...dcl.ApplyOption) *WorkloadEkmProvisioningResponse {
+	if des == nil {
+		return initial
+	}
+	if des.empty {
+		return des
+	}
+
+	if initial == nil {
+		return des
+	}
+
+	cDes := &WorkloadEkmProvisioningResponse{}
+
+	if dcl.IsZeroValue(des.EkmProvisioningState) || (dcl.IsEmptyValueIndirect(des.EkmProvisioningState) && dcl.IsEmptyValueIndirect(initial.EkmProvisioningState)) {
+		// Desired and initial values are equivalent, so set canonical desired value to initial value.
+		cDes.EkmProvisioningState = initial.EkmProvisioningState
+	} else {
+		cDes.EkmProvisioningState = des.EkmProvisioningState
+	}
+	if dcl.IsZeroValue(des.EkmProvisioningErrorDomain) || (dcl.IsEmptyValueIndirect(des.EkmProvisioningErrorDomain) && dcl.IsEmptyValueIndirect(initial.EkmProvisioningErrorDomain)) {
+		// Desired and initial values are equivalent, so set canonical desired value to initial value.
+		cDes.EkmProvisioningErrorDomain = initial.EkmProvisioningErrorDomain
+	} else {
+		cDes.EkmProvisioningErrorDomain = des.EkmProvisioningErrorDomain
+	}
+	if dcl.IsZeroValue(des.EkmProvisioningErrorMapping) || (dcl.IsEmptyValueIndirect(des.EkmProvisioningErrorMapping) && dcl.IsEmptyValueIndirect(initial.EkmProvisioningErrorMapping)) {
+		// Desired and initial values are equivalent, so set canonical desired value to initial value.
+		cDes.EkmProvisioningErrorMapping = initial.EkmProvisioningErrorMapping
+	} else {
+		cDes.EkmProvisioningErrorMapping = des.EkmProvisioningErrorMapping
+	}
+
+	return cDes
+}
+
+// canonicalizeWorkloadEkmProvisioningResponseSlice canonicalizes each desired
+// element against the initial element at the same index; when lengths differ,
+// desired elements are canonicalized against nil.
+func canonicalizeWorkloadEkmProvisioningResponseSlice(des, initial []WorkloadEkmProvisioningResponse, opts ...dcl.ApplyOption) []WorkloadEkmProvisioningResponse {
+	if dcl.IsEmptyValueIndirect(des) {
+		return initial
+	}
+
+	if len(des) != len(initial) {
+
+		items := make([]WorkloadEkmProvisioningResponse, 0, len(des))
+		for _, d := range des {
+			cd := canonicalizeWorkloadEkmProvisioningResponse(&d, nil, opts...)
+			if cd != nil {
+				items = append(items, *cd)
+			}
+		}
+		return items
+	}
+
+	items := make([]WorkloadEkmProvisioningResponse, 0, len(des))
+	for i, d := range des {
+		cd := canonicalizeWorkloadEkmProvisioningResponse(&d, &initial[i], opts...)
+		if cd != nil {
+			items = append(items, *cd)
+		}
+	}
+	return items
+
+}
+
+// canonicalizeNewWorkloadEkmProvisioningResponse reconciles the desired object
+// with the post-apply (nw) object, preferring the server value; a nil server
+// value is only overridden by an explicitly empty desired object.
+func canonicalizeNewWorkloadEkmProvisioningResponse(c *Client, des, nw *WorkloadEkmProvisioningResponse) *WorkloadEkmProvisioningResponse {
+
+	if des == nil {
+		return nw
+	}
+
+	if nw == nil {
+		if dcl.IsEmptyValueIndirect(des) {
+			c.Config.Logger.Info("Found explicitly empty value for WorkloadEkmProvisioningResponse while comparing non-nil desired to nil actual. Returning desired object.")
+			return des
+		}
+		return nil
+	}
+
+	return nw
+}
+
+// canonicalizeNewWorkloadEkmProvisioningResponseSet applies set semantics:
+// desired elements are matched to actual elements by deep comparison, matched
+// pairs are canonicalized, and unmatched actual elements are kept.
+func canonicalizeNewWorkloadEkmProvisioningResponseSet(c *Client, des, nw []WorkloadEkmProvisioningResponse) []WorkloadEkmProvisioningResponse {
+	if des == nil {
+		return nw
+	}
+
+	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
+	var items []WorkloadEkmProvisioningResponse
+	for _, d := range des {
+		matchedIndex := -1
+		for i, n := range nw {
+			if diffs, _ := compareWorkloadEkmProvisioningResponseNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
+				matchedIndex = i
+				break
+			}
+		}
+		if matchedIndex != -1 {
+			items = append(items, *canonicalizeNewWorkloadEkmProvisioningResponse(c, &d, &nw[matchedIndex]))
+			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
+		}
+	}
+	// Also include elements in nw that are not matched in des.
+	items = append(items, nw...)
+
+	return items
+}
+
+// canonicalizeNewWorkloadEkmProvisioningResponseSlice canonicalizes desired
+// vs. actual slices element-wise by index; unequal lengths are returned as-is
+// so the differ can report them.
+func canonicalizeNewWorkloadEkmProvisioningResponseSlice(c *Client, des, nw []WorkloadEkmProvisioningResponse) []WorkloadEkmProvisioningResponse {
+	if des == nil {
+		return nw
+	}
+
+	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
+	// Return the original array.
+	if len(des) != len(nw) {
+		return nw
+	}
+
+	var items []WorkloadEkmProvisioningResponse
+	for i, d := range des {
+		n := nw[i]
+		items = append(items, *canonicalizeNewWorkloadEkmProvisioningResponse(c, &d, &n))
+	}
+
+	return items
+}
+
+// The differ returns a list of diffs, along with a list of operations that should be taken
+// to remedy them. Right now, it does not attempt to consolidate operations - if several
+// fields can be fixed with a patch update, it will perform the patch several times.
+// Diffs on some fields will be ignored if the `desired` state has an empty (nil)
+// value. This empty value indicates that the user does not care about the state for
+// the field. Empty fields on the actual object will cause diffs.
+// TODO(magic-modules-eng): for efficiency in some resources, add batching.
+// diffWorkload compares desired against actual field by field and returns the
+// list of field diffs; each DiffInfo encodes whether the field is output-only,
+// ignored, or triggers a recreate/update operation when it differs.
+func diffWorkload(c *Client, desired, actual *Workload, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) {
+	if desired == nil || actual == nil {
+		return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual)
+	}
+
+	c.Config.Logger.Infof("Diff function called with desired state: %v", desired)
+	c.Config.Logger.Infof("Diff function called with actual state: %v", actual)
+
+	var fn dcl.FieldName
+	var newDiffs []*dcl.FieldDiff
+	// New style diffs.
+	if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.DisplayName, actual.DisplayName, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateWorkloadUpdateWorkloadOperation")}, fn.AddNest("DisplayName")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.Resources, actual.Resources, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareWorkloadResourcesNewStyle, EmptyObject: EmptyWorkloadResources, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Resources")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.ComplianceRegime, actual.ComplianceRegime, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ComplianceRegime")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.CreateTime, actual.CreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreateTime")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.BillingAccount, actual.BillingAccount, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("BillingAccount")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.PartnerServicesBillingAccount, actual.PartnerServicesBillingAccount, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PartnerServicesBillingAccount")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.Labels, actual.Labels, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateWorkloadUpdateWorkloadOperation")}, fn.AddNest("Labels")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.ProvisionedResourcesParent, actual.ProvisionedResourcesParent, dcl.DiffInfo{Ignore: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ProvisionedResourcesParent")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.KmsSettings, actual.KmsSettings, dcl.DiffInfo{Ignore: true, ObjectFunction: compareWorkloadKmsSettingsNewStyle, EmptyObject: EmptyWorkloadKmsSettings, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KmsSettings")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.ResourceSettings, actual.ResourceSettings, dcl.DiffInfo{Ignore: true, ObjectFunction: compareWorkloadResourceSettingsNewStyle, EmptyObject: EmptyWorkloadResourceSettings, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ResourceSettings")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.KajEnrollmentState, actual.KajEnrollmentState, dcl.DiffInfo{OutputOnly: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KajEnrollmentState")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.EnableSovereignControls, actual.EnableSovereignControls, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("EnableSovereignControls")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.SaaEnrollmentResponse, actual.SaaEnrollmentResponse, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareWorkloadSaaEnrollmentResponseNewStyle, EmptyObject: EmptyWorkloadSaaEnrollmentResponse, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SaaEnrollmentResponse")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.ComplianceStatus, actual.ComplianceStatus, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareWorkloadComplianceStatusNewStyle, EmptyObject: EmptyWorkloadComplianceStatus, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ComplianceStatus")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.CompliantButDisallowedServices, actual.CompliantButDisallowedServices, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CompliantButDisallowedServices")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.Partner, actual.Partner, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Partner")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.PartnerPermissions, actual.PartnerPermissions, dcl.DiffInfo{ObjectFunction: compareWorkloadPartnerPermissionsNewStyle, EmptyObject: EmptyWorkloadPartnerPermissions, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PartnerPermissions")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.WorkloadOptions, actual.WorkloadOptions, dcl.DiffInfo{Ignore: true, ObjectFunction: compareWorkloadWorkloadOptionsNewStyle, EmptyObject: EmptyWorkloadWorkloadOptions, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("WorkloadOptions")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.EkmProvisioningResponse, actual.EkmProvisioningResponse, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareWorkloadEkmProvisioningResponseNewStyle, EmptyObject: EmptyWorkloadEkmProvisioningResponse, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("EkmProvisioningResponse")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.ViolationNotificationsEnabled, actual.ViolationNotificationsEnabled, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ViolationNotificationsEnabled")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.Organization, actual.Organization, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Organization")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.Location, actual.Location, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Location")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if len(newDiffs) > 0 {
+		c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs)
+	}
+	return newDiffs, nil
+}
+
+// compareWorkloadResourcesNewStyle is the ObjectFunction used by dcl.Diff for
+// WorkloadResources values; it accepts either pointer or value arguments.
+func compareWorkloadResourcesNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
+	var diffs []*dcl.FieldDiff
+
+	desired, ok := d.(*WorkloadResources)
+	if !ok {
+		desiredNotPointer, ok := d.(WorkloadResources)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a WorkloadResources or *WorkloadResources", d)
+		}
+		desired = &desiredNotPointer
+	}
+	actual, ok := a.(*WorkloadResources)
+	if !ok {
+		actualNotPointer, ok := a.(WorkloadResources)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a WorkloadResources", a)
+		}
+		actual = &actualNotPointer
+	}
+
+	if ds, err := dcl.Diff(desired.ResourceId, actual.ResourceId, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ResourceId")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.ResourceType, actual.ResourceType, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ResourceType")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+	return diffs, nil
+}
+
+// compareWorkloadKmsSettingsNewStyle is the ObjectFunction used by dcl.Diff
+// for WorkloadKmsSettings values; it accepts either pointer or value arguments.
+func compareWorkloadKmsSettingsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
+	var diffs []*dcl.FieldDiff
+
+	desired, ok := d.(*WorkloadKmsSettings)
+	if !ok {
+		desiredNotPointer, ok := d.(WorkloadKmsSettings)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a WorkloadKmsSettings or *WorkloadKmsSettings", d)
+		}
+		desired = &desiredNotPointer
+	}
+	actual, ok := a.(*WorkloadKmsSettings)
+	if !ok {
+		actualNotPointer, ok := a.(WorkloadKmsSettings)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a WorkloadKmsSettings", a)
+		}
+		actual = &actualNotPointer
+	}
+
+	if ds, err := dcl.Diff(desired.NextRotationTime, actual.NextRotationTime, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NextRotationTime")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.RotationPeriod, actual.RotationPeriod, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("RotationPeriod")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+	return diffs, nil
+}
+
+// compareWorkloadResourceSettingsNewStyle is the ObjectFunction used by
+// dcl.Diff for WorkloadResourceSettings values; it accepts either pointer or
+// value arguments.
+func compareWorkloadResourceSettingsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
+	var diffs []*dcl.FieldDiff
+
+	desired, ok := d.(*WorkloadResourceSettings)
+	if !ok {
+		desiredNotPointer, ok := d.(WorkloadResourceSettings)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a WorkloadResourceSettings or *WorkloadResourceSettings", d)
+		}
+		desired = &desiredNotPointer
+	}
+	actual, ok := a.(*WorkloadResourceSettings)
+	if !ok {
+		actualNotPointer, ok := a.(WorkloadResourceSettings)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a WorkloadResourceSettings", a)
+		}
+		actual = &actualNotPointer
+	}
+
+	if ds, err := dcl.Diff(desired.ResourceId, actual.ResourceId, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ResourceId")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.ResourceType, actual.ResourceType, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ResourceType")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.DisplayName, actual.DisplayName, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DisplayName")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+	return diffs, nil
+}
+
+// compareWorkloadSaaEnrollmentResponseNewStyle is the ObjectFunction used by
+// dcl.Diff for WorkloadSaaEnrollmentResponse values; it accepts either pointer
+// or value arguments.
+func compareWorkloadSaaEnrollmentResponseNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
+	var diffs []*dcl.FieldDiff
+
+	desired, ok := d.(*WorkloadSaaEnrollmentResponse)
+	if !ok {
+		desiredNotPointer, ok := d.(WorkloadSaaEnrollmentResponse)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a WorkloadSaaEnrollmentResponse or *WorkloadSaaEnrollmentResponse", d)
+		}
+		desired = &desiredNotPointer
+	}
+	actual, ok := a.(*WorkloadSaaEnrollmentResponse)
+	if !ok {
+		actualNotPointer, ok := a.(WorkloadSaaEnrollmentResponse)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a WorkloadSaaEnrollmentResponse", a)
+		}
+		actual = &actualNotPointer
+	}
+
+	if ds, err := dcl.Diff(desired.SetupErrors, actual.SetupErrors, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SetupErrors")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.SetupStatus, actual.SetupStatus, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SetupStatus")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+	return diffs, nil
+}
+
+// compareWorkloadComplianceStatusNewStyle is the ObjectFunction used by
+// dcl.Diff for WorkloadComplianceStatus values; it accepts either pointer or
+// value arguments.
+func compareWorkloadComplianceStatusNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
+	var diffs []*dcl.FieldDiff
+
+	desired, ok := d.(*WorkloadComplianceStatus)
+	if !ok {
+		desiredNotPointer, ok := d.(WorkloadComplianceStatus)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a WorkloadComplianceStatus or *WorkloadComplianceStatus", d)
+		}
+		desired = &desiredNotPointer
+	}
+	actual, ok := a.(*WorkloadComplianceStatus)
+	if !ok {
+		actualNotPointer, ok := a.(WorkloadComplianceStatus)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a WorkloadComplianceStatus", a)
+		}
+		actual = &actualNotPointer
+	}
+
+	if ds, err := dcl.Diff(desired.ActiveViolationCount, actual.ActiveViolationCount, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ActiveViolationCount")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.AcknowledgedViolationCount, actual.AcknowledgedViolationCount, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AcknowledgedViolationCount")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+	return diffs, nil
+}
+
+// compareWorkloadPartnerPermissionsNewStyle is the ObjectFunction used by
+// dcl.Diff for WorkloadPartnerPermissions values; it accepts either pointer or
+// value arguments.
+func compareWorkloadPartnerPermissionsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
+	var diffs []*dcl.FieldDiff
+
+	desired, ok := d.(*WorkloadPartnerPermissions)
+	if !ok {
+		desiredNotPointer, ok := d.(WorkloadPartnerPermissions)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a WorkloadPartnerPermissions or *WorkloadPartnerPermissions", d)
+		}
+		desired = &desiredNotPointer
+	}
+	actual, ok := a.(*WorkloadPartnerPermissions)
+	if !ok {
+		actualNotPointer, ok := a.(WorkloadPartnerPermissions)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a WorkloadPartnerPermissions", a)
+		}
+		actual = &actualNotPointer
+	}
+
+	if ds, err := dcl.Diff(desired.DataLogsViewer, actual.DataLogsViewer, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DataLogsViewer")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.ServiceAccessApprover, actual.ServiceAccessApprover, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ServiceAccessApprover")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.AssuredWorkloadsMonitoring, actual.AssuredWorkloadsMonitoring, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AssuredWorkloadsMonitoring")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+	return diffs, nil
+}
+
+// compareWorkloadWorkloadOptionsNewStyle is the ObjectFunction used by
+// dcl.Diff for WorkloadWorkloadOptions values; it accepts either pointer or
+// value arguments.
+func compareWorkloadWorkloadOptionsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
+	var diffs []*dcl.FieldDiff
+
+	desired, ok := d.(*WorkloadWorkloadOptions)
+	if !ok {
+		desiredNotPointer, ok := d.(WorkloadWorkloadOptions)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a WorkloadWorkloadOptions or *WorkloadWorkloadOptions", d)
+		}
+		desired = &desiredNotPointer
+	}
+	actual, ok := a.(*WorkloadWorkloadOptions)
+	if !ok {
+		actualNotPointer, ok := a.(WorkloadWorkloadOptions)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a WorkloadWorkloadOptions", a)
+		}
+		actual = &actualNotPointer
+	}
+
+	if ds, err := dcl.Diff(desired.KajEnrollmentType, actual.KajEnrollmentType, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KajEnrollmentType")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+	return diffs, nil
+}
+
+// compareWorkloadEkmProvisioningResponseNewStyle is the ObjectFunction used
+// by dcl.Diff for WorkloadEkmProvisioningResponse values; it accepts either
+// pointer or value arguments.
+func compareWorkloadEkmProvisioningResponseNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
+	var diffs []*dcl.FieldDiff
+
+	desired, ok := d.(*WorkloadEkmProvisioningResponse)
+	if !ok {
+		desiredNotPointer, ok := d.(WorkloadEkmProvisioningResponse)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a WorkloadEkmProvisioningResponse or *WorkloadEkmProvisioningResponse", d)
+		}
+		desired = &desiredNotPointer
+	}
+	actual, ok := a.(*WorkloadEkmProvisioningResponse)
+	if !ok {
+		actualNotPointer, ok := a.(WorkloadEkmProvisioningResponse)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a WorkloadEkmProvisioningResponse", a)
+		}
+		actual = &actualNotPointer
+	}
+
+	if ds, err := dcl.Diff(desired.EkmProvisioningState, actual.EkmProvisioningState, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("EkmProvisioningState")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+ } + + if ds, err := dcl.Diff(desired.EkmProvisioningErrorDomain, actual.EkmProvisioningErrorDomain, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("EkmProvisioningErrorDomain")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.EkmProvisioningErrorMapping, actual.EkmProvisioningErrorMapping, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("EkmProvisioningErrorMapping")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +// urlNormalized returns a copy of the resource struct with values normalized +// for URL substitutions. For instance, it converts long-form self-links to +// short-form so they can be substituted in. +func (r *Workload) urlNormalized() *Workload { + normalized := dcl.Copy(*r).(Workload) + normalized.Name = dcl.SelfLinkToName(r.Name) + normalized.DisplayName = dcl.SelfLinkToName(r.DisplayName) + normalized.BillingAccount = dcl.SelfLinkToName(r.BillingAccount) + normalized.PartnerServicesBillingAccount = dcl.SelfLinkToName(r.PartnerServicesBillingAccount) + normalized.ProvisionedResourcesParent = dcl.SelfLinkToName(r.ProvisionedResourcesParent) + normalized.Organization = dcl.SelfLinkToName(r.Organization) + normalized.Location = dcl.SelfLinkToName(r.Location) + return &normalized +} + +func (r *Workload) updateURL(userBasePath, updateName string) (string, error) { + nr := r.urlNormalized() + if updateName == "UpdateWorkload" { + fields := map[string]interface{}{ + "organization": dcl.ValueOrEmptyString(nr.Organization), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("organizations/{{ "{{" }}organization{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/workloads/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, fields), nil + + } + + 
return "", fmt.Errorf("unknown update name: %s", updateName) +} + +// marshal encodes the Workload resource into JSON for a Create request, and +// performs transformations from the resource schema to the API schema if +// necessary. +func (r *Workload) marshal(c *Client) ([]byte, error) { + m, err := expandWorkload(c, r) + if err != nil { + return nil, fmt.Errorf("error marshalling Workload: %w", err) + } + + return json.Marshal(m) +} + +// unmarshalWorkload decodes JSON responses into the Workload resource schema. +func unmarshalWorkload(b []byte, c *Client, res *Workload) (*Workload, error) { + var m map[string]interface{} + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + return unmarshalMapWorkload(m, c, res) +} + +func unmarshalMapWorkload(m map[string]interface{}, c *Client, res *Workload) (*Workload, error) { + + flattened := flattenWorkload(c, m, res) + if flattened == nil { + return nil, fmt.Errorf("attempted to flatten empty json object") + } + return flattened, nil +} + +// expandWorkload expands Workload into a JSON request object. 
+func expandWorkload(c *Client, f *Workload) (map[string]interface{}, error) { + m := make(map[string]interface{}) + res := f + _ = res + if v, err := dcl.DeriveField("organizations/%s/locations/%s/workloads/%s", f.Name, dcl.SelfLinkToName(f.Organization), dcl.SelfLinkToName(f.Location), dcl.SelfLinkToName(f.Name)); err != nil { + return nil, fmt.Errorf("error expanding Name into name: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["name"] = v + } + if v := f.DisplayName; dcl.ValueShouldBeSent(v) { + m["displayName"] = v + } + if v := f.ComplianceRegime; dcl.ValueShouldBeSent(v) { + m["complianceRegime"] = v + } + if v := f.BillingAccount; dcl.ValueShouldBeSent(v) { + m["billingAccount"] = v + } + if v := f.PartnerServicesBillingAccount; dcl.ValueShouldBeSent(v) { + m["partnerServicesBillingAccount"] = v + } + if v := f.Labels; dcl.ValueShouldBeSent(v) { + m["labels"] = v + } + if v := f.ProvisionedResourcesParent; dcl.ValueShouldBeSent(v) { + m["provisionedResourcesParent"] = v + } + if v, err := expandWorkloadKmsSettings(c, f.KmsSettings, res); err != nil { + return nil, fmt.Errorf("error expanding KmsSettings into kmsSettings: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["kmsSettings"] = v + } + if v, err := expandWorkloadResourceSettingsSlice(c, f.ResourceSettings, res); err != nil { + return nil, fmt.Errorf("error expanding ResourceSettings into resourceSettings: %w", err) + } else if v != nil { + m["resourceSettings"] = v + } + if v := f.EnableSovereignControls; dcl.ValueShouldBeSent(v) { + m["enableSovereignControls"] = v + } + if v := f.Partner; dcl.ValueShouldBeSent(v) { + m["partner"] = v + } + if v, err := expandWorkloadPartnerPermissions(c, f.PartnerPermissions, res); err != nil { + return nil, fmt.Errorf("error expanding PartnerPermissions into partnerPermissions: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["partnerPermissions"] = v + } + if v, err := expandWorkloadWorkloadOptions(c, f.WorkloadOptions, res); err != 
nil { + return nil, fmt.Errorf("error expanding WorkloadOptions into workloadOptions: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["workloadOptions"] = v + } + if v := f.ViolationNotificationsEnabled; dcl.ValueShouldBeSent(v) { + m["violationNotificationsEnabled"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Organization into organization: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["organization"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Location into location: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["location"] = v + } + + return m, nil +} + +// flattenWorkload flattens Workload from a JSON request object into the +// Workload type. +func flattenWorkload(c *Client, i interface{}, res *Workload) *Workload { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + if len(m) == 0 { + return nil + } + + resultRes := &Workload{} + resultRes.Name = dcl.SelfLinkToName(dcl.FlattenString(m["name"])) + resultRes.DisplayName = dcl.FlattenString(m["displayName"]) + resultRes.Resources = flattenWorkloadResourcesSlice(c, m["resources"], res) + resultRes.ComplianceRegime = flattenWorkloadComplianceRegimeEnum(m["complianceRegime"]) + resultRes.CreateTime = dcl.FlattenString(m["createTime"]) + resultRes.BillingAccount = dcl.FlattenString(m["billingAccount"]) + resultRes.PartnerServicesBillingAccount = dcl.FlattenString(m["partnerServicesBillingAccount"]) + resultRes.Labels = dcl.FlattenKeyValuePairs(m["labels"]) + resultRes.ProvisionedResourcesParent = dcl.FlattenSecretValue(m["provisionedResourcesParent"]) + resultRes.KmsSettings = flattenWorkloadKmsSettings(c, m["kmsSettings"], res) + resultRes.ResourceSettings = flattenWorkloadResourceSettingsSlice(c, m["resourceSettings"], res) + resultRes.KajEnrollmentState = flattenWorkloadKajEnrollmentStateEnum(m["kajEnrollmentState"]) + resultRes.EnableSovereignControls = 
dcl.FlattenBool(m["enableSovereignControls"]) + resultRes.SaaEnrollmentResponse = flattenWorkloadSaaEnrollmentResponse(c, m["saaEnrollmentResponse"], res) + resultRes.ComplianceStatus = flattenWorkloadComplianceStatus(c, m["complianceStatus"], res) + resultRes.CompliantButDisallowedServices = dcl.FlattenStringSlice(m["compliantButDisallowedServices"]) + resultRes.Partner = flattenWorkloadPartnerEnum(m["partner"]) + resultRes.PartnerPermissions = flattenWorkloadPartnerPermissions(c, m["partnerPermissions"], res) + resultRes.WorkloadOptions = flattenWorkloadWorkloadOptions(c, m["workloadOptions"], res) + resultRes.EkmProvisioningResponse = flattenWorkloadEkmProvisioningResponse(c, m["ekmProvisioningResponse"], res) + resultRes.ViolationNotificationsEnabled = dcl.FlattenBool(m["violationNotificationsEnabled"]) + resultRes.Organization = dcl.FlattenString(m["organization"]) + resultRes.Location = dcl.FlattenString(m["location"]) + + return resultRes +} + +// expandWorkloadResourcesMap expands the contents of WorkloadResources into a JSON +// request object. +func expandWorkloadResourcesMap(c *Client, f map[string]WorkloadResources, res *Workload) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkloadResources(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkloadResourcesSlice expands the contents of WorkloadResources into a JSON +// request object. 
+func expandWorkloadResourcesSlice(c *Client, f []WorkloadResources, res *Workload) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkloadResources(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkloadResourcesMap flattens the contents of WorkloadResources from a JSON +// response object. +func flattenWorkloadResourcesMap(c *Client, i interface{}, res *Workload) map[string]WorkloadResources { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkloadResources{} + } + + if len(a) == 0 { + return map[string]WorkloadResources{} + } + + items := make(map[string]WorkloadResources) + for k, item := range a { + items[k] = *flattenWorkloadResources(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkloadResourcesSlice flattens the contents of WorkloadResources from a JSON +// response object. +func flattenWorkloadResourcesSlice(c *Client, i interface{}, res *Workload) []WorkloadResources { + a, ok := i.([]interface{}) + if !ok { + return []WorkloadResources{} + } + + if len(a) == 0 { + return []WorkloadResources{} + } + + items := make([]WorkloadResources, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkloadResources(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkloadResources expands an instance of WorkloadResources into a JSON +// request object. 
+func expandWorkloadResources(c *Client, f *WorkloadResources, res *Workload) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.ResourceId; !dcl.IsEmptyValueIndirect(v) { + m["resourceId"] = v + } + if v := f.ResourceType; !dcl.IsEmptyValueIndirect(v) { + m["resourceType"] = v + } + + return m, nil +} + +// flattenWorkloadResources flattens an instance of WorkloadResources from a JSON +// response object. +func flattenWorkloadResources(c *Client, i interface{}, res *Workload) *WorkloadResources { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkloadResources{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkloadResources + } + r.ResourceId = dcl.FlattenInteger(m["resourceId"]) + r.ResourceType = flattenWorkloadResourcesResourceTypeEnum(m["resourceType"]) + + return r +} + +// expandWorkloadKmsSettingsMap expands the contents of WorkloadKmsSettings into a JSON +// request object. +func expandWorkloadKmsSettingsMap(c *Client, f map[string]WorkloadKmsSettings, res *Workload) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkloadKmsSettings(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkloadKmsSettingsSlice expands the contents of WorkloadKmsSettings into a JSON +// request object. 
+func expandWorkloadKmsSettingsSlice(c *Client, f []WorkloadKmsSettings, res *Workload) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkloadKmsSettings(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkloadKmsSettingsMap flattens the contents of WorkloadKmsSettings from a JSON +// response object. +func flattenWorkloadKmsSettingsMap(c *Client, i interface{}, res *Workload) map[string]WorkloadKmsSettings { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkloadKmsSettings{} + } + + if len(a) == 0 { + return map[string]WorkloadKmsSettings{} + } + + items := make(map[string]WorkloadKmsSettings) + for k, item := range a { + items[k] = *flattenWorkloadKmsSettings(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkloadKmsSettingsSlice flattens the contents of WorkloadKmsSettings from a JSON +// response object. +func flattenWorkloadKmsSettingsSlice(c *Client, i interface{}, res *Workload) []WorkloadKmsSettings { + a, ok := i.([]interface{}) + if !ok { + return []WorkloadKmsSettings{} + } + + if len(a) == 0 { + return []WorkloadKmsSettings{} + } + + items := make([]WorkloadKmsSettings, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkloadKmsSettings(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkloadKmsSettings expands an instance of WorkloadKmsSettings into a JSON +// request object. 
+func expandWorkloadKmsSettings(c *Client, f *WorkloadKmsSettings, res *Workload) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.NextRotationTime; !dcl.IsEmptyValueIndirect(v) { + m["nextRotationTime"] = v + } + if v := f.RotationPeriod; !dcl.IsEmptyValueIndirect(v) { + m["rotationPeriod"] = v + } + + return m, nil +} + +// flattenWorkloadKmsSettings flattens an instance of WorkloadKmsSettings from a JSON +// response object. +func flattenWorkloadKmsSettings(c *Client, i interface{}, res *Workload) *WorkloadKmsSettings { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkloadKmsSettings{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkloadKmsSettings + } + r.NextRotationTime = dcl.FlattenString(m["nextRotationTime"]) + r.RotationPeriod = dcl.FlattenString(m["rotationPeriod"]) + + return r +} + +// expandWorkloadResourceSettingsMap expands the contents of WorkloadResourceSettings into a JSON +// request object. +func expandWorkloadResourceSettingsMap(c *Client, f map[string]WorkloadResourceSettings, res *Workload) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkloadResourceSettings(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkloadResourceSettingsSlice expands the contents of WorkloadResourceSettings into a JSON +// request object. 
+func expandWorkloadResourceSettingsSlice(c *Client, f []WorkloadResourceSettings, res *Workload) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkloadResourceSettings(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkloadResourceSettingsMap flattens the contents of WorkloadResourceSettings from a JSON +// response object. +func flattenWorkloadResourceSettingsMap(c *Client, i interface{}, res *Workload) map[string]WorkloadResourceSettings { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkloadResourceSettings{} + } + + if len(a) == 0 { + return map[string]WorkloadResourceSettings{} + } + + items := make(map[string]WorkloadResourceSettings) + for k, item := range a { + items[k] = *flattenWorkloadResourceSettings(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkloadResourceSettingsSlice flattens the contents of WorkloadResourceSettings from a JSON +// response object. +func flattenWorkloadResourceSettingsSlice(c *Client, i interface{}, res *Workload) []WorkloadResourceSettings { + a, ok := i.([]interface{}) + if !ok { + return []WorkloadResourceSettings{} + } + + if len(a) == 0 { + return []WorkloadResourceSettings{} + } + + items := make([]WorkloadResourceSettings, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkloadResourceSettings(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkloadResourceSettings expands an instance of WorkloadResourceSettings into a JSON +// request object. 
+func expandWorkloadResourceSettings(c *Client, f *WorkloadResourceSettings, res *Workload) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.ResourceId; !dcl.IsEmptyValueIndirect(v) { + m["resourceId"] = v + } + if v := f.ResourceType; !dcl.IsEmptyValueIndirect(v) { + m["resourceType"] = v + } + if v := f.DisplayName; !dcl.IsEmptyValueIndirect(v) { + m["displayName"] = v + } + + return m, nil +} + +// flattenWorkloadResourceSettings flattens an instance of WorkloadResourceSettings from a JSON +// response object. +func flattenWorkloadResourceSettings(c *Client, i interface{}, res *Workload) *WorkloadResourceSettings { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkloadResourceSettings{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkloadResourceSettings + } + r.ResourceId = dcl.FlattenString(m["resourceId"]) + r.ResourceType = flattenWorkloadResourceSettingsResourceTypeEnum(m["resourceType"]) + r.DisplayName = dcl.FlattenString(m["displayName"]) + + return r +} + +// expandWorkloadSaaEnrollmentResponseMap expands the contents of WorkloadSaaEnrollmentResponse into a JSON +// request object. +func expandWorkloadSaaEnrollmentResponseMap(c *Client, f map[string]WorkloadSaaEnrollmentResponse, res *Workload) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkloadSaaEnrollmentResponse(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkloadSaaEnrollmentResponseSlice expands the contents of WorkloadSaaEnrollmentResponse into a JSON +// request object. 
+func expandWorkloadSaaEnrollmentResponseSlice(c *Client, f []WorkloadSaaEnrollmentResponse, res *Workload) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkloadSaaEnrollmentResponse(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkloadSaaEnrollmentResponseMap flattens the contents of WorkloadSaaEnrollmentResponse from a JSON +// response object. +func flattenWorkloadSaaEnrollmentResponseMap(c *Client, i interface{}, res *Workload) map[string]WorkloadSaaEnrollmentResponse { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkloadSaaEnrollmentResponse{} + } + + if len(a) == 0 { + return map[string]WorkloadSaaEnrollmentResponse{} + } + + items := make(map[string]WorkloadSaaEnrollmentResponse) + for k, item := range a { + items[k] = *flattenWorkloadSaaEnrollmentResponse(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkloadSaaEnrollmentResponseSlice flattens the contents of WorkloadSaaEnrollmentResponse from a JSON +// response object. +func flattenWorkloadSaaEnrollmentResponseSlice(c *Client, i interface{}, res *Workload) []WorkloadSaaEnrollmentResponse { + a, ok := i.([]interface{}) + if !ok { + return []WorkloadSaaEnrollmentResponse{} + } + + if len(a) == 0 { + return []WorkloadSaaEnrollmentResponse{} + } + + items := make([]WorkloadSaaEnrollmentResponse, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkloadSaaEnrollmentResponse(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkloadSaaEnrollmentResponse expands an instance of WorkloadSaaEnrollmentResponse into a JSON +// request object. 
+func expandWorkloadSaaEnrollmentResponse(c *Client, f *WorkloadSaaEnrollmentResponse, res *Workload) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.SetupErrors; v != nil { + m["setupErrors"] = v + } + if v := f.SetupStatus; !dcl.IsEmptyValueIndirect(v) { + m["setupStatus"] = v + } + + return m, nil +} + +// flattenWorkloadSaaEnrollmentResponse flattens an instance of WorkloadSaaEnrollmentResponse from a JSON +// response object. +func flattenWorkloadSaaEnrollmentResponse(c *Client, i interface{}, res *Workload) *WorkloadSaaEnrollmentResponse { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkloadSaaEnrollmentResponse{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkloadSaaEnrollmentResponse + } + r.SetupErrors = flattenWorkloadSaaEnrollmentResponseSetupErrorsEnumSlice(c, m["setupErrors"], res) + r.SetupStatus = flattenWorkloadSaaEnrollmentResponseSetupStatusEnum(m["setupStatus"]) + + return r +} + +// expandWorkloadComplianceStatusMap expands the contents of WorkloadComplianceStatus into a JSON +// request object. +func expandWorkloadComplianceStatusMap(c *Client, f map[string]WorkloadComplianceStatus, res *Workload) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkloadComplianceStatus(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkloadComplianceStatusSlice expands the contents of WorkloadComplianceStatus into a JSON +// request object. 
+func expandWorkloadComplianceStatusSlice(c *Client, f []WorkloadComplianceStatus, res *Workload) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkloadComplianceStatus(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkloadComplianceStatusMap flattens the contents of WorkloadComplianceStatus from a JSON +// response object. +func flattenWorkloadComplianceStatusMap(c *Client, i interface{}, res *Workload) map[string]WorkloadComplianceStatus { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkloadComplianceStatus{} + } + + if len(a) == 0 { + return map[string]WorkloadComplianceStatus{} + } + + items := make(map[string]WorkloadComplianceStatus) + for k, item := range a { + items[k] = *flattenWorkloadComplianceStatus(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkloadComplianceStatusSlice flattens the contents of WorkloadComplianceStatus from a JSON +// response object. +func flattenWorkloadComplianceStatusSlice(c *Client, i interface{}, res *Workload) []WorkloadComplianceStatus { + a, ok := i.([]interface{}) + if !ok { + return []WorkloadComplianceStatus{} + } + + if len(a) == 0 { + return []WorkloadComplianceStatus{} + } + + items := make([]WorkloadComplianceStatus, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkloadComplianceStatus(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkloadComplianceStatus expands an instance of WorkloadComplianceStatus into a JSON +// request object. 
+func expandWorkloadComplianceStatus(c *Client, f *WorkloadComplianceStatus, res *Workload) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.ActiveViolationCount; v != nil { + m["activeViolationCount"] = v + } + if v := f.AcknowledgedViolationCount; v != nil { + m["acknowledgedViolationCount"] = v + } + + return m, nil +} + +// flattenWorkloadComplianceStatus flattens an instance of WorkloadComplianceStatus from a JSON +// response object. +func flattenWorkloadComplianceStatus(c *Client, i interface{}, res *Workload) *WorkloadComplianceStatus { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkloadComplianceStatus{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkloadComplianceStatus + } + r.ActiveViolationCount = dcl.FlattenIntSlice(m["activeViolationCount"]) + r.AcknowledgedViolationCount = dcl.FlattenIntSlice(m["acknowledgedViolationCount"]) + + return r +} + +// expandWorkloadPartnerPermissionsMap expands the contents of WorkloadPartnerPermissions into a JSON +// request object. +func expandWorkloadPartnerPermissionsMap(c *Client, f map[string]WorkloadPartnerPermissions, res *Workload) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkloadPartnerPermissions(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkloadPartnerPermissionsSlice expands the contents of WorkloadPartnerPermissions into a JSON +// request object. 
+func expandWorkloadPartnerPermissionsSlice(c *Client, f []WorkloadPartnerPermissions, res *Workload) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkloadPartnerPermissions(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkloadPartnerPermissionsMap flattens the contents of WorkloadPartnerPermissions from a JSON +// response object. +func flattenWorkloadPartnerPermissionsMap(c *Client, i interface{}, res *Workload) map[string]WorkloadPartnerPermissions { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkloadPartnerPermissions{} + } + + if len(a) == 0 { + return map[string]WorkloadPartnerPermissions{} + } + + items := make(map[string]WorkloadPartnerPermissions) + for k, item := range a { + items[k] = *flattenWorkloadPartnerPermissions(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkloadPartnerPermissionsSlice flattens the contents of WorkloadPartnerPermissions from a JSON +// response object. +func flattenWorkloadPartnerPermissionsSlice(c *Client, i interface{}, res *Workload) []WorkloadPartnerPermissions { + a, ok := i.([]interface{}) + if !ok { + return []WorkloadPartnerPermissions{} + } + + if len(a) == 0 { + return []WorkloadPartnerPermissions{} + } + + items := make([]WorkloadPartnerPermissions, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkloadPartnerPermissions(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkloadPartnerPermissions expands an instance of WorkloadPartnerPermissions into a JSON +// request object. 
+func expandWorkloadPartnerPermissions(c *Client, f *WorkloadPartnerPermissions, res *Workload) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.DataLogsViewer; !dcl.IsEmptyValueIndirect(v) { + m["dataLogsViewer"] = v + } + if v := f.ServiceAccessApprover; !dcl.IsEmptyValueIndirect(v) { + m["serviceAccessApprover"] = v + } + if v := f.AssuredWorkloadsMonitoring; !dcl.IsEmptyValueIndirect(v) { + m["assuredWorkloadsMonitoring"] = v + } + + return m, nil +} + +// flattenWorkloadPartnerPermissions flattens an instance of WorkloadPartnerPermissions from a JSON +// response object. +func flattenWorkloadPartnerPermissions(c *Client, i interface{}, res *Workload) *WorkloadPartnerPermissions { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkloadPartnerPermissions{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkloadPartnerPermissions + } + r.DataLogsViewer = dcl.FlattenBool(m["dataLogsViewer"]) + r.ServiceAccessApprover = dcl.FlattenBool(m["serviceAccessApprover"]) + r.AssuredWorkloadsMonitoring = dcl.FlattenBool(m["assuredWorkloadsMonitoring"]) + + return r +} + +// expandWorkloadWorkloadOptionsMap expands the contents of WorkloadWorkloadOptions into a JSON +// request object. +func expandWorkloadWorkloadOptionsMap(c *Client, f map[string]WorkloadWorkloadOptions, res *Workload) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkloadWorkloadOptions(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkloadWorkloadOptionsSlice expands the contents of WorkloadWorkloadOptions into a JSON +// request object. 
+func expandWorkloadWorkloadOptionsSlice(c *Client, f []WorkloadWorkloadOptions, res *Workload) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkloadWorkloadOptions(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkloadWorkloadOptionsMap flattens the contents of WorkloadWorkloadOptions from a JSON +// response object. +func flattenWorkloadWorkloadOptionsMap(c *Client, i interface{}, res *Workload) map[string]WorkloadWorkloadOptions { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkloadWorkloadOptions{} + } + + if len(a) == 0 { + return map[string]WorkloadWorkloadOptions{} + } + + items := make(map[string]WorkloadWorkloadOptions) + for k, item := range a { + items[k] = *flattenWorkloadWorkloadOptions(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkloadWorkloadOptionsSlice flattens the contents of WorkloadWorkloadOptions from a JSON +// response object. +func flattenWorkloadWorkloadOptionsSlice(c *Client, i interface{}, res *Workload) []WorkloadWorkloadOptions { + a, ok := i.([]interface{}) + if !ok { + return []WorkloadWorkloadOptions{} + } + + if len(a) == 0 { + return []WorkloadWorkloadOptions{} + } + + items := make([]WorkloadWorkloadOptions, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkloadWorkloadOptions(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkloadWorkloadOptions expands an instance of WorkloadWorkloadOptions into a JSON +// request object. 
+func expandWorkloadWorkloadOptions(c *Client, f *WorkloadWorkloadOptions, res *Workload) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.KajEnrollmentType; !dcl.IsEmptyValueIndirect(v) { + m["kajEnrollmentType"] = v + } + + return m, nil +} + +// flattenWorkloadWorkloadOptions flattens an instance of WorkloadWorkloadOptions from a JSON +// response object. +func flattenWorkloadWorkloadOptions(c *Client, i interface{}, res *Workload) *WorkloadWorkloadOptions { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkloadWorkloadOptions{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkloadWorkloadOptions + } + r.KajEnrollmentType = flattenWorkloadWorkloadOptionsKajEnrollmentTypeEnum(m["kajEnrollmentType"]) + + return r +} + +// expandWorkloadEkmProvisioningResponseMap expands the contents of WorkloadEkmProvisioningResponse into a JSON +// request object. +func expandWorkloadEkmProvisioningResponseMap(c *Client, f map[string]WorkloadEkmProvisioningResponse, res *Workload) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkloadEkmProvisioningResponse(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkloadEkmProvisioningResponseSlice expands the contents of WorkloadEkmProvisioningResponse into a JSON +// request object. 
+func expandWorkloadEkmProvisioningResponseSlice(c *Client, f []WorkloadEkmProvisioningResponse, res *Workload) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkloadEkmProvisioningResponse(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkloadEkmProvisioningResponseMap flattens the contents of WorkloadEkmProvisioningResponse from a JSON +// response object. +func flattenWorkloadEkmProvisioningResponseMap(c *Client, i interface{}, res *Workload) map[string]WorkloadEkmProvisioningResponse { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkloadEkmProvisioningResponse{} + } + + if len(a) == 0 { + return map[string]WorkloadEkmProvisioningResponse{} + } + + items := make(map[string]WorkloadEkmProvisioningResponse) + for k, item := range a { + items[k] = *flattenWorkloadEkmProvisioningResponse(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkloadEkmProvisioningResponseSlice flattens the contents of WorkloadEkmProvisioningResponse from a JSON +// response object. +func flattenWorkloadEkmProvisioningResponseSlice(c *Client, i interface{}, res *Workload) []WorkloadEkmProvisioningResponse { + a, ok := i.([]interface{}) + if !ok { + return []WorkloadEkmProvisioningResponse{} + } + + if len(a) == 0 { + return []WorkloadEkmProvisioningResponse{} + } + + items := make([]WorkloadEkmProvisioningResponse, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkloadEkmProvisioningResponse(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkloadEkmProvisioningResponse expands an instance of WorkloadEkmProvisioningResponse into a JSON +// request object. 
+func expandWorkloadEkmProvisioningResponse(c *Client, f *WorkloadEkmProvisioningResponse, res *Workload) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.EkmProvisioningState; !dcl.IsEmptyValueIndirect(v) { + m["ekmProvisioningState"] = v + } + if v := f.EkmProvisioningErrorDomain; !dcl.IsEmptyValueIndirect(v) { + m["ekmProvisioningErrorDomain"] = v + } + if v := f.EkmProvisioningErrorMapping; !dcl.IsEmptyValueIndirect(v) { + m["ekmProvisioningErrorMapping"] = v + } + + return m, nil +} + +// flattenWorkloadEkmProvisioningResponse flattens an instance of WorkloadEkmProvisioningResponse from a JSON +// response object. +func flattenWorkloadEkmProvisioningResponse(c *Client, i interface{}, res *Workload) *WorkloadEkmProvisioningResponse { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkloadEkmProvisioningResponse{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkloadEkmProvisioningResponse + } + r.EkmProvisioningState = flattenWorkloadEkmProvisioningResponseEkmProvisioningStateEnum(m["ekmProvisioningState"]) + r.EkmProvisioningErrorDomain = flattenWorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnum(m["ekmProvisioningErrorDomain"]) + r.EkmProvisioningErrorMapping = flattenWorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnum(m["ekmProvisioningErrorMapping"]) + + return r +} + +// flattenWorkloadResourcesResourceTypeEnumMap flattens the contents of WorkloadResourcesResourceTypeEnum from a JSON +// response object. 
+func flattenWorkloadResourcesResourceTypeEnumMap(c *Client, i interface{}, res *Workload) map[string]WorkloadResourcesResourceTypeEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkloadResourcesResourceTypeEnum{} + } + + if len(a) == 0 { + return map[string]WorkloadResourcesResourceTypeEnum{} + } + + items := make(map[string]WorkloadResourcesResourceTypeEnum) + for k, item := range a { + items[k] = *flattenWorkloadResourcesResourceTypeEnum(item.(interface{})) + } + + return items +} + +// flattenWorkloadResourcesResourceTypeEnumSlice flattens the contents of WorkloadResourcesResourceTypeEnum from a JSON +// response object. +func flattenWorkloadResourcesResourceTypeEnumSlice(c *Client, i interface{}, res *Workload) []WorkloadResourcesResourceTypeEnum { + a, ok := i.([]interface{}) + if !ok { + return []WorkloadResourcesResourceTypeEnum{} + } + + if len(a) == 0 { + return []WorkloadResourcesResourceTypeEnum{} + } + + items := make([]WorkloadResourcesResourceTypeEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkloadResourcesResourceTypeEnum(item.(interface{}))) + } + + return items +} + +// flattenWorkloadResourcesResourceTypeEnum asserts that an interface is a string, and returns a +// pointer to a *WorkloadResourcesResourceTypeEnum with the same value as that string. +func flattenWorkloadResourcesResourceTypeEnum(i interface{}) *WorkloadResourcesResourceTypeEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return WorkloadResourcesResourceTypeEnumRef(s) +} + +// flattenWorkloadComplianceRegimeEnumMap flattens the contents of WorkloadComplianceRegimeEnum from a JSON +// response object. 
+func flattenWorkloadComplianceRegimeEnumMap(c *Client, i interface{}, res *Workload) map[string]WorkloadComplianceRegimeEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkloadComplianceRegimeEnum{} + } + + if len(a) == 0 { + return map[string]WorkloadComplianceRegimeEnum{} + } + + items := make(map[string]WorkloadComplianceRegimeEnum) + for k, item := range a { + items[k] = *flattenWorkloadComplianceRegimeEnum(item.(interface{})) + } + + return items +} + +// flattenWorkloadComplianceRegimeEnumSlice flattens the contents of WorkloadComplianceRegimeEnum from a JSON +// response object. +func flattenWorkloadComplianceRegimeEnumSlice(c *Client, i interface{}, res *Workload) []WorkloadComplianceRegimeEnum { + a, ok := i.([]interface{}) + if !ok { + return []WorkloadComplianceRegimeEnum{} + } + + if len(a) == 0 { + return []WorkloadComplianceRegimeEnum{} + } + + items := make([]WorkloadComplianceRegimeEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkloadComplianceRegimeEnum(item.(interface{}))) + } + + return items +} + +// flattenWorkloadComplianceRegimeEnum asserts that an interface is a string, and returns a +// pointer to a *WorkloadComplianceRegimeEnum with the same value as that string. +func flattenWorkloadComplianceRegimeEnum(i interface{}) *WorkloadComplianceRegimeEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return WorkloadComplianceRegimeEnumRef(s) +} + +// flattenWorkloadResourceSettingsResourceTypeEnumMap flattens the contents of WorkloadResourceSettingsResourceTypeEnum from a JSON +// response object. 
+func flattenWorkloadResourceSettingsResourceTypeEnumMap(c *Client, i interface{}, res *Workload) map[string]WorkloadResourceSettingsResourceTypeEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkloadResourceSettingsResourceTypeEnum{} + } + + if len(a) == 0 { + return map[string]WorkloadResourceSettingsResourceTypeEnum{} + } + + items := make(map[string]WorkloadResourceSettingsResourceTypeEnum) + for k, item := range a { + items[k] = *flattenWorkloadResourceSettingsResourceTypeEnum(item.(interface{})) + } + + return items +} + +// flattenWorkloadResourceSettingsResourceTypeEnumSlice flattens the contents of WorkloadResourceSettingsResourceTypeEnum from a JSON +// response object. +func flattenWorkloadResourceSettingsResourceTypeEnumSlice(c *Client, i interface{}, res *Workload) []WorkloadResourceSettingsResourceTypeEnum { + a, ok := i.([]interface{}) + if !ok { + return []WorkloadResourceSettingsResourceTypeEnum{} + } + + if len(a) == 0 { + return []WorkloadResourceSettingsResourceTypeEnum{} + } + + items := make([]WorkloadResourceSettingsResourceTypeEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkloadResourceSettingsResourceTypeEnum(item.(interface{}))) + } + + return items +} + +// flattenWorkloadResourceSettingsResourceTypeEnum asserts that an interface is a string, and returns a +// pointer to a *WorkloadResourceSettingsResourceTypeEnum with the same value as that string. +func flattenWorkloadResourceSettingsResourceTypeEnum(i interface{}) *WorkloadResourceSettingsResourceTypeEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return WorkloadResourceSettingsResourceTypeEnumRef(s) +} + +// flattenWorkloadKajEnrollmentStateEnumMap flattens the contents of WorkloadKajEnrollmentStateEnum from a JSON +// response object. 
+func flattenWorkloadKajEnrollmentStateEnumMap(c *Client, i interface{}, res *Workload) map[string]WorkloadKajEnrollmentStateEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkloadKajEnrollmentStateEnum{} + } + + if len(a) == 0 { + return map[string]WorkloadKajEnrollmentStateEnum{} + } + + items := make(map[string]WorkloadKajEnrollmentStateEnum) + for k, item := range a { + items[k] = *flattenWorkloadKajEnrollmentStateEnum(item.(interface{})) + } + + return items +} + +// flattenWorkloadKajEnrollmentStateEnumSlice flattens the contents of WorkloadKajEnrollmentStateEnum from a JSON +// response object. +func flattenWorkloadKajEnrollmentStateEnumSlice(c *Client, i interface{}, res *Workload) []WorkloadKajEnrollmentStateEnum { + a, ok := i.([]interface{}) + if !ok { + return []WorkloadKajEnrollmentStateEnum{} + } + + if len(a) == 0 { + return []WorkloadKajEnrollmentStateEnum{} + } + + items := make([]WorkloadKajEnrollmentStateEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkloadKajEnrollmentStateEnum(item.(interface{}))) + } + + return items +} + +// flattenWorkloadKajEnrollmentStateEnum asserts that an interface is a string, and returns a +// pointer to a *WorkloadKajEnrollmentStateEnum with the same value as that string. +func flattenWorkloadKajEnrollmentStateEnum(i interface{}) *WorkloadKajEnrollmentStateEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return WorkloadKajEnrollmentStateEnumRef(s) +} + +// flattenWorkloadSaaEnrollmentResponseSetupErrorsEnumMap flattens the contents of WorkloadSaaEnrollmentResponseSetupErrorsEnum from a JSON +// response object. 
+func flattenWorkloadSaaEnrollmentResponseSetupErrorsEnumMap(c *Client, i interface{}, res *Workload) map[string]WorkloadSaaEnrollmentResponseSetupErrorsEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkloadSaaEnrollmentResponseSetupErrorsEnum{} + } + + if len(a) == 0 { + return map[string]WorkloadSaaEnrollmentResponseSetupErrorsEnum{} + } + + items := make(map[string]WorkloadSaaEnrollmentResponseSetupErrorsEnum) + for k, item := range a { + items[k] = *flattenWorkloadSaaEnrollmentResponseSetupErrorsEnum(item.(interface{})) + } + + return items +} + +// flattenWorkloadSaaEnrollmentResponseSetupErrorsEnumSlice flattens the contents of WorkloadSaaEnrollmentResponseSetupErrorsEnum from a JSON +// response object. +func flattenWorkloadSaaEnrollmentResponseSetupErrorsEnumSlice(c *Client, i interface{}, res *Workload) []WorkloadSaaEnrollmentResponseSetupErrorsEnum { + a, ok := i.([]interface{}) + if !ok { + return []WorkloadSaaEnrollmentResponseSetupErrorsEnum{} + } + + if len(a) == 0 { + return []WorkloadSaaEnrollmentResponseSetupErrorsEnum{} + } + + items := make([]WorkloadSaaEnrollmentResponseSetupErrorsEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkloadSaaEnrollmentResponseSetupErrorsEnum(item.(interface{}))) + } + + return items +} + +// flattenWorkloadSaaEnrollmentResponseSetupErrorsEnum asserts that an interface is a string, and returns a +// pointer to a *WorkloadSaaEnrollmentResponseSetupErrorsEnum with the same value as that string. +func flattenWorkloadSaaEnrollmentResponseSetupErrorsEnum(i interface{}) *WorkloadSaaEnrollmentResponseSetupErrorsEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return WorkloadSaaEnrollmentResponseSetupErrorsEnumRef(s) +} + +// flattenWorkloadSaaEnrollmentResponseSetupStatusEnumMap flattens the contents of WorkloadSaaEnrollmentResponseSetupStatusEnum from a JSON +// response object. 
+func flattenWorkloadSaaEnrollmentResponseSetupStatusEnumMap(c *Client, i interface{}, res *Workload) map[string]WorkloadSaaEnrollmentResponseSetupStatusEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkloadSaaEnrollmentResponseSetupStatusEnum{} + } + + if len(a) == 0 { + return map[string]WorkloadSaaEnrollmentResponseSetupStatusEnum{} + } + + items := make(map[string]WorkloadSaaEnrollmentResponseSetupStatusEnum) + for k, item := range a { + items[k] = *flattenWorkloadSaaEnrollmentResponseSetupStatusEnum(item.(interface{})) + } + + return items +} + +// flattenWorkloadSaaEnrollmentResponseSetupStatusEnumSlice flattens the contents of WorkloadSaaEnrollmentResponseSetupStatusEnum from a JSON +// response object. +func flattenWorkloadSaaEnrollmentResponseSetupStatusEnumSlice(c *Client, i interface{}, res *Workload) []WorkloadSaaEnrollmentResponseSetupStatusEnum { + a, ok := i.([]interface{}) + if !ok { + return []WorkloadSaaEnrollmentResponseSetupStatusEnum{} + } + + if len(a) == 0 { + return []WorkloadSaaEnrollmentResponseSetupStatusEnum{} + } + + items := make([]WorkloadSaaEnrollmentResponseSetupStatusEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkloadSaaEnrollmentResponseSetupStatusEnum(item.(interface{}))) + } + + return items +} + +// flattenWorkloadSaaEnrollmentResponseSetupStatusEnum asserts that an interface is a string, and returns a +// pointer to a *WorkloadSaaEnrollmentResponseSetupStatusEnum with the same value as that string. +func flattenWorkloadSaaEnrollmentResponseSetupStatusEnum(i interface{}) *WorkloadSaaEnrollmentResponseSetupStatusEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return WorkloadSaaEnrollmentResponseSetupStatusEnumRef(s) +} + +// flattenWorkloadPartnerEnumMap flattens the contents of WorkloadPartnerEnum from a JSON +// response object. 
+func flattenWorkloadPartnerEnumMap(c *Client, i interface{}, res *Workload) map[string]WorkloadPartnerEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkloadPartnerEnum{} + } + + if len(a) == 0 { + return map[string]WorkloadPartnerEnum{} + } + + items := make(map[string]WorkloadPartnerEnum) + for k, item := range a { + items[k] = *flattenWorkloadPartnerEnum(item.(interface{})) + } + + return items +} + +// flattenWorkloadPartnerEnumSlice flattens the contents of WorkloadPartnerEnum from a JSON +// response object. +func flattenWorkloadPartnerEnumSlice(c *Client, i interface{}, res *Workload) []WorkloadPartnerEnum { + a, ok := i.([]interface{}) + if !ok { + return []WorkloadPartnerEnum{} + } + + if len(a) == 0 { + return []WorkloadPartnerEnum{} + } + + items := make([]WorkloadPartnerEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkloadPartnerEnum(item.(interface{}))) + } + + return items +} + +// flattenWorkloadPartnerEnum asserts that an interface is a string, and returns a +// pointer to a *WorkloadPartnerEnum with the same value as that string. +func flattenWorkloadPartnerEnum(i interface{}) *WorkloadPartnerEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return WorkloadPartnerEnumRef(s) +} + +// flattenWorkloadWorkloadOptionsKajEnrollmentTypeEnumMap flattens the contents of WorkloadWorkloadOptionsKajEnrollmentTypeEnum from a JSON +// response object. 
+func flattenWorkloadWorkloadOptionsKajEnrollmentTypeEnumMap(c *Client, i interface{}, res *Workload) map[string]WorkloadWorkloadOptionsKajEnrollmentTypeEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkloadWorkloadOptionsKajEnrollmentTypeEnum{} + } + + if len(a) == 0 { + return map[string]WorkloadWorkloadOptionsKajEnrollmentTypeEnum{} + } + + items := make(map[string]WorkloadWorkloadOptionsKajEnrollmentTypeEnum) + for k, item := range a { + items[k] = *flattenWorkloadWorkloadOptionsKajEnrollmentTypeEnum(item.(interface{})) + } + + return items +} + +// flattenWorkloadWorkloadOptionsKajEnrollmentTypeEnumSlice flattens the contents of WorkloadWorkloadOptionsKajEnrollmentTypeEnum from a JSON +// response object. +func flattenWorkloadWorkloadOptionsKajEnrollmentTypeEnumSlice(c *Client, i interface{}, res *Workload) []WorkloadWorkloadOptionsKajEnrollmentTypeEnum { + a, ok := i.([]interface{}) + if !ok { + return []WorkloadWorkloadOptionsKajEnrollmentTypeEnum{} + } + + if len(a) == 0 { + return []WorkloadWorkloadOptionsKajEnrollmentTypeEnum{} + } + + items := make([]WorkloadWorkloadOptionsKajEnrollmentTypeEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkloadWorkloadOptionsKajEnrollmentTypeEnum(item.(interface{}))) + } + + return items +} + +// flattenWorkloadWorkloadOptionsKajEnrollmentTypeEnum asserts that an interface is a string, and returns a +// pointer to a *WorkloadWorkloadOptionsKajEnrollmentTypeEnum with the same value as that string. +func flattenWorkloadWorkloadOptionsKajEnrollmentTypeEnum(i interface{}) *WorkloadWorkloadOptionsKajEnrollmentTypeEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return WorkloadWorkloadOptionsKajEnrollmentTypeEnumRef(s) +} + +// flattenWorkloadEkmProvisioningResponseEkmProvisioningStateEnumMap flattens the contents of WorkloadEkmProvisioningResponseEkmProvisioningStateEnum from a JSON +// response object. 
+func flattenWorkloadEkmProvisioningResponseEkmProvisioningStateEnumMap(c *Client, i interface{}, res *Workload) map[string]WorkloadEkmProvisioningResponseEkmProvisioningStateEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkloadEkmProvisioningResponseEkmProvisioningStateEnum{} + } + + if len(a) == 0 { + return map[string]WorkloadEkmProvisioningResponseEkmProvisioningStateEnum{} + } + + items := make(map[string]WorkloadEkmProvisioningResponseEkmProvisioningStateEnum) + for k, item := range a { + items[k] = *flattenWorkloadEkmProvisioningResponseEkmProvisioningStateEnum(item.(interface{})) + } + + return items +} + +// flattenWorkloadEkmProvisioningResponseEkmProvisioningStateEnumSlice flattens the contents of WorkloadEkmProvisioningResponseEkmProvisioningStateEnum from a JSON +// response object. +func flattenWorkloadEkmProvisioningResponseEkmProvisioningStateEnumSlice(c *Client, i interface{}, res *Workload) []WorkloadEkmProvisioningResponseEkmProvisioningStateEnum { + a, ok := i.([]interface{}) + if !ok { + return []WorkloadEkmProvisioningResponseEkmProvisioningStateEnum{} + } + + if len(a) == 0 { + return []WorkloadEkmProvisioningResponseEkmProvisioningStateEnum{} + } + + items := make([]WorkloadEkmProvisioningResponseEkmProvisioningStateEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkloadEkmProvisioningResponseEkmProvisioningStateEnum(item.(interface{}))) + } + + return items +} + +// flattenWorkloadEkmProvisioningResponseEkmProvisioningStateEnum asserts that an interface is a string, and returns a +// pointer to a *WorkloadEkmProvisioningResponseEkmProvisioningStateEnum with the same value as that string. 
+func flattenWorkloadEkmProvisioningResponseEkmProvisioningStateEnum(i interface{}) *WorkloadEkmProvisioningResponseEkmProvisioningStateEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return WorkloadEkmProvisioningResponseEkmProvisioningStateEnumRef(s) +} + +// flattenWorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnumMap flattens the contents of WorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnum from a JSON +// response object. +func flattenWorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnumMap(c *Client, i interface{}, res *Workload) map[string]WorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnum{} + } + + if len(a) == 0 { + return map[string]WorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnum{} + } + + items := make(map[string]WorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnum) + for k, item := range a { + items[k] = *flattenWorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnum(item.(interface{})) + } + + return items +} + +// flattenWorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnumSlice flattens the contents of WorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnum from a JSON +// response object. 
+func flattenWorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnumSlice(c *Client, i interface{}, res *Workload) []WorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnum { + a, ok := i.([]interface{}) + if !ok { + return []WorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnum{} + } + + if len(a) == 0 { + return []WorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnum{} + } + + items := make([]WorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnum(item.(interface{}))) + } + + return items +} + +// flattenWorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnum asserts that an interface is a string, and returns a +// pointer to a *WorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnum with the same value as that string. +func flattenWorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnum(i interface{}) *WorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return WorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnumRef(s) +} + +// flattenWorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnumMap flattens the contents of WorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnum from a JSON +// response object. 
+func flattenWorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnumMap(c *Client, i interface{}, res *Workload) map[string]WorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnum{} + } + + if len(a) == 0 { + return map[string]WorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnum{} + } + + items := make(map[string]WorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnum) + for k, item := range a { + items[k] = *flattenWorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnum(item.(interface{})) + } + + return items +} + +// flattenWorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnumSlice flattens the contents of WorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnum from a JSON +// response object. +func flattenWorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnumSlice(c *Client, i interface{}, res *Workload) []WorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnum { + a, ok := i.([]interface{}) + if !ok { + return []WorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnum{} + } + + if len(a) == 0 { + return []WorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnum{} + } + + items := make([]WorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnum(item.(interface{}))) + } + + return items +} + +// flattenWorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnum asserts that an interface is a string, and returns a +// pointer to a *WorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnum with the same value as that string. 
+func flattenWorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnum(i interface{}) *WorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return WorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnumRef(s) +} + +// This function returns a matcher that checks whether a serialized resource matches this resource +// in its parameters (as defined by the fields in a Get, which definitionally define resource +// identity). This is useful in extracting the element from a List call. +func (r *Workload) matcher(c *Client) func([]byte) bool { + return func(b []byte) bool { + cr, err := unmarshalWorkload(b, c, r) + if err != nil { + c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.") + return false + } + nr := r.urlNormalized() + ncr := cr.urlNormalized() + c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr) + + if nr.Organization == nil && ncr.Organization == nil { + c.Config.Logger.Info("Both Organization fields null - considering equal.") + } else if nr.Organization == nil || ncr.Organization == nil { + c.Config.Logger.Info("Only one Organization field is null - considering unequal.") + return false + } else if *nr.Organization != *ncr.Organization { + return false + } + if nr.Location == nil && ncr.Location == nil { + c.Config.Logger.Info("Both Location fields null - considering equal.") + } else if nr.Location == nil || ncr.Location == nil { + c.Config.Logger.Info("Only one Location field is null - considering unequal.") + return false + } else if *nr.Location != *ncr.Location { + return false + } + if nr.Name == nil && ncr.Name == nil { + c.Config.Logger.Info("Both Name fields null - considering equal.") + } else if nr.Name == nil || ncr.Name == nil { + c.Config.Logger.Info("Only one Name field is null - considering unequal.") + return false + } else if *nr.Name != *ncr.Name { + return false + } + return true + } +} + +type workloadDiff struct { + // The diff 
should include one or the other of RequiresRecreate or UpdateOp. + RequiresRecreate bool + UpdateOp workloadApiOperation + FieldName string // used for error logging +} + +func convertFieldDiffsToWorkloadDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]workloadDiff, error) { + opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff) + // Map each operation name to the field diffs associated with it. + for _, fd := range fds { + for _, ro := range fd.ResultingOperation { + if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok { + fieldDiffs = append(fieldDiffs, fd) + opNamesToFieldDiffs[ro] = fieldDiffs + } else { + config.Logger.Infof("%s required due to diff: %v", ro, fd) + opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd} + } + } + } + var diffs []workloadDiff + // For each operation name, create a workloadDiff which contains the operation. + for opName, fieldDiffs := range opNamesToFieldDiffs { + // Use the first field diff's field name for logging required recreate error. + diff := workloadDiff{FieldName: fieldDiffs[0].FieldName} + if opName == "Recreate" { + diff.RequiresRecreate = true + } else { + apiOp, err := convertOpNameToWorkloadApiOperation(opName, fieldDiffs, opts...) + if err != nil { + return diffs, err + } + diff.UpdateOp = apiOp + } + diffs = append(diffs, diff) + } + return diffs, nil +} + +func convertOpNameToWorkloadApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (workloadApiOperation, error) { + switch opName { + + case "updateWorkloadUpdateWorkloadOperation": + return &updateWorkloadUpdateWorkloadOperation{FieldDiffs: fieldDiffs}, nil + + default: + return nil, fmt.Errorf("no such operation with name: %v", opName) + } +} + +func extractWorkloadFields(r *Workload) error { + vKmsSettings := r.KmsSettings + if vKmsSettings == nil { + // note: explicitly not the empty object. 
+ vKmsSettings = &WorkloadKmsSettings{} + } + if err := extractWorkloadKmsSettingsFields(r, vKmsSettings); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vKmsSettings) { + r.KmsSettings = vKmsSettings + } + vSaaEnrollmentResponse := r.SaaEnrollmentResponse + if vSaaEnrollmentResponse == nil { + // note: explicitly not the empty object. + vSaaEnrollmentResponse = &WorkloadSaaEnrollmentResponse{} + } + if err := extractWorkloadSaaEnrollmentResponseFields(r, vSaaEnrollmentResponse); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSaaEnrollmentResponse) { + r.SaaEnrollmentResponse = vSaaEnrollmentResponse + } + vComplianceStatus := r.ComplianceStatus + if vComplianceStatus == nil { + // note: explicitly not the empty object. + vComplianceStatus = &WorkloadComplianceStatus{} + } + if err := extractWorkloadComplianceStatusFields(r, vComplianceStatus); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vComplianceStatus) { + r.ComplianceStatus = vComplianceStatus + } + vPartnerPermissions := r.PartnerPermissions + if vPartnerPermissions == nil { + // note: explicitly not the empty object. + vPartnerPermissions = &WorkloadPartnerPermissions{} + } + if err := extractWorkloadPartnerPermissionsFields(r, vPartnerPermissions); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vPartnerPermissions) { + r.PartnerPermissions = vPartnerPermissions + } + vWorkloadOptions := r.WorkloadOptions + if vWorkloadOptions == nil { + // note: explicitly not the empty object. + vWorkloadOptions = &WorkloadWorkloadOptions{} + } + if err := extractWorkloadWorkloadOptionsFields(r, vWorkloadOptions); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vWorkloadOptions) { + r.WorkloadOptions = vWorkloadOptions + } + vEkmProvisioningResponse := r.EkmProvisioningResponse + if vEkmProvisioningResponse == nil { + // note: explicitly not the empty object. 
+ vEkmProvisioningResponse = &WorkloadEkmProvisioningResponse{} + } + if err := extractWorkloadEkmProvisioningResponseFields(r, vEkmProvisioningResponse); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vEkmProvisioningResponse) { + r.EkmProvisioningResponse = vEkmProvisioningResponse + } + return nil +} +func extractWorkloadResourcesFields(r *Workload, o *WorkloadResources) error { + return nil +} +func extractWorkloadKmsSettingsFields(r *Workload, o *WorkloadKmsSettings) error { + return nil +} +func extractWorkloadResourceSettingsFields(r *Workload, o *WorkloadResourceSettings) error { + return nil +} +func extractWorkloadSaaEnrollmentResponseFields(r *Workload, o *WorkloadSaaEnrollmentResponse) error { + return nil +} +func extractWorkloadComplianceStatusFields(r *Workload, o *WorkloadComplianceStatus) error { + return nil +} +func extractWorkloadPartnerPermissionsFields(r *Workload, o *WorkloadPartnerPermissions) error { + return nil +} +func extractWorkloadWorkloadOptionsFields(r *Workload, o *WorkloadWorkloadOptions) error { + return nil +} +func extractWorkloadEkmProvisioningResponseFields(r *Workload, o *WorkloadEkmProvisioningResponse) error { + return nil +} + +func postReadExtractWorkloadFields(r *Workload) error { + vKmsSettings := r.KmsSettings + if vKmsSettings == nil { + // note: explicitly not the empty object. + vKmsSettings = &WorkloadKmsSettings{} + } + if err := postReadExtractWorkloadKmsSettingsFields(r, vKmsSettings); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vKmsSettings) { + r.KmsSettings = vKmsSettings + } + vSaaEnrollmentResponse := r.SaaEnrollmentResponse + if vSaaEnrollmentResponse == nil { + // note: explicitly not the empty object. 
+ vSaaEnrollmentResponse = &WorkloadSaaEnrollmentResponse{} + } + if err := postReadExtractWorkloadSaaEnrollmentResponseFields(r, vSaaEnrollmentResponse); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSaaEnrollmentResponse) { + r.SaaEnrollmentResponse = vSaaEnrollmentResponse + } + vComplianceStatus := r.ComplianceStatus + if vComplianceStatus == nil { + // note: explicitly not the empty object. + vComplianceStatus = &WorkloadComplianceStatus{} + } + if err := postReadExtractWorkloadComplianceStatusFields(r, vComplianceStatus); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vComplianceStatus) { + r.ComplianceStatus = vComplianceStatus + } + vPartnerPermissions := r.PartnerPermissions + if vPartnerPermissions == nil { + // note: explicitly not the empty object. + vPartnerPermissions = &WorkloadPartnerPermissions{} + } + if err := postReadExtractWorkloadPartnerPermissionsFields(r, vPartnerPermissions); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vPartnerPermissions) { + r.PartnerPermissions = vPartnerPermissions + } + vWorkloadOptions := r.WorkloadOptions + if vWorkloadOptions == nil { + // note: explicitly not the empty object. + vWorkloadOptions = &WorkloadWorkloadOptions{} + } + if err := postReadExtractWorkloadWorkloadOptionsFields(r, vWorkloadOptions); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vWorkloadOptions) { + r.WorkloadOptions = vWorkloadOptions + } + vEkmProvisioningResponse := r.EkmProvisioningResponse + if vEkmProvisioningResponse == nil { + // note: explicitly not the empty object. 
+ vEkmProvisioningResponse = &WorkloadEkmProvisioningResponse{} + } + if err := postReadExtractWorkloadEkmProvisioningResponseFields(r, vEkmProvisioningResponse); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vEkmProvisioningResponse) { + r.EkmProvisioningResponse = vEkmProvisioningResponse + } + return nil +} +func postReadExtractWorkloadResourcesFields(r *Workload, o *WorkloadResources) error { + return nil +} +func postReadExtractWorkloadKmsSettingsFields(r *Workload, o *WorkloadKmsSettings) error { + return nil +} +func postReadExtractWorkloadResourceSettingsFields(r *Workload, o *WorkloadResourceSettings) error { + return nil +} +func postReadExtractWorkloadSaaEnrollmentResponseFields(r *Workload, o *WorkloadSaaEnrollmentResponse) error { + return nil +} +func postReadExtractWorkloadComplianceStatusFields(r *Workload, o *WorkloadComplianceStatus) error { + return nil +} +func postReadExtractWorkloadPartnerPermissionsFields(r *Workload, o *WorkloadPartnerPermissions) error { + return nil +} +func postReadExtractWorkloadWorkloadOptionsFields(r *Workload, o *WorkloadWorkloadOptions) error { + return nil +} +func postReadExtractWorkloadEkmProvisioningResponseFields(r *Workload, o *WorkloadEkmProvisioningResponse) error { + return nil +} diff --git a/mmv1/third_party/terraform/services/assuredworkloads/workload_schema.go.tmpl b/mmv1/third_party/terraform/services/assuredworkloads/workload_schema.go.tmpl new file mode 100644 index 000000000000..e56c85c5622d --- /dev/null +++ b/mmv1/third_party/terraform/services/assuredworkloads/workload_schema.go.tmpl @@ -0,0 +1,552 @@ +package assuredworkloads + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +func DCLWorkloadSchema() *dcl.Schema { + return &dcl.Schema{ + Info: &dcl.Info{ + Title: "AssuredWorkloads/Workload", + Description: "The AssuredWorkloads Workload resource", + StructName: "Workload", + }, + Paths: &dcl.Paths{ + Get: &dcl.Path{ + Description: "The 
function used to get information about a Workload", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "workload", + Required: true, + Description: "A full instance of a Workload", + }, + }, + }, + Apply: &dcl.Path{ + Description: "The function used to apply information about a Workload", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "workload", + Required: true, + Description: "A full instance of a Workload", + }, + }, + }, + Delete: &dcl.Path{ + Description: "The function used to delete a Workload", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "workload", + Required: true, + Description: "A full instance of a Workload", + }, + }, + }, + DeleteAll: &dcl.Path{ + Description: "The function used to delete all Workload", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "organization", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + dcl.PathParameters{ + Name: "location", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + }, + }, + List: &dcl.Path{ + Description: "The function used to list information about many Workload", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "organization", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + dcl.PathParameters{ + Name: "location", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + }, + }, + }, + Components: &dcl.Components{ + Schemas: map[string]*dcl.Component{ + "Workload": &dcl.Component{ + Title: "Workload", + ID: "organizations/{{ "{{" }}organization{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/workloads/{{ "{{" }}name{{ "}}" }}", + UsesStateHint: true, + ParentContainer: "organization", + LabelsField: "labels", + HasCreate: true, + SchemaProperty: dcl.Property{ + Type: "object", + Required: []string{ + "displayName", + "complianceRegime", + "organization", + "location", + }, + Properties: 
map[string]*dcl.Property{ + "billingAccount": &dcl.Property{ + Type: "string", + GoName: "BillingAccount", + Description: "Optional. Input only. The billing account used for the resources which are direct children of workload. This billing account is initially associated with the resources created as part of Workload creation. After the initial creation of these resources, the customer can change the assigned billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF`.", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Cloudresourcemanager/BillingAccount", + Field: "name", + }, + }, + Unreadable: true, + }, + "complianceRegime": &dcl.Property{ + Type: "string", + GoName: "ComplianceRegime", + GoType: "WorkloadComplianceRegimeEnum", + Description: "Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT, KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS, HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS, HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS_US_SUPPORT, IRS_1075", + Immutable: true, + Enum: []string{ + "COMPLIANCE_REGIME_UNSPECIFIED", + "IL4", + "CJIS", + "FEDRAMP_HIGH", + "FEDRAMP_MODERATE", + "US_REGIONAL_ACCESS", + "HIPAA", + "HITRUST", + "EU_REGIONS_AND_SUPPORT", + "CA_REGIONS_AND_SUPPORT", + "ITAR", + "AU_REGIONS_AND_US_SUPPORT", + "ASSURED_WORKLOADS_FOR_PARTNERS", + "ISR_REGIONS", + "ISR_REGIONS_AND_SUPPORT", + "CA_PROTECTED_B", + "IL5", + "IL2", + "JP_REGIONS_AND_SUPPORT", + "KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS", + "REGIONAL_CONTROLS", + 
"HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS", + "HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS_US_SUPPORT", + "IRS_1075", + }, + }, + "complianceStatus": &dcl.Property{ + Type: "object", + GoName: "ComplianceStatus", + GoType: "WorkloadComplianceStatus", + ReadOnly: true, + Description: "Output only. Count of active Violations in the Workload.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "acknowledgedViolationCount": &dcl.Property{ + Type: "array", + GoName: "AcknowledgedViolationCount", + Description: "Number of current orgPolicy violations which are acknowledged.", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "integer", + Format: "int64", + GoType: "int64", + }, + }, + "activeViolationCount": &dcl.Property{ + Type: "array", + GoName: "ActiveViolationCount", + Description: "Number of current orgPolicy violations which are not acknowledged.", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "integer", + Format: "int64", + GoType: "int64", + }, + }, + }, + }, + "compliantButDisallowedServices": &dcl.Property{ + Type: "array", + GoName: "CompliantButDisallowedServices", + ReadOnly: true, + Description: "Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke workloads.restrictAllowedResources endpoint to allow your project developers to use these services in their environment.", + Immutable: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + "createTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "CreateTime", + ReadOnly: true, + Description: "Output only. Immutable. The Workload creation timestamp.", + Immutable: true, + }, + "displayName": &dcl.Property{ + Type: "string", + GoName: "DisplayName", + Description: "Required. The user-assigned display name of the Workload. 
When present it must be between 4 to 30 characters. Allowed characters are: lowercase and uppercase letters, numbers, hyphen, and spaces. Example: My Workload", + }, + "ekmProvisioningResponse": &dcl.Property{ + Type: "object", + GoName: "EkmProvisioningResponse", + GoType: "WorkloadEkmProvisioningResponse", + ReadOnly: true, + Description: "Optional. Represents the Ekm Provisioning State of the given workload.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "ekmProvisioningErrorDomain": &dcl.Property{ + Type: "string", + GoName: "EkmProvisioningErrorDomain", + GoType: "WorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnum", + Description: "Indicates Ekm provisioning error if any. Possible values: EKM_PROVISIONING_ERROR_DOMAIN_UNSPECIFIED, UNSPECIFIED_ERROR, GOOGLE_SERVER_ERROR, EXTERNAL_USER_ERROR, EXTERNAL_PARTNER_ERROR, TIMEOUT_ERROR", + Immutable: true, + Enum: []string{ + "EKM_PROVISIONING_ERROR_DOMAIN_UNSPECIFIED", + "UNSPECIFIED_ERROR", + "GOOGLE_SERVER_ERROR", + "EXTERNAL_USER_ERROR", + "EXTERNAL_PARTNER_ERROR", + "TIMEOUT_ERROR", + }, + }, + "ekmProvisioningErrorMapping": &dcl.Property{ + Type: "string", + GoName: "EkmProvisioningErrorMapping", + GoType: "WorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnum", + Description: "Detailed error message if Ekm provisioning fails Possible values: EKM_PROVISIONING_ERROR_MAPPING_UNSPECIFIED, INVALID_SERVICE_ACCOUNT, MISSING_METRICS_SCOPE_ADMIN_PERMISSION, MISSING_EKM_CONNECTION_ADMIN_PERMISSION", + Immutable: true, + Enum: []string{ + "EKM_PROVISIONING_ERROR_MAPPING_UNSPECIFIED", + "INVALID_SERVICE_ACCOUNT", + "MISSING_METRICS_SCOPE_ADMIN_PERMISSION", + "MISSING_EKM_CONNECTION_ADMIN_PERMISSION", + }, + }, + "ekmProvisioningState": &dcl.Property{ + Type: "string", + GoName: "EkmProvisioningState", + GoType: "WorkloadEkmProvisioningResponseEkmProvisioningStateEnum", + Description: "Indicates Ekm enrollment Provisioning of a given workload. 
Possible values: EKM_PROVISIONING_STATE_UNSPECIFIED, EKM_PROVISIONING_STATE_PENDING, EKM_PROVISIONING_STATE_FAILED, EKM_PROVISIONING_STATE_COMPLETED", + Immutable: true, + Enum: []string{ + "EKM_PROVISIONING_STATE_UNSPECIFIED", + "EKM_PROVISIONING_STATE_PENDING", + "EKM_PROVISIONING_STATE_FAILED", + "EKM_PROVISIONING_STATE_COMPLETED", + }, + }, + }, + }, + "enableSovereignControls": &dcl.Property{ + Type: "boolean", + GoName: "EnableSovereignControls", + Description: "Optional. Indicates the sovereignty status of the given workload. Currently meant to be used by Europe/Canada customers.", + Immutable: true, + }, + "kajEnrollmentState": &dcl.Property{ + Type: "string", + GoName: "KajEnrollmentState", + GoType: "WorkloadKajEnrollmentStateEnum", + ReadOnly: true, + Description: "Output only. Represents the KAJ enrollment state of the given workload. Possible values: KAJ_ENROLLMENT_STATE_UNSPECIFIED, KAJ_ENROLLMENT_STATE_PENDING, KAJ_ENROLLMENT_STATE_COMPLETE", + Immutable: true, + Enum: []string{ + "KAJ_ENROLLMENT_STATE_UNSPECIFIED", + "KAJ_ENROLLMENT_STATE_PENDING", + "KAJ_ENROLLMENT_STATE_COMPLETE", + }, + }, + "kmsSettings": &dcl.Property{ + Type: "object", + GoName: "KmsSettings", + GoType: "WorkloadKmsSettings", + Description: "**DEPRECATED** Input only. Settings used to create a CMEK crypto key. When set, a project with a KMS CMEK key is provisioned. This field is deprecated as of Feb 28, 2022. In order to create a Keyring, callers should specify, ENCRYPTION_KEYS_PROJECT or KEYRING in ResourceSettings.resource_type field.", + Immutable: true, + Unreadable: true, + Required: []string{ + "nextRotationTime", + "rotationPeriod", + }, + Properties: map[string]*dcl.Property{ + "nextRotationTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "NextRotationTime", + Description: "Required. Input only. Immutable. 
The time at which the Key Management Service will automatically create a new version of the crypto key and mark it as the primary.", + Immutable: true, + }, + "rotationPeriod": &dcl.Property{ + Type: "string", + GoName: "RotationPeriod", + Description: "Required. Input only. Immutable. will be advanced by this period when the Key Management Service automatically rotates a key. Must be at least 24 hours and at most 876,000 hours.", + Immutable: true, + }, + }, + }, + "labels": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "Labels", + Description: "Optional. Labels applied to the workload.", + }, + "location": &dcl.Property{ + Type: "string", + GoName: "Location", + Description: "The location for the resource", + Immutable: true, + Parameter: true, + }, + "name": &dcl.Property{ + Type: "string", + GoName: "Name", + Description: "Output only. The resource name of the workload.", + Immutable: true, + ServerGeneratedParameter: true, + HasLongForm: true, + }, + "organization": &dcl.Property{ + Type: "string", + GoName: "Organization", + Description: "The organization for the resource", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Cloudresourcemanager/Organization", + Field: "name", + Parent: true, + }, + }, + Parameter: true, + }, + "partner": &dcl.Property{ + Type: "string", + GoName: "Partner", + GoType: "WorkloadPartnerEnum", + Description: "Optional. Partner regime associated with this workload. 
Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN, SOVEREIGN_CONTROLS_BY_CNTXT, SOVEREIGN_CONTROLS_BY_CNTXT_NO_EKM", + Immutable: true, + Enum: []string{ + "PARTNER_UNSPECIFIED", + "LOCAL_CONTROLS_BY_S3NS", + "SOVEREIGN_CONTROLS_BY_T_SYSTEMS", + "SOVEREIGN_CONTROLS_BY_SIA_MINSAIT", + "SOVEREIGN_CONTROLS_BY_PSN", + "SOVEREIGN_CONTROLS_BY_CNTXT", + "SOVEREIGN_CONTROLS_BY_CNTXT_NO_EKM", + }, + }, + "partnerPermissions": &dcl.Property{ + Type: "object", + GoName: "PartnerPermissions", + GoType: "WorkloadPartnerPermissions", + Description: "Optional. Permissions granted to the AW Partner SA account for the customer workload", + Immutable: true, + Properties: map[string]*dcl.Property{ + "assuredWorkloadsMonitoring": &dcl.Property{ + Type: "boolean", + GoName: "AssuredWorkloadsMonitoring", + Description: "Optional. Allow partner to view violation alerts.", + Immutable: true, + }, + "dataLogsViewer": &dcl.Property{ + Type: "boolean", + GoName: "DataLogsViewer", + Description: "Allow the partner to view inspectability logs and monitoring violations.", + Immutable: true, + }, + "serviceAccessApprover": &dcl.Property{ + Type: "boolean", + GoName: "ServiceAccessApprover", + Description: "Optional. Allow partner to view access approval logs.", + Immutable: true, + }, + }, + }, + "partnerServicesBillingAccount": &dcl.Property{ + Type: "string", + GoName: "PartnerServicesBillingAccount", + Description: "Optional. Input only. Billing account necessary for purchasing services from Sovereign Partners. This field is required for creating SIA/PSN/CNTXT partner workloads. The caller should have 'billing.resourceAssociations.create' IAM permission on this billing-account. 
The format of this string is billingAccounts/AAAAAA-BBBBBB-CCCCCC.", + Immutable: true, + Unreadable: true, + }, + "provisionedResourcesParent": &dcl.Property{ + Type: "string", + GoName: "ProvisionedResourcesParent", + Description: "Input only. The parent resource for the resources managed by this Assured Workload. May be either empty or a folder resource which is a child of the Workload parent. If not specified all resources are created under the parent organization. Format: folders/{folder_id}", + Immutable: true, + Unreadable: true, + }, + "resourceSettings": &dcl.Property{ + Type: "array", + GoName: "ResourceSettings", + Description: "Input only. Resource properties that are used to customize workload resources. These properties (such as custom project id) will be used to create workload resources if possible. This field is optional.", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "object", + GoType: "WorkloadResourceSettings", + Properties: map[string]*dcl.Property{ + "displayName": &dcl.Property{ + Type: "string", + GoName: "DisplayName", + Description: "User-assigned resource display name. If not empty it will be used to create a resource with the specified name.", + Immutable: true, + }, + "resourceId": &dcl.Property{ + Type: "string", + GoName: "ResourceId", + Description: "Resource identifier. For a project this represents projectId. If the project is already taken, the workload creation will fail. For KeyRing, this represents the keyring_id. For a folder, don't set this value as folder_id is assigned by Google.", + Immutable: true, + }, + "resourceType": &dcl.Property{ + Type: "string", + GoName: "ResourceType", + GoType: "WorkloadResourceSettingsResourceTypeEnum", + Description: "Indicates the type of resource. 
This field should be specified to correspond the id to the right project type (CONSUMER_PROJECT or ENCRYPTION_KEYS_PROJECT) Possible values: RESOURCE_TYPE_UNSPECIFIED, CONSUMER_PROJECT, ENCRYPTION_KEYS_PROJECT, KEYRING, CONSUMER_FOLDER", + Immutable: true, + Enum: []string{ + "RESOURCE_TYPE_UNSPECIFIED", + "CONSUMER_PROJECT", + "ENCRYPTION_KEYS_PROJECT", + "KEYRING", + "CONSUMER_FOLDER", + }, + }, + }, + }, + Unreadable: true, + }, + "resources": &dcl.Property{ + Type: "array", + GoName: "Resources", + ReadOnly: true, + Description: "Output only. The resources associated with this workload. These resources will be created when creating the workload. If any of the projects already exist, the workload creation will fail. Always read only.", + Immutable: true, + ListType: "list", + Items: &dcl.Property{ + Type: "object", + GoType: "WorkloadResources", + Properties: map[string]*dcl.Property{ + "resourceId": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "ResourceId", + Description: "Resource identifier. For a project this represents project_number.", + Immutable: true, + }, + "resourceType": &dcl.Property{ + Type: "string", + GoName: "ResourceType", + GoType: "WorkloadResourcesResourceTypeEnum", + Description: "Indicates the type of resource. Possible values: RESOURCE_TYPE_UNSPECIFIED, CONSUMER_PROJECT, ENCRYPTION_KEYS_PROJECT, KEYRING, CONSUMER_FOLDER", + Immutable: true, + Enum: []string{ + "RESOURCE_TYPE_UNSPECIFIED", + "CONSUMER_PROJECT", + "ENCRYPTION_KEYS_PROJECT", + "KEYRING", + "CONSUMER_FOLDER", + }, + }, + }, + }, + }, + "saaEnrollmentResponse": &dcl.Property{ + Type: "object", + GoName: "SaaEnrollmentResponse", + GoType: "WorkloadSaaEnrollmentResponse", + ReadOnly: true, + Description: "Output only. Represents the SAA enrollment response of the given workload. SAA enrollment response is queried during workloads.get call. 
In failure cases, user friendly error message is shown in SAA details page.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "setupErrors": &dcl.Property{ + Type: "array", + GoName: "SetupErrors", + Description: "Indicates SAA enrollment setup error if any.", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "WorkloadSaaEnrollmentResponseSetupErrorsEnum", + Enum: []string{ + "SETUP_ERROR_UNSPECIFIED", + "ERROR_INVALID_BASE_SETUP", + "ERROR_MISSING_EXTERNAL_SIGNING_KEY", + "ERROR_NOT_ALL_SERVICES_ENROLLED", + "ERROR_SETUP_CHECK_FAILED", + }, + }, + }, + "setupStatus": &dcl.Property{ + Type: "string", + GoName: "SetupStatus", + GoType: "WorkloadSaaEnrollmentResponseSetupStatusEnum", + Description: "Indicates SAA enrollment status of a given workload. Possible values: SETUP_STATE_UNSPECIFIED, STATUS_PENDING, STATUS_COMPLETE", + Immutable: true, + Enum: []string{ + "SETUP_STATE_UNSPECIFIED", + "STATUS_PENDING", + "STATUS_COMPLETE", + }, + }, + }, + }, + "violationNotificationsEnabled": &dcl.Property{ + Type: "boolean", + GoName: "ViolationNotificationsEnabled", + Description: "Optional. Indicates whether the e-mail notification for a violation is enabled for a workload. This value will be by default True, and if not present will be considered as true. This should only be updated via updateWorkload call. Any Changes to this field during the createWorkload call will not be honored. This will always be true while creating the workload.", + Immutable: true, + }, + "workloadOptions": &dcl.Property{ + Type: "object", + GoName: "WorkloadOptions", + GoType: "WorkloadWorkloadOptions", + Description: "Optional. 
Used to specify certain options for a workload during workload creation - currently only supporting KAT Optionality for Regional Controls workloads.", + Immutable: true, + Unreadable: true, + Properties: map[string]*dcl.Property{ + "kajEnrollmentType": &dcl.Property{ + Type: "string", + GoName: "KajEnrollmentType", + GoType: "WorkloadWorkloadOptionsKajEnrollmentTypeEnum", + Description: "Indicates type of KAJ enrollment for the workload. Currently, only specifiying KEY_ACCESS_TRANSPARENCY_OFF is implemented to not enroll in KAT-level KAJ enrollment for Regional Controls workloads. Possible values: KAJ_ENROLLMENT_TYPE_UNSPECIFIED, FULL_KAJ, EKM_ONLY, KEY_ACCESS_TRANSPARENCY_OFF", + Immutable: true, + Enum: []string{ + "KAJ_ENROLLMENT_TYPE_UNSPECIFIED", + "FULL_KAJ", + "EKM_ONLY", + "KEY_ACCESS_TRANSPARENCY_OFF", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } +} diff --git a/mmv1/third_party/terraform/services/cloudbuild/client.go.tmpl b/mmv1/third_party/terraform/services/cloudbuild/client.go.tmpl new file mode 100644 index 000000000000..daccbd324abc --- /dev/null +++ b/mmv1/third_party/terraform/services/cloudbuild/client.go.tmpl @@ -0,0 +1,18 @@ +package cloudbuild + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +// The Client is the base struct of all operations. This will receive the +// Get, Delete, List, and Apply operations on all resources. +type Client struct { + Config *dcl.Config +} + +// NewClient creates a client that retries all operations a few times each. 
+func NewClient(c *dcl.Config) *Client { + return &Client{ + Config: c, + } +} diff --git a/mmv1/third_party/terraform/services/cloudbuild/cloudbuild_utils.go.tmpl b/mmv1/third_party/terraform/services/cloudbuild/cloudbuild_utils.go.tmpl new file mode 100644 index 000000000000..879c3819e6b2 --- /dev/null +++ b/mmv1/third_party/terraform/services/cloudbuild/cloudbuild_utils.go.tmpl @@ -0,0 +1,103 @@ +package cloudbuild + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +// betaToGaPrivatePool is populating GA specific PrivatePoolV1Config values and setting WorkerConfig and NetworkConfig to nil. +// r.PrivatePoolV1Config and c points to the same object. +func betaToGaPrivatePool(r *WorkerPool, c *WorkerPoolPrivatePoolV1Config) *WorkerPoolPrivatePoolV1Config { + cfgWorkerConfig := &WorkerPoolPrivatePoolV1ConfigWorkerConfig{} + cfgNetworkConfig := &WorkerPoolPrivatePoolV1ConfigNetworkConfig{} + cfgPrivateServiceConnect := &WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect{} + if r.WorkerConfig != nil { + cfgWorkerConfig.DiskSizeGb = r.WorkerConfig.DiskSizeGb + cfgWorkerConfig.MachineType = r.WorkerConfig.MachineType + cfgWorkerConfig.EnableNestedVirtualization = r.WorkerConfig.EnableNestedVirtualization + cfgNetworkConfig.EgressOption = noExternalIPEnum(r.WorkerConfig.NoExternalIP) + } + if r.NetworkConfig != nil { + cfgNetworkConfig.PeeredNetwork = r.NetworkConfig.PeeredNetwork + cfgNetworkConfig.PeeredNetworkIPRange = r.NetworkConfig.PeeredNetworkIPRange + } + if r.PrivateServiceConnect != nil { + cfgPrivateServiceConnect.NetworkAttachment = r.PrivateServiceConnect.NetworkAttachment + cfgPrivateServiceConnect.RouteAllTraffic = r.PrivateServiceConnect.RouteAllTraffic + if r.WorkerConfig != nil { + cfgPrivateServiceConnect.PublicIPAddressDisabled = r.WorkerConfig.NoExternalIP + } + } + + cfg := &WorkerPoolPrivatePoolV1Config{} + cfg.WorkerConfig = cfgWorkerConfig + cfg.NetworkConfig = cfgNetworkConfig + if 
r.PrivateServiceConnect != nil {
+		cfg.NetworkConfig = nil
+		cfg.PrivateServiceConnect = cfgPrivateServiceConnect
+	}
+
+	r.WorkerConfig = nil
+	r.NetworkConfig = nil
+	r.PrivateServiceConnect = nil
+	return cfg
+}
+
+// gaToBetaPrivatePool backfills the beta-specific WorkerConfig, NetworkConfig and PrivateServiceConnect fields from the GA-style PrivatePoolV1Config, then clears PrivatePoolV1Config.
+// r.PrivatePoolV1Config and c point to the same object; each beta field is only populated when it is still unset on r.
+func gaToBetaPrivatePool(r *WorkerPool, c *WorkerPoolPrivatePoolV1Config) *WorkerPoolPrivatePoolV1Config {
+	if c == nil {
+		return nil
+	}
+
+	if c.WorkerConfig != nil && r.WorkerConfig == nil {
+		r.WorkerConfig = &WorkerPoolWorkerConfig{
+			DiskSizeGb:                 c.WorkerConfig.DiskSizeGb,
+			MachineType:                c.WorkerConfig.MachineType,
+			EnableNestedVirtualization: c.WorkerConfig.EnableNestedVirtualization,
+		}
+		if c.NetworkConfig != nil {
+			r.WorkerConfig.NoExternalIP = noExternalIPBoolean(c.NetworkConfig)
+		}
+		if c.PrivateServiceConnect != nil {
+			r.WorkerConfig.NoExternalIP = c.PrivateServiceConnect.PublicIPAddressDisabled
+		}
+	}
+	if c.NetworkConfig != nil && c.NetworkConfig.PeeredNetwork != nil && r.NetworkConfig == nil {
+		r.NetworkConfig = &WorkerPoolNetworkConfig{
+			PeeredNetwork:        c.NetworkConfig.PeeredNetwork,
+			PeeredNetworkIPRange: c.NetworkConfig.PeeredNetworkIPRange,
+		}
+	}
+	if c.PrivateServiceConnect != nil && r.PrivateServiceConnect == nil {
+		r.PrivateServiceConnect = &WorkerPoolPrivateServiceConnect{
+			NetworkAttachment: c.PrivateServiceConnect.NetworkAttachment,
+			RouteAllTraffic:   c.PrivateServiceConnect.RouteAllTraffic,
+		}
+	}
+
+	r.PrivatePoolV1Config = nil
+	return nil
+}
+
+func noExternalIPBoolean(networkConfig *WorkerPoolPrivatePoolV1ConfigNetworkConfig) *bool {
+	if networkConfig == nil || networkConfig.EgressOption == nil {
+		return nil
+	}
+	if string(*networkConfig.EgressOption) == "NO_PUBLIC_EGRESS" {
+		return dcl.Bool(true)
+	}
+	if string(*networkConfig.EgressOption) == "PUBLIC_EGRESS" {
+		return dcl.Bool(false)
+	}
+	return nil
+}
+
+func 
noExternalIPEnum(noExternalIP *bool) *WorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnum { + if noExternalIP == nil { + return WorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnumRef("EGRESS_OPTION_UNSPECIFIED") + } + if *noExternalIP { + return WorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnumRef("NO_PUBLIC_EGRESS") + } + return WorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnumRef("PUBLIC_EGRESS") +} diff --git a/mmv1/third_party/terraform/services/cloudbuild/provider_dcl_client_creation.go b/mmv1/third_party/terraform/services/cloudbuild/provider_dcl_client_creation.go new file mode 100644 index 000000000000..ac4e19e756a8 --- /dev/null +++ b/mmv1/third_party/terraform/services/cloudbuild/provider_dcl_client_creation.go @@ -0,0 +1,30 @@ +package cloudbuild + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "time" +) + +func NewDCLCloudbuildClient(config *transport_tpg.Config, userAgent, billingProject string, timeout time.Duration) *Client { + configOptions := []dcl.ConfigOption{ + dcl.WithHTTPClient(config.Client), + dcl.WithUserAgent(userAgent), + dcl.WithLogger(dcl.DCLLogger{}), + dcl.WithBasePath(config.CloudBuildBasePath), + } + + if timeout != 0 { + configOptions = append(configOptions, dcl.WithTimeout(timeout)) + } + + if config.UserProjectOverride { + configOptions = append(configOptions, dcl.WithUserProjectOverride()) + if billingProject != "" { + configOptions = append(configOptions, dcl.WithBillingProject(billingProject)) + } + } + + dclConfig := dcl.NewConfig(configOptions...) 
// ResourceCloudbuildWorkerPool defines the Terraform resource for a Cloud Build
// private worker pool. CRUD is delegated to the DCL-generated WorkerPool client
// in this package (see resourceCloudbuildWorkerPool{Create,Read,Update,Delete}).
func ResourceCloudbuildWorkerPool() *schema.Resource {
	return &schema.Resource{
		Create: resourceCloudbuildWorkerPoolCreate,
		Read:   resourceCloudbuildWorkerPoolRead,
		Update: resourceCloudbuildWorkerPoolUpdate,
		Delete: resourceCloudbuildWorkerPoolDelete,

		Importer: &schema.ResourceImporter{
			State: resourceCloudbuildWorkerPoolImport,
		},

		Timeouts: &schema.ResourceTimeout{
			Create: schema.DefaultTimeout(20 * time.Minute),
			Update: schema.DefaultTimeout(20 * time.Minute),
			Delete: schema.DefaultTimeout(20 * time.Minute),
		},
		CustomizeDiff: customdiff.All(
			tpgresource.DefaultProviderProject,
			tpgresource.SetAnnotationsDiff,
		),

		Schema: map[string]*schema.Schema{
			"location": {
				Type:        schema.TypeString,
				Required:    true,
				ForceNew:    true,
				Description: "The location for the resource",
			},

			"name": {
				Type:        schema.TypeString,
				Required:    true,
				ForceNew:    true,
				Description: "User-defined name of the `WorkerPool`.",
			},

			"display_name": {
				Type:        schema.TypeString,
				Optional:    true,
				Description: "A user-specified, human-readable name for the `WorkerPool`. If provided, this value must be 1-63 characters.",
			},

			// Computed view of every annotation on the remote resource,
			// including ones not managed by this Terraform configuration.
			"effective_annotations": {
				Type:        schema.TypeMap,
				Computed:    true,
				Description: "All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services.",
			},

			// network_config and private_service_connect are mutually
			// exclusive connectivity modes (see ConflictsWith below).
			"network_config": {
				Type:          schema.TypeList,
				Optional:      true,
				ForceNew:      true,
				Description:   "Network configuration for the `WorkerPool`.",
				MaxItems:      1,
				Elem:          CloudbuildWorkerPoolNetworkConfigSchema(),
				ConflictsWith: []string{"private_service_connect"},
			},

			"private_service_connect": {
				Type:          schema.TypeList,
				Optional:      true,
				ForceNew:      true,
				Description:   "Private Service Connect configuration for the pool.",
				MaxItems:      1,
				Elem:          CloudbuildWorkerPoolPrivateServiceConnectSchema(),
				ConflictsWith: []string{"network_config"},
			},

			"project": {
				Type:             schema.TypeString,
				Computed:         true,
				Optional:         true,
				ForceNew:         true,
				DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName,
				Description:      "The project for the resource",
			},

			"worker_config": {
				Type:        schema.TypeList,
				Computed:    true,
				Optional:    true,
				Description: "Configuration to be used for a creating workers in the `WorkerPool`.",
				MaxItems:    1,
				Elem:        CloudbuildWorkerPoolWorkerConfigSchema(),
			},

			// Non-authoritative: only the keys present in the configuration
			// are managed; effective_annotations holds the full remote set.
			"annotations": {
				Type:        schema.TypeMap,
				Optional:    true,
				Description: "User specified annotations. See https://google.aip.dev/128#annotations for more details such as format and size limitations.\n\n**Note**: This field is non-authoritative, and will only manage the annotations present in your configuration.\nPlease refer to the field `effective_annotations` for all of the annotations present on the resource.",
				Elem:        &schema.Schema{Type: schema.TypeString},
			},

			"create_time": {
				Type:        schema.TypeString,
				Computed:    true,
				Description: "Output only. Time at which the request to create the `WorkerPool` was received.",
			},

			"delete_time": {
				Type:        schema.TypeString,
				Computed:    true,
				Description: "Output only. Time at which the request to delete the `WorkerPool` was received.",
			},

			"state": {
				Type:        schema.TypeString,
				Computed:    true,
				Description: "Output only. `WorkerPool` state. Possible values: STATE_UNSPECIFIED, PENDING, APPROVED, REJECTED, CANCELLED",
			},

			"uid": {
				Type:        schema.TypeString,
				Computed:    true,
				Description: "Output only. A unique identifier for the `WorkerPool`.",
			},

			"update_time": {
				Type:        schema.TypeString,
				Computed:    true,
				Description: "Output only. Time at which the request to update the `WorkerPool` was received.",
			},
		},
	}
}

// CloudbuildWorkerPoolNetworkConfigSchema describes the nested network_config
// block (VPC-peering connectivity for the pool). All fields are immutable.
func CloudbuildWorkerPoolNetworkConfigSchema() *schema.Resource {
	return &schema.Resource{
		Schema: map[string]*schema.Schema{
			"peered_network": {
				Type:             schema.TypeString,
				Required:         true,
				ForceNew:         true,
				DiffSuppressFunc: tpgresource.CompareResourceNames,
				Description:      "Required. Immutable. The network definition that the workers are peered to. If this section is left empty, the workers will be peered to `WorkerPool.project_id` on the service producer network. Must be in the format `projects/{project}/global/networks/{network}`, where `{project}` is a project number, such as `12345`, and `{network}` is the name of a VPC network in the project. See [Understanding network configuration options](https://cloud.google.com/cloud-build/docs/custom-workers/set-up-custom-worker-pool-environment#understanding_the_network_configuration_options)",
			},

			"peered_network_ip_range": {
				Type:        schema.TypeString,
				Optional:    true,
				ForceNew:    true,
				Description: "Optional. Immutable. Subnet IP range within the peered network. This is specified in CIDR notation with a slash and the subnet prefix size. You can optionally specify an IP address before the subnet prefix value. e.g. `192.168.0.0/29` would specify an IP range starting at 192.168.0.0 with a prefix size of 29 bits. `/16` would specify a prefix size of 16 bits, with an automatically determined IP within the peered VPC. If unspecified, a value of `/24` will be used.",
			},
		},
	}
}

// CloudbuildWorkerPoolPrivateServiceConnectSchema describes the nested
// private_service_connect block (network-attachment connectivity for the pool).
// All fields are immutable.
func CloudbuildWorkerPoolPrivateServiceConnectSchema() *schema.Resource {
	return &schema.Resource{
		Schema: map[string]*schema.Schema{
			"network_attachment": {
				Type:             schema.TypeString,
				Required:         true,
				ForceNew:         true,
				DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName,
				Description:      "Required. Immutable. The network attachment that the worker network interface is connected to. Must be in the format `projects/{project}/regions/{region}/networkAttachments/{networkAttachment}`. The region of network attachment must be the same as the worker pool. See [Network Attachments](https://cloud.google.com/vpc/docs/about-network-attachments)",
			},

			"route_all_traffic": {
				Type:        schema.TypeBool,
				Optional:    true,
				ForceNew:    true,
				Description: "Immutable. Route all traffic through PSC interface. Enable this if you want full control of traffic in the private pool. Configure Cloud NAT for the subnet of network attachment if you need to access public Internet. If false, Only route private IPs, e.g. 10.0.0.0/8, 172.16.0.0/12, and 192.168.0.0/16 through PSC interface.",
			},
		},
	}
}
If `0` is specified, Cloud Build will use a standard disk size.", + }, + + "enable_nested_virtualization": { + Type: schema.TypeBool, + Optional: true, + Description: "Enable nested virtualization on the worker, if supported by the machine type. See [Worker pool config file](https://cloud.google.com/build/docs/private-pools/worker-pool-config-file-schema). If left blank, Cloud Build will set this to false.", + }, + + "machine_type": { + Type: schema.TypeString, + Optional: true, + Description: "Machine type of a worker, such as `n1-standard-1`. See [Worker pool config file](https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). If left blank, Cloud Build will use `n1-standard-1`.", + }, + + "no_external_ip": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + Description: "If true, workers are created without any public address, which prevents network egress to public IPs.", + }, + }, + } +} + +func resourceCloudbuildWorkerPoolCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &WorkerPool{ + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), + NetworkConfig: expandCloudbuildWorkerPoolNetworkConfig(d.Get("network_config")), + PrivateServiceConnect: expandCloudbuildWorkerPoolPrivateServiceConnect(d.Get("private_service_connect")), + Project: dcl.String(project), + WorkerConfig: expandCloudbuildWorkerPoolWorkerConfig(d.Get("worker_config")), + } + + id, err := obj.ID() + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } 
+ billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLCloudbuildClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyWorkerPool(context.Background(), obj, directive...) + + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating WorkerPool: %s", err) + } + + log.Printf("[DEBUG] Finished creating WorkerPool %q: %#v", d.Id(), res) + + return resourceCloudbuildWorkerPoolRead(d, meta) +} + +func resourceCloudbuildWorkerPoolRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &WorkerPool{ + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), + NetworkConfig: expandCloudbuildWorkerPoolNetworkConfig(d.Get("network_config")), + PrivateServiceConnect: expandCloudbuildWorkerPoolPrivateServiceConnect(d.Get("private_service_connect")), + Project: dcl.String(project), + WorkerConfig: expandCloudbuildWorkerPoolWorkerConfig(d.Get("worker_config")), + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := 
tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLCloudbuildClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetWorkerPool(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("CloudbuildWorkerPool %q", d.Id()) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("location", res.Location); err != nil { + return fmt.Errorf("error setting location in state: %s", err) + } + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + if err = d.Set("display_name", res.DisplayName); err != nil { + return fmt.Errorf("error setting display_name in state: %s", err) + } + if err = d.Set("effective_annotations", res.Annotations); err != nil { + return fmt.Errorf("error setting effective_annotations in state: %s", err) + } + if err = d.Set("network_config", flattenCloudbuildWorkerPoolNetworkConfig(res.NetworkConfig)); err != nil { + return fmt.Errorf("error setting network_config in state: %s", err) + } + if err = d.Set("private_service_connect", flattenCloudbuildWorkerPoolPrivateServiceConnect(res.PrivateServiceConnect)); err != nil { + return fmt.Errorf("error setting private_service_connect in state: %s", err) + } + if err = d.Set("project", res.Project); err != nil { + return fmt.Errorf("error setting project in state: %s", err) + } + if err = d.Set("worker_config", flattenCloudbuildWorkerPoolWorkerConfig(res.WorkerConfig)); err != nil { + return fmt.Errorf("error setting worker_config in state: %s", err) + } + if err = d.Set("annotations", flattenCloudbuildWorkerPoolAnnotations(res.Annotations, d)); err != nil { + return fmt.Errorf("error setting 
annotations in state: %s", err) + } + if err = d.Set("create_time", res.CreateTime); err != nil { + return fmt.Errorf("error setting create_time in state: %s", err) + } + if err = d.Set("delete_time", res.DeleteTime); err != nil { + return fmt.Errorf("error setting delete_time in state: %s", err) + } + if err = d.Set("state", res.State); err != nil { + return fmt.Errorf("error setting state in state: %s", err) + } + if err = d.Set("uid", res.Uid); err != nil { + return fmt.Errorf("error setting uid in state: %s", err) + } + if err = d.Set("update_time", res.UpdateTime); err != nil { + return fmt.Errorf("error setting update_time in state: %s", err) + } + + return nil +} +func resourceCloudbuildWorkerPoolUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &WorkerPool{ + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), + NetworkConfig: expandCloudbuildWorkerPoolNetworkConfig(d.Get("network_config")), + PrivateServiceConnect: expandCloudbuildWorkerPoolPrivateServiceConnect(d.Get("private_service_connect")), + Project: dcl.String(project), + WorkerConfig: expandCloudbuildWorkerPoolWorkerConfig(d.Get("worker_config")), + } + directive := tpgdclresource.UpdateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLCloudbuildClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + 
d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyWorkerPool(context.Background(), obj, directive...) + + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error updating WorkerPool: %s", err) + } + + log.Printf("[DEBUG] Finished creating WorkerPool %q: %#v", d.Id(), res) + + return resourceCloudbuildWorkerPoolRead(d, meta) +} + +func resourceCloudbuildWorkerPoolDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &WorkerPool{ + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), + NetworkConfig: expandCloudbuildWorkerPoolNetworkConfig(d.Get("network_config")), + PrivateServiceConnect: expandCloudbuildWorkerPoolPrivateServiceConnect(d.Get("private_service_connect")), + Project: dcl.String(project), + WorkerConfig: expandCloudbuildWorkerPoolWorkerConfig(d.Get("worker_config")), + } + + log.Printf("[DEBUG] Deleting WorkerPool %q", d.Id()) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLCloudbuildClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: 
%w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + if err := client.DeleteWorkerPool(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting WorkerPool: %s", err) + } + + log.Printf("[DEBUG] Finished deleting WorkerPool %q", d.Id()) + return nil +} + +func resourceCloudbuildWorkerPoolImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/workerPools/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/workerPools/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func expandCloudbuildWorkerPoolNetworkConfig(o interface{}) *WorkerPoolNetworkConfig { + if o == nil { + return EmptyWorkerPoolNetworkConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkerPoolNetworkConfig + } + obj := objArr[0].(map[string]interface{}) + return &WorkerPoolNetworkConfig{ + PeeredNetwork: dcl.String(obj["peered_network"].(string)), + PeeredNetworkIPRange: dcl.String(obj["peered_network_ip_range"].(string)), + } +} + +func flattenCloudbuildWorkerPoolNetworkConfig(obj *WorkerPoolNetworkConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "peered_network": obj.PeeredNetwork, + "peered_network_ip_range": obj.PeeredNetworkIPRange, + } + + return []interface{}{transformed} + +} + +func expandCloudbuildWorkerPoolPrivateServiceConnect(o interface{}) *WorkerPoolPrivateServiceConnect { + if o == nil { + return EmptyWorkerPoolPrivateServiceConnect + } + objArr := 
o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkerPoolPrivateServiceConnect + } + obj := objArr[0].(map[string]interface{}) + return &WorkerPoolPrivateServiceConnect{ + NetworkAttachment: dcl.String(obj["network_attachment"].(string)), + RouteAllTraffic: dcl.Bool(obj["route_all_traffic"].(bool)), + } +} + +func flattenCloudbuildWorkerPoolPrivateServiceConnect(obj *WorkerPoolPrivateServiceConnect) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "network_attachment": obj.NetworkAttachment, + "route_all_traffic": obj.RouteAllTraffic, + } + + return []interface{}{transformed} + +} + +func expandCloudbuildWorkerPoolWorkerConfig(o interface{}) *WorkerPoolWorkerConfig { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &WorkerPoolWorkerConfig{ + DiskSizeGb: dcl.Int64(int64(obj["disk_size_gb"].(int))), + EnableNestedVirtualization: dcl.Bool(obj["enable_nested_virtualization"].(bool)), + MachineType: dcl.String(obj["machine_type"].(string)), + NoExternalIP: dcl.Bool(obj["no_external_ip"].(bool)), + } +} + +func flattenCloudbuildWorkerPoolWorkerConfig(obj *WorkerPoolWorkerConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "disk_size_gb": obj.DiskSizeGb, + "enable_nested_virtualization": obj.EnableNestedVirtualization, + "machine_type": obj.MachineType, + "no_external_ip": obj.NoExternalIP, + } + + return []interface{}{transformed} + +} + +func flattenCloudbuildWorkerPoolAnnotations(v map[string]string, d *schema.ResourceData) interface{} { + if v == nil { + return nil + } + + transformed := make(map[string]interface{}) + if l, ok := d.Get("annotations").(map[string]interface{}); ok { + for k := range l { + transformed[k] = v[k] + } + } + + return transformed +} diff --git 
a/mmv1/third_party/terraform/services/cloudbuild/resource_cloudbuild_worker_pool_sweeper.go b/mmv1/third_party/terraform/services/cloudbuild/resource_cloudbuild_worker_pool_sweeper.go new file mode 100644 index 000000000000..8576c3b4b5d6 --- /dev/null +++ b/mmv1/third_party/terraform/services/cloudbuild/resource_cloudbuild_worker_pool_sweeper.go @@ -0,0 +1,53 @@ +package cloudbuild + +import ( + "context" + "log" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" +) + +func init() { + sweeper.AddTestSweepersLegacy("CloudbuildWorkerPool", testSweepCloudbuildWorkerPool) +} + +func testSweepCloudbuildWorkerPool(region string) error { + log.Print("[INFO][SWEEPER_LOG] Starting sweeper for CloudbuildWorkerPool") + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to be used for Delete arguments. 
+ d := map[string]string{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + } + + client := NewDCLCloudbuildClient(config, config.UserAgent, "", 0) + err = client.DeleteAllWorkerPool(context.Background(), d["project"], d["location"], isDeletableCloudbuildWorkerPool) + if err != nil { + return err + } + return nil +} + +func isDeletableCloudbuildWorkerPool(r *WorkerPool) bool { + return sweeper.IsSweepableTestResource(*r.Name) +} diff --git a/mmv1/third_party/terraform/services/cloudbuild/worker_pool.go.tmpl b/mmv1/third_party/terraform/services/cloudbuild/worker_pool.go.tmpl new file mode 100644 index 000000000000..d3cc6238c479 --- /dev/null +++ b/mmv1/third_party/terraform/services/cloudbuild/worker_pool.go.tmpl @@ -0,0 +1,802 @@ +package cloudbuild + +import ( + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "time" + +{{- if ne $.TargetVersionName "ga" }} + "google.golang.org/api/googleapi" +{{- end }} + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +{{- if eq $.TargetVersionName "ga" }} + "google.golang.org/api/googleapi" +{{- end }} +) + +type WorkerPool struct { + Name *string `json:"name"` + DisplayName *string `json:"displayName"` + Uid *string `json:"uid"` + Annotations map[string]string `json:"annotations"` + CreateTime *string `json:"createTime"` + UpdateTime *string `json:"updateTime"` + DeleteTime *string `json:"deleteTime"` + State *WorkerPoolStateEnum `json:"state"` + PrivatePoolV1Config *WorkerPoolPrivatePoolV1Config `json:"privatePoolV1Config"` + Etag *string `json:"etag"` + WorkerConfig *WorkerPoolWorkerConfig `json:"workerConfig"` + NetworkConfig *WorkerPoolNetworkConfig `json:"networkConfig"` + PrivateServiceConnect *WorkerPoolPrivateServiceConnect `json:"privateServiceConnect"` + Project *string `json:"project"` + Location *string `json:"location"` +} + +func (r *WorkerPool) String() string { + return dcl.SprintResource(r) +} + +// The 
enum WorkerPoolStateEnum. +type WorkerPoolStateEnum string + +// WorkerPoolStateEnumRef returns a *WorkerPoolStateEnum with the value of string s +// If the empty string is provided, nil is returned. +func WorkerPoolStateEnumRef(s string) *WorkerPoolStateEnum { + v := WorkerPoolStateEnum(s) + return &v +} + +func (v WorkerPoolStateEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"STATE_UNSPECIFIED", "PENDING", "APPROVED", "REJECTED", "CANCELLED"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "WorkerPoolStateEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum WorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnum. +type WorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnum string + +// WorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnumRef returns a *WorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnum with the value of string s +// If the empty string is provided, nil is returned. +func WorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnumRef(s string) *WorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnum { + v := WorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnum(s) + return &v +} + +func (v WorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. 
+ return nil + } + for _, s := range []string{"EGRESS_OPTION_UNSPECIFIED", "NO_PUBLIC_EGRESS", "PUBLIC_EGRESS"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "WorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnum", + Value: string(v), + Valid: []string{}, + } +} + +type WorkerPoolPrivatePoolV1Config struct { + empty bool `json:"-"` + WorkerConfig *WorkerPoolPrivatePoolV1ConfigWorkerConfig `json:"workerConfig"` + NetworkConfig *WorkerPoolPrivatePoolV1ConfigNetworkConfig `json:"networkConfig"` + PrivateServiceConnect *WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect `json:"privateServiceConnect"` +} + +type jsonWorkerPoolPrivatePoolV1Config WorkerPoolPrivatePoolV1Config + +func (r *WorkerPoolPrivatePoolV1Config) UnmarshalJSON(data []byte) error { + var res jsonWorkerPoolPrivatePoolV1Config + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkerPoolPrivatePoolV1Config + } else { + + r.WorkerConfig = res.WorkerConfig + + r.NetworkConfig = res.NetworkConfig + + r.PrivateServiceConnect = res.PrivateServiceConnect + + } + return nil +} + +// This object is used to assert a desired state where this WorkerPoolPrivatePoolV1Config is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkerPoolPrivatePoolV1Config *WorkerPoolPrivatePoolV1Config = &WorkerPoolPrivatePoolV1Config{empty: true} + +func (r *WorkerPoolPrivatePoolV1Config) Empty() bool { + return r.empty +} + +func (r *WorkerPoolPrivatePoolV1Config) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkerPoolPrivatePoolV1Config) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkerPoolPrivatePoolV1ConfigWorkerConfig struct { + empty bool `json:"-"` + MachineType *string `json:"machineType"` + DiskSizeGb *int64 `json:"diskSizeGb"` + EnableNestedVirtualization *bool `json:"enableNestedVirtualization"` +} + +type jsonWorkerPoolPrivatePoolV1ConfigWorkerConfig WorkerPoolPrivatePoolV1ConfigWorkerConfig + +func (r *WorkerPoolPrivatePoolV1ConfigWorkerConfig) UnmarshalJSON(data []byte) error { + var res jsonWorkerPoolPrivatePoolV1ConfigWorkerConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkerPoolPrivatePoolV1ConfigWorkerConfig + } else { + + r.MachineType = res.MachineType + + r.DiskSizeGb = res.DiskSizeGb + + r.EnableNestedVirtualization = res.EnableNestedVirtualization + + } + return nil +} + +// This object is used to assert a desired state where this WorkerPoolPrivatePoolV1ConfigWorkerConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkerPoolPrivatePoolV1ConfigWorkerConfig *WorkerPoolPrivatePoolV1ConfigWorkerConfig = &WorkerPoolPrivatePoolV1ConfigWorkerConfig{empty: true} + +func (r *WorkerPoolPrivatePoolV1ConfigWorkerConfig) Empty() bool { + return r.empty +} + +func (r *WorkerPoolPrivatePoolV1ConfigWorkerConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkerPoolPrivatePoolV1ConfigWorkerConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkerPoolPrivatePoolV1ConfigNetworkConfig struct { + empty bool `json:"-"` + PeeredNetwork *string `json:"peeredNetwork"` + PeeredNetworkIPRange *string `json:"peeredNetworkIPRange"` + EgressOption *WorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnum `json:"egressOption"` +} + +type jsonWorkerPoolPrivatePoolV1ConfigNetworkConfig WorkerPoolPrivatePoolV1ConfigNetworkConfig + +func (r *WorkerPoolPrivatePoolV1ConfigNetworkConfig) UnmarshalJSON(data []byte) error { + var res jsonWorkerPoolPrivatePoolV1ConfigNetworkConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkerPoolPrivatePoolV1ConfigNetworkConfig + } else { + + r.PeeredNetwork = res.PeeredNetwork + + r.PeeredNetworkIPRange = res.PeeredNetworkIPRange + + r.EgressOption = res.EgressOption + + } + return nil +} + +// This object is used to assert a desired state where this WorkerPoolPrivatePoolV1ConfigNetworkConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkerPoolPrivatePoolV1ConfigNetworkConfig *WorkerPoolPrivatePoolV1ConfigNetworkConfig = &WorkerPoolPrivatePoolV1ConfigNetworkConfig{empty: true} + +func (r *WorkerPoolPrivatePoolV1ConfigNetworkConfig) Empty() bool { + return r.empty +} + +func (r *WorkerPoolPrivatePoolV1ConfigNetworkConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkerPoolPrivatePoolV1ConfigNetworkConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect struct { + empty bool `json:"-"` + NetworkAttachment *string `json:"networkAttachment"` + PublicIPAddressDisabled *bool `json:"publicIPAddressDisabled"` + RouteAllTraffic *bool `json:"routeAllTraffic"` +} + +type jsonWorkerPoolPrivatePoolV1ConfigPrivateServiceConnect WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect + +func (r *WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect) UnmarshalJSON(data []byte) error { + var res jsonWorkerPoolPrivatePoolV1ConfigPrivateServiceConnect + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkerPoolPrivatePoolV1ConfigPrivateServiceConnect + } else { + + r.NetworkAttachment = res.NetworkAttachment + + r.PublicIPAddressDisabled = res.PublicIPAddressDisabled + + r.RouteAllTraffic = res.RouteAllTraffic + + } + return nil +} + +// This object is used to assert a desired state where this WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkerPoolPrivatePoolV1ConfigPrivateServiceConnect *WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect = &WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect{empty: true} + +func (r *WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect) Empty() bool { + return r.empty +} + +func (r *WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkerPoolWorkerConfig struct { + empty bool `json:"-"` + MachineType *string `json:"machineType"` + DiskSizeGb *int64 `json:"diskSizeGb"` + EnableNestedVirtualization *bool `json:"enableNestedVirtualization"` + NoExternalIP *bool `json:"noExternalIP"` +} + +type jsonWorkerPoolWorkerConfig WorkerPoolWorkerConfig + +func (r *WorkerPoolWorkerConfig) UnmarshalJSON(data []byte) error { + var res jsonWorkerPoolWorkerConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkerPoolWorkerConfig + } else { + + r.MachineType = res.MachineType + + r.DiskSizeGb = res.DiskSizeGb + + r.EnableNestedVirtualization = res.EnableNestedVirtualization + + r.NoExternalIP = res.NoExternalIP + + } + return nil +} + +// This object is used to assert a desired state where this WorkerPoolWorkerConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkerPoolWorkerConfig *WorkerPoolWorkerConfig = &WorkerPoolWorkerConfig{empty: true} + +func (r *WorkerPoolWorkerConfig) Empty() bool { + return r.empty +} + +func (r *WorkerPoolWorkerConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkerPoolWorkerConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkerPoolNetworkConfig struct { + empty bool `json:"-"` + PeeredNetwork *string `json:"peeredNetwork"` + PeeredNetworkIPRange *string `json:"peeredNetworkIPRange"` +} + +type jsonWorkerPoolNetworkConfig WorkerPoolNetworkConfig + +func (r *WorkerPoolNetworkConfig) UnmarshalJSON(data []byte) error { + var res jsonWorkerPoolNetworkConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkerPoolNetworkConfig + } else { + + r.PeeredNetwork = res.PeeredNetwork + + r.PeeredNetworkIPRange = res.PeeredNetworkIPRange + + } + return nil +} + +// This object is used to assert a desired state where this WorkerPoolNetworkConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkerPoolNetworkConfig *WorkerPoolNetworkConfig = &WorkerPoolNetworkConfig{empty: true} + +func (r *WorkerPoolNetworkConfig) Empty() bool { + return r.empty +} + +func (r *WorkerPoolNetworkConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkerPoolNetworkConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkerPoolPrivateServiceConnect struct { + empty bool `json:"-"` + NetworkAttachment *string `json:"networkAttachment"` + RouteAllTraffic *bool `json:"routeAllTraffic"` +} + +type jsonWorkerPoolPrivateServiceConnect WorkerPoolPrivateServiceConnect + +func (r *WorkerPoolPrivateServiceConnect) UnmarshalJSON(data []byte) error { + var res jsonWorkerPoolPrivateServiceConnect + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkerPoolPrivateServiceConnect + } else { + + r.NetworkAttachment = res.NetworkAttachment + + r.RouteAllTraffic = res.RouteAllTraffic + + } + return nil +} + +// This object is used to assert a desired state where this WorkerPoolPrivateServiceConnect is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkerPoolPrivateServiceConnect *WorkerPoolPrivateServiceConnect = &WorkerPoolPrivateServiceConnect{empty: true} + +func (r *WorkerPoolPrivateServiceConnect) Empty() bool { + return r.empty +} + +func (r *WorkerPoolPrivateServiceConnect) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkerPoolPrivateServiceConnect) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +// Describe returns a simple description of this resource to ensure that automated tools +// can identify it. +func (r *WorkerPool) Describe() dcl.ServiceTypeVersion { + return dcl.ServiceTypeVersion{ + Service: "cloud_build", + Type: "WorkerPool", +{{- if ne $.TargetVersionName "ga" }} + Version: "beta", +{{- else }} + Version: "cloudbuild", +{{- end }} + } +} + +func (r *WorkerPool) ID() (string, error) { + if err := extractWorkerPoolFields(r); err != nil { + return "", err + } + nr := r.urlNormalized() + params := map[string]interface{}{ + "name": dcl.ValueOrEmptyString(nr.Name), + "display_name": dcl.ValueOrEmptyString(nr.DisplayName), + "uid": dcl.ValueOrEmptyString(nr.Uid), + "annotations": dcl.ValueOrEmptyString(nr.Annotations), + "create_time": dcl.ValueOrEmptyString(nr.CreateTime), + "update_time": dcl.ValueOrEmptyString(nr.UpdateTime), + "delete_time": dcl.ValueOrEmptyString(nr.DeleteTime), + "state": dcl.ValueOrEmptyString(nr.State), + "private_pool_v1_config": dcl.ValueOrEmptyString(nr.PrivatePoolV1Config), + "etag": dcl.ValueOrEmptyString(nr.Etag), + "worker_config": dcl.ValueOrEmptyString(nr.WorkerConfig), + "network_config": dcl.ValueOrEmptyString(nr.NetworkConfig), + "private_service_connect": dcl.ValueOrEmptyString(nr.PrivateServiceConnect), + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + } + return dcl.Nprintf("projects/{{ "{{" 
}}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/workerPools/{{ "{{" }}name{{ "}}" }}", params), nil +} + +const WorkerPoolMaxPage = -1 + +type WorkerPoolList struct { + Items []*WorkerPool + + nextToken string + + pageSize int32 + + resource *WorkerPool +} + +func (l *WorkerPoolList) HasNext() bool { + return l.nextToken != "" +} + +func (l *WorkerPoolList) Next(ctx context.Context, c *Client) error { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if !l.HasNext() { + return fmt.Errorf("no next page") + } + items, token, err := c.listWorkerPool(ctx, l.resource, l.nextToken, l.pageSize) + if err != nil { + return err + } + l.Items = items + l.nextToken = token + return err +} + +func (c *Client) ListWorkerPool(ctx context.Context, project, location string) (*WorkerPoolList, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + return c.ListWorkerPoolWithMaxResults(ctx, project, location, WorkerPoolMaxPage) + +} + +func (c *Client) ListWorkerPoolWithMaxResults(ctx context.Context, project, location string, pageSize int32) (*WorkerPoolList, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // Create a resource object so that we can use proper url normalization methods. + r := &WorkerPool{ + Project: &project, + Location: &location, + } + items, token, err := c.listWorkerPool(ctx, r, "", pageSize) + if err != nil { + return nil, err + } + return &WorkerPoolList{ + Items: items, + nextToken: token, + pageSize: pageSize, + resource: r, + }, nil +} + +func (c *Client) GetWorkerPool(ctx context.Context, r *WorkerPool) (*WorkerPool, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // This is *purposefully* supressing errors. 
+ // This function is used with url-normalized values + not URL normalized values. + // URL Normalized values will throw unintentional errors, since those values are not of the proper parent form. + extractWorkerPoolFields(r) + + b, err := c.getWorkerPoolRaw(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + return nil, &googleapi.Error{ + Code: 404, + Message: err.Error(), + } + } + return nil, err + } + result, err := unmarshalWorkerPool(b, c, r) + if err != nil { + return nil, err + } + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name + + c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) + result, err = canonicalizeWorkerPoolNewState(c, result, r) + if err != nil { + return nil, err + } + if err := postReadExtractWorkerPoolFields(result); err != nil { + return result, err + } + c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result) + + return result, nil +} + +func (c *Client) DeleteWorkerPool(ctx context.Context, r *WorkerPool) error { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if r == nil { + return fmt.Errorf("WorkerPool resource is nil") + } + c.Config.Logger.InfoWithContext(ctx, "Deleting WorkerPool...") + deleteOp := deleteWorkerPoolOperation{} + return deleteOp.do(ctx, r, c) +} + +// DeleteAllWorkerPool deletes all resources that the filter functions returns true on. 
+func (c *Client) DeleteAllWorkerPool(ctx context.Context, project, location string, filter func(*WorkerPool) bool) error { + listObj, err := c.ListWorkerPool(ctx, project, location) + if err != nil { + return err + } + + err = c.deleteAllWorkerPool(ctx, filter, listObj.Items) + if err != nil { + return err + } + for listObj.HasNext() { + err = listObj.Next(ctx, c) + if err != nil { + return nil + } + err = c.deleteAllWorkerPool(ctx, filter, listObj.Items) + if err != nil { + return err + } + } + return nil +} + +func (c *Client) ApplyWorkerPool(ctx context.Context, rawDesired *WorkerPool, opts ...dcl.ApplyOption) (*WorkerPool, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + ctx = dcl.ContextWithRequestID(ctx) + var resultNewState *WorkerPool + err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + newState, err := applyWorkerPoolHelper(c, ctx, rawDesired, opts...) + resultNewState = newState + if err != nil { + // If the error is 409, there is conflict in resource update. + // Here we want to apply changes based on latest state. + if dcl.IsConflictError(err) { + return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} + } + return nil, err + } + return nil, nil + }, c.Config.RetryProvider) + return resultNewState, err +} + +func applyWorkerPoolHelper(c *Client, ctx context.Context, rawDesired *WorkerPool, opts ...dcl.ApplyOption) (*WorkerPool, error) { + c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyWorkerPool...") + c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) + + // 1.1: Validation of user-specified fields in desired state. + if err := rawDesired.validate(); err != nil { + return nil, err + } + + if err := extractWorkerPoolFields(rawDesired); err != nil { + return nil, err + } + + initial, desired, fieldDiffs, err := c.workerPoolDiffsForRawDesired(ctx, rawDesired, opts...) 
+ if err != nil { + return nil, fmt.Errorf("failed to create a diff: %w", err) + } + + diffs, err := convertFieldDiffsToWorkerPoolDiffs(c.Config, fieldDiffs, opts) + if err != nil { + return nil, err + } + + // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). + + // 2.3: Lifecycle Directive Check + var create bool + lp := dcl.FetchLifecycleParams(opts) + if initial == nil { + if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} + } + create = true + } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), + } + } else { + for _, d := range diffs { + if d.RequiresRecreate { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), + } + } + if dcl.HasLifecycleParam(lp, dcl.BlockModification) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} + } + } + } + + // 2.4 Imperative Request Planning + var ops []workerPoolApiOperation + if create { + ops = append(ops, &createWorkerPoolOperation{}) + } else { + for _, d := range diffs { + ops = append(ops, d.UpdateOp) + } + } + c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) + + // 2.5 Request Actuation + for _, op := range ops { + c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) + if err := op.do(ctx, desired, c); err != nil { + c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", op, op, err) + return nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) + } + return applyWorkerPoolDiff(c, ctx, desired, rawDesired, ops, opts...) 
+} + +func applyWorkerPoolDiff(c *Client, ctx context.Context, desired *WorkerPool, rawDesired *WorkerPool, ops []workerPoolApiOperation, opts ...dcl.ApplyOption) (*WorkerPool, error) { + // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") + rawNew, err := c.GetWorkerPool(ctx, desired) + if err != nil { + return nil, err + } + // Get additional values from the first response. + // These values should be merged into the newState above. + if len(ops) > 0 { + lastOp := ops[len(ops)-1] + if o, ok := lastOp.(*createWorkerPoolOperation); ok { + if r, hasR := o.FirstResponse(); hasR { + + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") + + fullResp, err := unmarshalMapWorkerPool(r, c, rawDesired) + if err != nil { + return nil, err + } + + rawNew, err = canonicalizeWorkerPoolNewState(c, rawNew, fullResp) + if err != nil { + return nil, err + } + } + } + } + + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) + // 3.2b Canonicalization of raw new state using raw desired state + newState, err := canonicalizeWorkerPoolNewState(c, rawNew, rawDesired) + if err != nil { + return rawNew, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) + // 3.3 Comparison of the new state and raw desired state. + // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE + newDesired, err := canonicalizeWorkerPoolDesiredState(rawDesired, newState) + if err != nil { + return newState, err + } + + if err := postReadExtractWorkerPoolFields(newState); err != nil { + return newState, err + } + + // Need to ensure any transformations made here match acceptably in differ. 
+ if err := postReadExtractWorkerPoolFields(newDesired); err != nil { + return newState, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) + newDiffs, err := diffWorkerPool(c, newDesired, newState) + if err != nil { + return newState, err + } + + if len(newDiffs) == 0 { + c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") + } else { + c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) + diffMessages := make([]string, len(newDiffs)) + for i, d := range newDiffs { + diffMessages[i] = fmt.Sprintf("%v", d) + } + return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} + } + c.Config.Logger.InfoWithContext(ctx, "Done Apply.") + return newState, nil +} diff --git a/mmv1/third_party/terraform/services/cloudbuild/worker_pool_internal.go.tmpl b/mmv1/third_party/terraform/services/cloudbuild/worker_pool_internal.go.tmpl new file mode 100644 index 000000000000..e76775314eef --- /dev/null +++ b/mmv1/third_party/terraform/services/cloudbuild/worker_pool_internal.go.tmpl @@ -0,0 +1,3509 @@ +package cloudbuild + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "strings" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource/operations" +) + +func (r *WorkerPool) validate() error { + + if err := dcl.ValidateAtMostOneOfFieldsSet([]string{"NetworkConfig", "PrivatePoolV1Config"}, r.NetworkConfig, r.PrivatePoolV1Config); err != nil { + return err + } + if err := dcl.ValidateAtMostOneOfFieldsSet([]string{"WorkerConfig", "PrivatePoolV1Config"}, r.WorkerConfig, r.PrivatePoolV1Config); err != nil { + return err + } + if err := dcl.ValidateAtMostOneOfFieldsSet([]string{"PrivateServiceConnect", "PrivatePoolV1Config"}, r.PrivateServiceConnect, r.PrivatePoolV1Config); err != nil { + return err + } + if err := 
dcl.ValidateAtMostOneOfFieldsSet([]string{"PrivateServiceConnect", "NetworkConfig"}, r.PrivateServiceConnect, r.NetworkConfig); err != nil { + return err + } + if err := dcl.Required(r, "name"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Project, "Project"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Location, "Location"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.PrivatePoolV1Config) { + if err := r.PrivatePoolV1Config.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.WorkerConfig) { + if err := r.WorkerConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.NetworkConfig) { + if err := r.NetworkConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.PrivateServiceConnect) { + if err := r.PrivateServiceConnect.validate(); err != nil { + return err + } + } + return nil +} +func (r *WorkerPoolPrivatePoolV1Config) validate() error { + if err := dcl.ValidateAtMostOneOfFieldsSet([]string{"NetworkConfig", "PrivateServiceConnect"}, r.NetworkConfig, r.PrivateServiceConnect); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.WorkerConfig) { + if err := r.WorkerConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.NetworkConfig) { + if err := r.NetworkConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.PrivateServiceConnect) { + if err := r.PrivateServiceConnect.validate(); err != nil { + return err + } + } + return nil +} +func (r *WorkerPoolPrivatePoolV1ConfigWorkerConfig) validate() error { + return nil +} +func (r *WorkerPoolPrivatePoolV1ConfigNetworkConfig) validate() error { + return nil +} +func (r *WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect) validate() error { + return nil +} +func (r *WorkerPoolWorkerConfig) validate() error { + return nil +} +func (r *WorkerPoolNetworkConfig) validate() error { + if err := dcl.Required(r, 
"peeredNetwork"); err != nil { + return err + } + return nil +} +func (r *WorkerPoolPrivateServiceConnect) validate() error { + if err := dcl.Required(r, "networkAttachment"); err != nil { + return err + } + return nil +} +func (r *WorkerPool) basePath() string { + params := map[string]interface{}{} + return dcl.Nprintf("https://cloudbuild.googleapis.com/v1/", params) +} + +func (r *WorkerPool) getURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/workerPools/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +func (r *WorkerPool) listURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/workerPools", nr.basePath(), userBasePath, params), nil + +} + +func (r *WorkerPool) createURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/workerPools?workerPoolId={{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil + +} + +func (r *WorkerPool) deleteURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return 
dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/workerPools/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +// workerPoolApiOperation represents a mutable operation in the underlying REST +// API such as Create, Update, or Delete. +type workerPoolApiOperation interface { + do(context.Context, *WorkerPool, *Client) error +} + +// newUpdateWorkerPoolUpdateWorkerPoolRequest creates a request for an +// WorkerPool resource's UpdateWorkerPool update type by filling in the update +// fields based on the intended state of the resource. +func newUpdateWorkerPoolUpdateWorkerPoolRequest(ctx context.Context, f *WorkerPool, c *Client) (map[string]interface{}, error) { + req := map[string]interface{}{} + res := f + _ = res + + if v := f.DisplayName; !dcl.IsEmptyValueIndirect(v) { + req["displayName"] = v + } + if v := f.Annotations; !dcl.IsEmptyValueIndirect(v) { + req["annotations"] = v + } + if v, err := expandWorkerPoolPrivatePoolV1Config(c, f.PrivatePoolV1Config, res); err != nil { + return nil, fmt.Errorf("error expanding PrivatePoolV1Config into privatePoolV1Config: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["privatePoolV1Config"] = v + } + if v, err := expandWorkerPoolWorkerConfig(c, f.WorkerConfig, res); err != nil { + return nil, fmt.Errorf("error expanding WorkerConfig into workerConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["workerConfig"] = v + } + b, err := c.getWorkerPoolRaw(ctx, f) + if err != nil { + return nil, err + } + var m map[string]interface{} + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + rawEtag, err := dcl.GetMapEntry( + m, + []string{"etag"}, + ) + if err != nil { + c.Config.Logger.WarningWithContextf(ctx, "Failed to fetch from JSON Path: %v", err) + } else { + req["etag"] = rawEtag.(string) + } + return req, nil +} + +// marshalUpdateWorkerPoolUpdateWorkerPoolRequest converts the update into +// the final JSON request body. 
+func marshalUpdateWorkerPoolUpdateWorkerPoolRequest(c *Client, m map[string]interface{}) ([]byte, error) { + + return json.Marshal(m) +} + +type updateWorkerPoolUpdateWorkerPoolOperation struct { + // If the update operation has the REQUIRES_APPLY_OPTIONS trait, this will be populated. + // Usually it will be nil - this is to prevent us from accidentally depending on apply + // options, which should usually be unnecessary. + ApplyOptions []dcl.ApplyOption + FieldDiffs []*dcl.FieldDiff +} + +// do creates a request and sends it to the appropriate URL. In most operations, +// do will transcribe a subset of the resource into a request object and send a +// PUT request to a single URL. + +func (op *updateWorkerPoolUpdateWorkerPoolOperation) do(ctx context.Context, r *WorkerPool, c *Client) error { + _, err := c.GetWorkerPool(ctx, r) + if err != nil { + return err + } + + u, err := r.updateURL(c.Config.BasePath, "UpdateWorkerPool") + if err != nil { + return err + } + mask := dcl.UpdateMask(op.FieldDiffs) + u, err = dcl.AddQueryParams(u, map[string]string{"updateMask": mask}) + if err != nil { + return err + } + + req, err := newUpdateWorkerPoolUpdateWorkerPoolRequest(ctx, r, c) + if err != nil { + return err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created update: %#v", req) + body, err := marshalUpdateWorkerPoolUpdateWorkerPoolRequest(c, req) + if err != nil { + return err + } + resp, err := dcl.SendRequest(ctx, c.Config, "PATCH", u, bytes.NewBuffer(body), c.Config.RetryProvider) + if err != nil { + return err + } + + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + err = o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET") + + if err != nil { + return err + } + + return nil +} + +func (c *Client) listWorkerPoolRaw(ctx context.Context, r *WorkerPool, pageToken string, pageSize int32) ([]byte, error) { + u, err := 
r.urlNormalized().listURL(c.Config.BasePath) + if err != nil { + return nil, err + } + + m := make(map[string]string) + if pageToken != "" { + m["pageToken"] = pageToken + } + + if pageSize != WorkerPoolMaxPage { + m["pageSize"] = fmt.Sprintf("%v", pageSize) + } + + u, err = dcl.AddQueryParams(u, m) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + return ioutil.ReadAll(resp.Response.Body) +} + +type listWorkerPoolOperation struct { + WorkerPools []map[string]interface{} `json:"workerPools"` + Token string `json:"nextPageToken"` +} + +func (c *Client) listWorkerPool(ctx context.Context, r *WorkerPool, pageToken string, pageSize int32) ([]*WorkerPool, string, error) { + b, err := c.listWorkerPoolRaw(ctx, r, pageToken, pageSize) + if err != nil { + return nil, "", err + } + + var m listWorkerPoolOperation + if err := json.Unmarshal(b, &m); err != nil { + return nil, "", err + } + + var l []*WorkerPool + for _, v := range m.WorkerPools { + res, err := unmarshalMapWorkerPool(v, c, r) + if err != nil { + return nil, m.Token, err + } + res.Project = r.Project + res.Location = r.Location + l = append(l, res) + } + + return l, m.Token, nil +} + +func (c *Client) deleteAllWorkerPool(ctx context.Context, f func(*WorkerPool) bool, resources []*WorkerPool) error { + var errors []string + for _, res := range resources { + if f(res) { + // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. 
+ err := c.DeleteWorkerPool(ctx, res) + if err != nil { + errors = append(errors, err.Error()) + } + } + } + if len(errors) > 0 { + return fmt.Errorf("%v", strings.Join(errors, "\n")) + } else { + return nil + } +} + +type deleteWorkerPoolOperation struct{} + +func (op *deleteWorkerPoolOperation) do(ctx context.Context, r *WorkerPool, c *Client) error { + r, err := c.GetWorkerPool(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + c.Config.Logger.InfoWithContextf(ctx, "WorkerPool not found, returning. Original error: %v", err) + return nil + } + c.Config.Logger.WarningWithContextf(ctx, "GetWorkerPool checking for existence. error: %v", err) + return err + } + + u, err := r.deleteURL(c.Config.BasePath) + if err != nil { + return err + } + + // Delete should never have a body + body := &bytes.Buffer{} + resp, err := dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider) + if err != nil { + return err + } + + // wait for object to be deleted. + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { + return err + } + + // We saw a race condition where for some successful delete operation, the Get calls returned resources for a short duration. + // This is the reason we are adding retry to handle that case. + retriesRemaining := 10 + dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + _, err := c.GetWorkerPool(ctx, r) + if dcl.IsNotFound(err) { + return nil, nil + } + if retriesRemaining > 0 { + retriesRemaining-- + return &dcl.RetryDetails{}, dcl.OperationNotDone{} + } + return nil, dcl.NotDeletedError{ExistingResource: r} + }, c.Config.RetryProvider) + return nil +} + +// Create operations are similar to Update operations, although they do not have +// specific request objects. 
The Create request object is the json encoding of +// the resource, which is modified by res.marshal to form the base request body. +type createWorkerPoolOperation struct { + response map[string]interface{} +} + +func (op *createWorkerPoolOperation) FirstResponse() (map[string]interface{}, bool) { + return op.response, len(op.response) > 0 +} + +func (op *createWorkerPoolOperation) do(ctx context.Context, r *WorkerPool, c *Client) error { + c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r) + u, err := r.createURL(c.Config.BasePath) + if err != nil { + return err + } + + req, err := r.marshal(c) + if err != nil { + return err + } + resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider) + if err != nil { + return err + } + // wait for object to be created. + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { + c.Config.Logger.Warningf("Creation failed after waiting for operation: %v", err) + return err + } + c.Config.Logger.InfoWithContextf(ctx, "Successfully waited for operation") + op.response, _ = o.FirstResponse() + + if _, err := c.GetWorkerPool(ctx, r); err != nil { + c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) + return err + } + + return nil +} + +func (c *Client) getWorkerPoolRaw(ctx context.Context, r *WorkerPool) ([]byte, error) { + + u, err := r.getURL(c.Config.BasePath) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + b, err := ioutil.ReadAll(resp.Response.Body) + if err != nil { + return nil, err + } + + return b, nil +} + +func (c *Client) workerPoolDiffsForRawDesired(ctx context.Context, rawDesired *WorkerPool, opts 
...dcl.ApplyOption) (initial, desired *WorkerPool, diffs []*dcl.FieldDiff, err error) { + c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") + // First, let us see if the user provided a state hint. If they did, we will start fetching based on that. + var fetchState *WorkerPool + if sh := dcl.FetchStateHint(opts); sh != nil { + if r, ok := sh.(*WorkerPool); !ok { + c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected WorkerPool, got %T", sh) + } else { + fetchState = r + } + } + if fetchState == nil { + fetchState = rawDesired + } + + // 1.2: Retrieval of raw initial state from API + rawInitial, err := c.GetWorkerPool(ctx, fetchState) + if rawInitial == nil { + if !dcl.IsNotFound(err) { + c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a WorkerPool resource already exists: %s", err) + return nil, nil, nil, fmt.Errorf("failed to retrieve WorkerPool resource: %v", err) + } + c.Config.Logger.InfoWithContext(ctx, "Found that WorkerPool resource did not exist.") + // Perform canonicalization to pick up defaults. + desired, err = canonicalizeWorkerPoolDesiredState(rawDesired, rawInitial) + return nil, desired, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Found initial state for WorkerPool: %v", rawInitial) + c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for WorkerPool: %v", rawDesired) + + // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. + if err := extractWorkerPoolFields(rawInitial); err != nil { + return nil, nil, nil, err + } + + // 1.3: Canonicalize raw initial state into initial state. + initial, err = canonicalizeWorkerPoolInitialState(rawInitial, rawDesired) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for WorkerPool: %v", initial) + + // 1.4: Canonicalize raw desired state into desired state. 
+ desired, err = canonicalizeWorkerPoolDesiredState(rawDesired, rawInitial, opts...) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for WorkerPool: %v", desired) + + // 2.1: Comparison of initial and desired state. + diffs, err = diffWorkerPool(c, desired, initial, opts...) + return initial, desired, diffs, err +} + +func canonicalizeWorkerPoolInitialState(rawInitial, rawDesired *WorkerPool) (*WorkerPool, error) { + // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. + + if !dcl.IsZeroValue(rawInitial.NetworkConfig) { + // Check if anything else is set. + if dcl.AnySet() { + rawInitial.NetworkConfig = EmptyWorkerPoolNetworkConfig + } + } + + if !dcl.IsZeroValue(rawInitial.PrivatePoolV1Config) { + // Check if anything else is set. + if dcl.AnySet(rawInitial.NetworkConfig) { + rawInitial.PrivatePoolV1Config = EmptyWorkerPoolPrivatePoolV1Config + } + } + + if !dcl.IsZeroValue(rawInitial.WorkerConfig) { + // Check if anything else is set. + if dcl.AnySet() { + rawInitial.WorkerConfig = EmptyWorkerPoolWorkerConfig + } + } + + if !dcl.IsZeroValue(rawInitial.PrivatePoolV1Config) { + // Check if anything else is set. + if dcl.AnySet(rawInitial.WorkerConfig) { + rawInitial.PrivatePoolV1Config = EmptyWorkerPoolPrivatePoolV1Config + } + } + + if !dcl.IsZeroValue(rawInitial.PrivateServiceConnect) { + // Check if anything else is set. + if dcl.AnySet() { + rawInitial.PrivateServiceConnect = EmptyWorkerPoolPrivateServiceConnect + } + } + + if !dcl.IsZeroValue(rawInitial.PrivatePoolV1Config) { + // Check if anything else is set. + if dcl.AnySet(rawInitial.PrivateServiceConnect) { + rawInitial.PrivatePoolV1Config = EmptyWorkerPoolPrivatePoolV1Config + } + } + + if !dcl.IsZeroValue(rawInitial.PrivateServiceConnect) { + // Check if anything else is set. 
+ if dcl.AnySet(rawInitial.NetworkConfig) { + rawInitial.PrivateServiceConnect = EmptyWorkerPoolPrivateServiceConnect + } + } + + if !dcl.IsZeroValue(rawInitial.NetworkConfig) { + // Check if anything else is set. + if dcl.AnySet(rawInitial.PrivateServiceConnect) { + rawInitial.NetworkConfig = EmptyWorkerPoolNetworkConfig + } + } + + return rawInitial, nil +} + +/* +* Canonicalizers +* +* These are responsible for converting either a user-specified config or a +* GCP API response to a standard format that can be used for difference checking. +* */ + +func canonicalizeWorkerPoolDesiredState(rawDesired, rawInitial *WorkerPool, opts ...dcl.ApplyOption) (*WorkerPool, error) { + + if rawInitial == nil { + // Since the initial state is empty, the desired state is all we have. + // We canonicalize the remaining nested objects with nil to pick up defaults. + rawDesired.PrivatePoolV1Config = canonicalizeWorkerPoolPrivatePoolV1Config(rawDesired.PrivatePoolV1Config, nil, opts...) + rawDesired.WorkerConfig = canonicalizeWorkerPoolWorkerConfig(rawDesired.WorkerConfig, nil, opts...) + rawDesired.NetworkConfig = canonicalizeWorkerPoolNetworkConfig(rawDesired.NetworkConfig, nil, opts...) + rawDesired.PrivateServiceConnect = canonicalizeWorkerPoolPrivateServiceConnect(rawDesired.PrivateServiceConnect, nil, opts...) 

		return rawDesired, nil
	}
	canonicalDesired := &WorkerPool{}
	// Name may be given as a short name or full self link; treat the two as equal.
	if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawInitial.Name) {
		canonicalDesired.Name = rawInitial.Name
	} else {
		canonicalDesired.Name = rawDesired.Name
	}
	if dcl.StringCanonicalize(rawDesired.DisplayName, rawInitial.DisplayName) {
		canonicalDesired.DisplayName = rawInitial.DisplayName
	} else {
		canonicalDesired.DisplayName = rawDesired.DisplayName
	}
	if dcl.IsZeroValue(rawDesired.Annotations) || (dcl.IsEmptyValueIndirect(rawDesired.Annotations) && dcl.IsEmptyValueIndirect(rawInitial.Annotations)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		canonicalDesired.Annotations = rawInitial.Annotations
	} else {
		canonicalDesired.Annotations = rawDesired.Annotations
	}
	// Nested objects get their own canonicalizers.
	canonicalDesired.PrivatePoolV1Config = canonicalizeWorkerPoolPrivatePoolV1Config(rawDesired.PrivatePoolV1Config, rawInitial.PrivatePoolV1Config, opts...)
	canonicalDesired.WorkerConfig = canonicalizeWorkerPoolWorkerConfig(rawDesired.WorkerConfig, rawInitial.WorkerConfig, opts...)
	canonicalDesired.NetworkConfig = canonicalizeWorkerPoolNetworkConfig(rawDesired.NetworkConfig, rawInitial.NetworkConfig, opts...)
	canonicalDesired.PrivateServiceConnect = canonicalizeWorkerPoolPrivateServiceConnect(rawDesired.PrivateServiceConnect, rawInitial.PrivateServiceConnect, opts...)
	if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) {
		canonicalDesired.Project = rawInitial.Project
	} else {
		canonicalDesired.Project = rawDesired.Project
	}
	if dcl.NameToSelfLink(rawDesired.Location, rawInitial.Location) {
		canonicalDesired.Location = rawInitial.Location
	} else {
		canonicalDesired.Location = rawDesired.Location
	}

	// Enforce the same oneof-style mutual exclusions as in
	// canonicalizeWorkerPoolInitialState, this time on the canonical desired state.
	if canonicalDesired.NetworkConfig != nil {
		// Check if anything else is set.
		// NOTE(review): dcl.AnySet() is invoked with no arguments, so this branch
		// appears unreachable — likely a generator artifact; confirm against the
		// DCL AnySet implementation.
		if dcl.AnySet() {
			canonicalDesired.NetworkConfig = EmptyWorkerPoolNetworkConfig
		}
	}

	if canonicalDesired.PrivatePoolV1Config != nil {
		// Check if anything else is set.
		if dcl.AnySet(rawDesired.NetworkConfig) {
			canonicalDesired.PrivatePoolV1Config = EmptyWorkerPoolPrivatePoolV1Config
		}
	}

	if canonicalDesired.WorkerConfig != nil {
		// Check if anything else is set.
		// NOTE(review): no-arg dcl.AnySet() — same apparent dead branch as above.
		if dcl.AnySet() {
			canonicalDesired.WorkerConfig = EmptyWorkerPoolWorkerConfig
		}
	}

	if canonicalDesired.PrivatePoolV1Config != nil {
		// Check if anything else is set.
		if dcl.AnySet(rawDesired.WorkerConfig) {
			canonicalDesired.PrivatePoolV1Config = EmptyWorkerPoolPrivatePoolV1Config
		}
	}

	if canonicalDesired.PrivateServiceConnect != nil {
		// Check if anything else is set.
		// NOTE(review): no-arg dcl.AnySet() — same apparent dead branch as above.
		if dcl.AnySet() {
			canonicalDesired.PrivateServiceConnect = EmptyWorkerPoolPrivateServiceConnect
		}
	}

	if canonicalDesired.PrivatePoolV1Config != nil {
		// Check if anything else is set.
		if dcl.AnySet(rawDesired.PrivateServiceConnect) {
			canonicalDesired.PrivatePoolV1Config = EmptyWorkerPoolPrivatePoolV1Config
		}
	}

	if canonicalDesired.PrivateServiceConnect != nil {
		// Check if anything else is set.
		if dcl.AnySet(rawDesired.NetworkConfig) {
			canonicalDesired.PrivateServiceConnect = EmptyWorkerPoolPrivateServiceConnect
		}
	}

	if canonicalDesired.NetworkConfig != nil {
		// Check if anything else is set.
		if dcl.AnySet(rawDesired.PrivateServiceConnect) {
			canonicalDesired.NetworkConfig = EmptyWorkerPoolNetworkConfig
		}
	}

	return canonicalDesired, nil
}

// canonicalizeWorkerPoolNewState reconciles the state returned by the API
// (rawNew) with the desired state: when both sides are empty the desired
// value is kept, and otherwise equivalent server values are normalized back
// to the desired representation. Fields with an empty else-branch keep
// whatever the server returned.
func canonicalizeWorkerPoolNewState(c *Client, rawNew, rawDesired *WorkerPool) (*WorkerPool, error) {

	if dcl.IsEmptyValueIndirect(rawNew.Name) && dcl.IsEmptyValueIndirect(rawDesired.Name) {
		rawNew.Name = rawDesired.Name
	} else {
		if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawNew.Name) {
			rawNew.Name = rawDesired.Name
		}
	}

	if dcl.IsEmptyValueIndirect(rawNew.DisplayName) && dcl.IsEmptyValueIndirect(rawDesired.DisplayName) {
		rawNew.DisplayName = rawDesired.DisplayName
	} else {
		if dcl.StringCanonicalize(rawDesired.DisplayName, rawNew.DisplayName) {
			rawNew.DisplayName = rawDesired.DisplayName
		}
	}

	if dcl.IsEmptyValueIndirect(rawNew.Uid) && dcl.IsEmptyValueIndirect(rawDesired.Uid) {
		rawNew.Uid = rawDesired.Uid
	} else {
		if dcl.StringCanonicalize(rawDesired.Uid, rawNew.Uid) {
			rawNew.Uid = rawDesired.Uid
		}
	}

	if dcl.IsEmptyValueIndirect(rawNew.Annotations) && dcl.IsEmptyValueIndirect(rawDesired.Annotations) {
		rawNew.Annotations = rawDesired.Annotations
	} else {
		// Keep the server-returned value as-is.
	}

	if dcl.IsEmptyValueIndirect(rawNew.CreateTime) && dcl.IsEmptyValueIndirect(rawDesired.CreateTime) {
		rawNew.CreateTime = rawDesired.CreateTime
	} else {
	}

	if dcl.IsEmptyValueIndirect(rawNew.UpdateTime) && dcl.IsEmptyValueIndirect(rawDesired.UpdateTime) {
		rawNew.UpdateTime = rawDesired.UpdateTime
	} else {
	}

	if dcl.IsEmptyValueIndirect(rawNew.DeleteTime) && dcl.IsEmptyValueIndirect(rawDesired.DeleteTime) {
		rawNew.DeleteTime = rawDesired.DeleteTime
	} else {
	}

	if dcl.IsEmptyValueIndirect(rawNew.State) && dcl.IsEmptyValueIndirect(rawDesired.State) {
		rawNew.State = rawDesired.State
	} else {
	}

	if dcl.IsEmptyValueIndirect(rawNew.PrivatePoolV1Config) && dcl.IsEmptyValueIndirect(rawDesired.PrivatePoolV1Config) {
		rawNew.PrivatePoolV1Config = rawDesired.PrivatePoolV1Config
	} else {
		rawNew.PrivatePoolV1Config = canonicalizeNewWorkerPoolPrivatePoolV1Config(c, rawDesired.PrivatePoolV1Config, rawNew.PrivatePoolV1Config)
	}

	if dcl.IsEmptyValueIndirect(rawNew.Etag) && dcl.IsEmptyValueIndirect(rawDesired.Etag) {
		rawNew.Etag = rawDesired.Etag
	} else {
		if dcl.StringCanonicalize(rawDesired.Etag, rawNew.Etag) {
			rawNew.Etag = rawDesired.Etag
		}
	}

	if dcl.IsEmptyValueIndirect(rawNew.WorkerConfig) && dcl.IsEmptyValueIndirect(rawDesired.WorkerConfig) {
		rawNew.WorkerConfig = rawDesired.WorkerConfig
	} else {
		rawNew.WorkerConfig = canonicalizeNewWorkerPoolWorkerConfig(c, rawDesired.WorkerConfig, rawNew.WorkerConfig)
	}

	if dcl.IsEmptyValueIndirect(rawNew.NetworkConfig) && dcl.IsEmptyValueIndirect(rawDesired.NetworkConfig) {
		rawNew.NetworkConfig = rawDesired.NetworkConfig
	} else {
		rawNew.NetworkConfig = canonicalizeNewWorkerPoolNetworkConfig(c, rawDesired.NetworkConfig, rawNew.NetworkConfig)
	}

	if dcl.IsEmptyValueIndirect(rawNew.PrivateServiceConnect) && dcl.IsEmptyValueIndirect(rawDesired.PrivateServiceConnect) {
		rawNew.PrivateServiceConnect = rawDesired.PrivateServiceConnect
	} else {
		rawNew.PrivateServiceConnect = canonicalizeNewWorkerPoolPrivateServiceConnect(c, rawDesired.PrivateServiceConnect, rawNew.PrivateServiceConnect)
	}

	// Project and Location are identity fields supplied by the caller, not the API.
	rawNew.Project = rawDesired.Project

	rawNew.Location = rawDesired.Location

	return rawNew, nil
}

// canonicalizeWorkerPoolPrivatePoolV1Config merges a desired nested
// PrivatePoolV1Config with its initial counterpart. An explicitly-empty
// desired object (des.empty) is returned untouched.
func canonicalizeWorkerPoolPrivatePoolV1Config(des, initial *WorkerPoolPrivatePoolV1Config, opts ...dcl.ApplyOption) *WorkerPoolPrivatePoolV1Config {
	if des == nil {
		return initial
	}
	if des.empty {
		return des
	}

	if des.NetworkConfig != nil || (initial != nil && initial.NetworkConfig != nil) {
		// Check if anything else is set.
		// NOTE(review): dcl.AnySet() is invoked with no arguments, so this branch
		// appears unreachable — likely a generator artifact; confirm against the
		// DCL AnySet implementation.
		if dcl.AnySet() {
			des.NetworkConfig = nil
			if initial != nil {
				initial.NetworkConfig = nil
			}
		}
	}

	if des.PrivateServiceConnect != nil || (initial != nil && initial.PrivateServiceConnect != nil) {
		// Check if anything else is set.
		// NOTE(review): no-arg dcl.AnySet() — same apparent dead branch as above.
		if dcl.AnySet() {
			des.PrivateServiceConnect = nil
			if initial != nil {
				initial.PrivateServiceConnect = nil
			}
		}
	}

	if initial == nil {
		return des
	}

	cDes := &WorkerPoolPrivatePoolV1Config{}

	cDes.WorkerConfig = canonicalizeWorkerPoolPrivatePoolV1ConfigWorkerConfig(des.WorkerConfig, initial.WorkerConfig, opts...)
	cDes.NetworkConfig = canonicalizeWorkerPoolPrivatePoolV1ConfigNetworkConfig(des.NetworkConfig, initial.NetworkConfig, opts...)
	cDes.PrivateServiceConnect = canonicalizeWorkerPoolPrivatePoolV1ConfigPrivateServiceConnect(des.PrivateServiceConnect, initial.PrivateServiceConnect, opts...)

	return cDes
}

// canonicalizeWorkerPoolPrivatePoolV1ConfigSlice canonicalizes each desired
// element against the matching initial element; when lengths differ every
// desired element is canonicalized against nil instead.
func canonicalizeWorkerPoolPrivatePoolV1ConfigSlice(des, initial []WorkerPoolPrivatePoolV1Config, opts ...dcl.ApplyOption) []WorkerPoolPrivatePoolV1Config {
	if dcl.IsEmptyValueIndirect(des) {
		return initial
	}

	if len(des) != len(initial) {

		items := make([]WorkerPoolPrivatePoolV1Config, 0, len(des))
		for _, d := range des {
			cd := canonicalizeWorkerPoolPrivatePoolV1Config(&d, nil, opts...)
			if cd != nil {
				items = append(items, *cd)
			}
		}
		return items
	}

	items := make([]WorkerPoolPrivatePoolV1Config, 0, len(des))
	for i, d := range des {
		cd := canonicalizeWorkerPoolPrivatePoolV1Config(&d, &initial[i], opts...)
		if cd != nil {
			items = append(items, *cd)
		}
	}
	return items

}

// canonicalizeNewWorkerPoolPrivatePoolV1Config normalizes the server-returned
// object (nw) in place against the desired object, delegating each nested
// field to its own canonicalizer.
func canonicalizeNewWorkerPoolPrivatePoolV1Config(c *Client, des, nw *WorkerPoolPrivatePoolV1Config) *WorkerPoolPrivatePoolV1Config {

	if des == nil {
		return nw
	}

	if nw == nil {
		if dcl.IsEmptyValueIndirect(des) {
			c.Config.Logger.Info("Found explicitly empty value for WorkerPoolPrivatePoolV1Config while comparing non-nil desired to nil actual. Returning desired object.")
			return des
		}
		return nil
	}

	nw.WorkerConfig = canonicalizeNewWorkerPoolPrivatePoolV1ConfigWorkerConfig(c, des.WorkerConfig, nw.WorkerConfig)
	nw.NetworkConfig = canonicalizeNewWorkerPoolPrivatePoolV1ConfigNetworkConfig(c, des.NetworkConfig, nw.NetworkConfig)
	nw.PrivateServiceConnect = canonicalizeNewWorkerPoolPrivatePoolV1ConfigPrivateServiceConnect(c, des.PrivateServiceConnect, nw.PrivateServiceConnect)

	return nw
}

// canonicalizeNewWorkerPoolPrivatePoolV1ConfigSet matches desired and actual
// elements by deep equality (set semantics, order-insensitive) rather than
// by index.
func canonicalizeNewWorkerPoolPrivatePoolV1ConfigSet(c *Client, des, nw []WorkerPoolPrivatePoolV1Config) []WorkerPoolPrivatePoolV1Config {
	if des == nil {
		return nw
	}

	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
	var items []WorkerPoolPrivatePoolV1Config
	for _, d := range des {
		matchedIndex := -1
		for i, n := range nw {
			if diffs, _ := compareWorkerPoolPrivatePoolV1ConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
				matchedIndex = i
				break
			}
		}
		if matchedIndex != -1 {
			items = append(items, *canonicalizeNewWorkerPoolPrivatePoolV1Config(c, &d, &nw[matchedIndex]))
			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
		}
	}
	// Also include elements in nw that are not matched in des.
	items = append(items, nw...)

	return items
}

// canonicalizeNewWorkerPoolPrivatePoolV1ConfigSlice canonicalizes actual
// elements pairwise against desired elements (index semantics, order-sensitive).
func canonicalizeNewWorkerPoolPrivatePoolV1ConfigSlice(c *Client, des, nw []WorkerPoolPrivatePoolV1Config) []WorkerPoolPrivatePoolV1Config {
	if des == nil {
		return nw
	}

	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
	// Return the original array.
	if len(des) != len(nw) {
		return nw
	}

	var items []WorkerPoolPrivatePoolV1Config
	for i, d := range des {
		n := nw[i]
		items = append(items, *canonicalizeNewWorkerPoolPrivatePoolV1Config(c, &d, &n))
	}

	return items
}

// canonicalizeWorkerPoolPrivatePoolV1ConfigWorkerConfig merges the desired
// nested worker config with its initial counterpart, field by field.
func canonicalizeWorkerPoolPrivatePoolV1ConfigWorkerConfig(des, initial *WorkerPoolPrivatePoolV1ConfigWorkerConfig, opts ...dcl.ApplyOption) *WorkerPoolPrivatePoolV1ConfigWorkerConfig {
	if des == nil {
		return initial
	}
	if des.empty {
		// Explicitly-empty desired object wins over the initial state.
		return des
	}

	if initial == nil {
		return des
	}

	cDes := &WorkerPoolPrivatePoolV1ConfigWorkerConfig{}

	if dcl.StringCanonicalize(des.MachineType, initial.MachineType) || dcl.IsZeroValue(des.MachineType) {
		cDes.MachineType = initial.MachineType
	} else {
		cDes.MachineType = des.MachineType
	}
	if dcl.IsZeroValue(des.DiskSizeGb) || (dcl.IsEmptyValueIndirect(des.DiskSizeGb) && dcl.IsEmptyValueIndirect(initial.DiskSizeGb)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		cDes.DiskSizeGb = initial.DiskSizeGb
	} else {
		cDes.DiskSizeGb = des.DiskSizeGb
	}
	if dcl.BoolCanonicalize(des.EnableNestedVirtualization, initial.EnableNestedVirtualization) || dcl.IsZeroValue(des.EnableNestedVirtualization) {
		cDes.EnableNestedVirtualization = initial.EnableNestedVirtualization
	} else {
		cDes.EnableNestedVirtualization = des.EnableNestedVirtualization
	}

	return cDes
}

// canonicalizeWorkerPoolPrivatePoolV1ConfigWorkerConfigSlice canonicalizes
// each desired element against the matching initial element; when lengths
// differ every desired element is canonicalized against nil instead.
func canonicalizeWorkerPoolPrivatePoolV1ConfigWorkerConfigSlice(des, initial []WorkerPoolPrivatePoolV1ConfigWorkerConfig, opts ...dcl.ApplyOption) []WorkerPoolPrivatePoolV1ConfigWorkerConfig {
	if dcl.IsEmptyValueIndirect(des) {
		return initial
	}

	if len(des) != len(initial) {

		items := make([]WorkerPoolPrivatePoolV1ConfigWorkerConfig, 0, len(des))
		for _, d := range des {
			cd := canonicalizeWorkerPoolPrivatePoolV1ConfigWorkerConfig(&d, nil, opts...)
			if cd != nil {
				items = append(items, *cd)
			}
		}
		return items
	}

	items := make([]WorkerPoolPrivatePoolV1ConfigWorkerConfig, 0, len(des))
	for i, d := range des {
		cd := canonicalizeWorkerPoolPrivatePoolV1ConfigWorkerConfig(&d, &initial[i], opts...)
		if cd != nil {
			items = append(items, *cd)
		}
	}
	return items

}

// canonicalizeNewWorkerPoolPrivatePoolV1ConfigWorkerConfig normalizes the
// server-returned worker config (nw) in place against the desired object.
// DiskSizeGb is intentionally left as returned by the server.
func canonicalizeNewWorkerPoolPrivatePoolV1ConfigWorkerConfig(c *Client, des, nw *WorkerPoolPrivatePoolV1ConfigWorkerConfig) *WorkerPoolPrivatePoolV1ConfigWorkerConfig {

	if des == nil {
		return nw
	}

	if nw == nil {
		if dcl.IsEmptyValueIndirect(des) {
			c.Config.Logger.Info("Found explicitly empty value for WorkerPoolPrivatePoolV1ConfigWorkerConfig while comparing non-nil desired to nil actual. Returning desired object.")
			return des
		}
		return nil
	}

	if dcl.StringCanonicalize(des.MachineType, nw.MachineType) {
		nw.MachineType = des.MachineType
	}
	if dcl.BoolCanonicalize(des.EnableNestedVirtualization, nw.EnableNestedVirtualization) {
		nw.EnableNestedVirtualization = des.EnableNestedVirtualization
	}

	return nw
}

// canonicalizeNewWorkerPoolPrivatePoolV1ConfigWorkerConfigSet matches desired
// and actual elements by deep equality (set semantics, order-insensitive).
func canonicalizeNewWorkerPoolPrivatePoolV1ConfigWorkerConfigSet(c *Client, des, nw []WorkerPoolPrivatePoolV1ConfigWorkerConfig) []WorkerPoolPrivatePoolV1ConfigWorkerConfig {
	if des == nil {
		return nw
	}

	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
	var items []WorkerPoolPrivatePoolV1ConfigWorkerConfig
	for _, d := range des {
		matchedIndex := -1
		for i, n := range nw {
			if diffs, _ := compareWorkerPoolPrivatePoolV1ConfigWorkerConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
				matchedIndex = i
				break
			}
		}
		if matchedIndex != -1 {
			items = append(items, *canonicalizeNewWorkerPoolPrivatePoolV1ConfigWorkerConfig(c, &d, &nw[matchedIndex]))
			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
		}
	}
	// Also include elements in nw that are not matched in des.
	items = append(items, nw...)

	return items
}

// canonicalizeNewWorkerPoolPrivatePoolV1ConfigWorkerConfigSlice canonicalizes
// actual elements pairwise against desired elements (index semantics).
func canonicalizeNewWorkerPoolPrivatePoolV1ConfigWorkerConfigSlice(c *Client, des, nw []WorkerPoolPrivatePoolV1ConfigWorkerConfig) []WorkerPoolPrivatePoolV1ConfigWorkerConfig {
	if des == nil {
		return nw
	}

	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
	// Return the original array.
	if len(des) != len(nw) {
		return nw
	}

	var items []WorkerPoolPrivatePoolV1ConfigWorkerConfig
	for i, d := range des {
		n := nw[i]
		items = append(items, *canonicalizeNewWorkerPoolPrivatePoolV1ConfigWorkerConfig(c, &d, &n))
	}

	return items
}

// canonicalizeWorkerPoolPrivatePoolV1ConfigNetworkConfig merges the desired
// nested network config with its initial counterpart, field by field.
func canonicalizeWorkerPoolPrivatePoolV1ConfigNetworkConfig(des, initial *WorkerPoolPrivatePoolV1ConfigNetworkConfig, opts ...dcl.ApplyOption) *WorkerPoolPrivatePoolV1ConfigNetworkConfig {
	if des == nil {
		return initial
	}
	if des.empty {
		// Explicitly-empty desired object wins over the initial state.
		return des
	}

	if initial == nil {
		return des
	}

	cDes := &WorkerPoolPrivatePoolV1ConfigNetworkConfig{}

	if dcl.IsZeroValue(des.PeeredNetwork) || (dcl.IsEmptyValueIndirect(des.PeeredNetwork) && dcl.IsEmptyValueIndirect(initial.PeeredNetwork)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		cDes.PeeredNetwork = initial.PeeredNetwork
	} else {
		cDes.PeeredNetwork = des.PeeredNetwork
	}
	if dcl.StringCanonicalize(des.PeeredNetworkIPRange, initial.PeeredNetworkIPRange) || dcl.IsZeroValue(des.PeeredNetworkIPRange) {
		cDes.PeeredNetworkIPRange = initial.PeeredNetworkIPRange
	} else {
		cDes.PeeredNetworkIPRange = des.PeeredNetworkIPRange
	}
	if dcl.IsZeroValue(des.EgressOption) || (dcl.IsEmptyValueIndirect(des.EgressOption) && dcl.IsEmptyValueIndirect(initial.EgressOption)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		cDes.EgressOption = initial.EgressOption
	} else {
		cDes.EgressOption = des.EgressOption
	}

	return cDes
}

// canonicalizeWorkerPoolPrivatePoolV1ConfigNetworkConfigSlice canonicalizes
// each desired element against the matching initial element; when lengths
// differ every desired element is canonicalized against nil instead.
func canonicalizeWorkerPoolPrivatePoolV1ConfigNetworkConfigSlice(des, initial []WorkerPoolPrivatePoolV1ConfigNetworkConfig, opts ...dcl.ApplyOption) []WorkerPoolPrivatePoolV1ConfigNetworkConfig {
	if dcl.IsEmptyValueIndirect(des) {
		return initial
	}

	if len(des) != len(initial) {

		items := make([]WorkerPoolPrivatePoolV1ConfigNetworkConfig, 0, len(des))
		for _, d := range des {
			cd := canonicalizeWorkerPoolPrivatePoolV1ConfigNetworkConfig(&d, nil, opts...)
			if cd != nil {
				items = append(items, *cd)
			}
		}
		return items
	}

	items := make([]WorkerPoolPrivatePoolV1ConfigNetworkConfig, 0, len(des))
	for i, d := range des {
		cd := canonicalizeWorkerPoolPrivatePoolV1ConfigNetworkConfig(&d, &initial[i], opts...)
		if cd != nil {
			items = append(items, *cd)
		}
	}
	return items

}

// canonicalizeNewWorkerPoolPrivatePoolV1ConfigNetworkConfig normalizes the
// server-returned network config (nw) in place against the desired object;
// only the string field PeeredNetworkIPRange is canonicalized.
func canonicalizeNewWorkerPoolPrivatePoolV1ConfigNetworkConfig(c *Client, des, nw *WorkerPoolPrivatePoolV1ConfigNetworkConfig) *WorkerPoolPrivatePoolV1ConfigNetworkConfig {

	if des == nil {
		return nw
	}

	if nw == nil {
		if dcl.IsEmptyValueIndirect(des) {
			c.Config.Logger.Info("Found explicitly empty value for WorkerPoolPrivatePoolV1ConfigNetworkConfig while comparing non-nil desired to nil actual. Returning desired object.")
			return des
		}
		return nil
	}

	if dcl.StringCanonicalize(des.PeeredNetworkIPRange, nw.PeeredNetworkIPRange) {
		nw.PeeredNetworkIPRange = des.PeeredNetworkIPRange
	}

	return nw
}

// canonicalizeNewWorkerPoolPrivatePoolV1ConfigNetworkConfigSet matches
// desired and actual elements by deep equality (set semantics).
func canonicalizeNewWorkerPoolPrivatePoolV1ConfigNetworkConfigSet(c *Client, des, nw []WorkerPoolPrivatePoolV1ConfigNetworkConfig) []WorkerPoolPrivatePoolV1ConfigNetworkConfig {
	if des == nil {
		return nw
	}

	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
	var items []WorkerPoolPrivatePoolV1ConfigNetworkConfig
	for _, d := range des {
		matchedIndex := -1
		for i, n := range nw {
			if diffs, _ := compareWorkerPoolPrivatePoolV1ConfigNetworkConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
				matchedIndex = i
				break
			}
		}
		if matchedIndex != -1 {
			items = append(items, *canonicalizeNewWorkerPoolPrivatePoolV1ConfigNetworkConfig(c, &d, &nw[matchedIndex]))
			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
		}
	}
	// Also include elements in nw that are not matched in des.
	items = append(items, nw...)

	return items
}

// canonicalizeNewWorkerPoolPrivatePoolV1ConfigNetworkConfigSlice canonicalizes
// actual elements pairwise against desired elements (index semantics).
func canonicalizeNewWorkerPoolPrivatePoolV1ConfigNetworkConfigSlice(c *Client, des, nw []WorkerPoolPrivatePoolV1ConfigNetworkConfig) []WorkerPoolPrivatePoolV1ConfigNetworkConfig {
	if des == nil {
		return nw
	}

	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
	// Return the original array.
	if len(des) != len(nw) {
		return nw
	}

	var items []WorkerPoolPrivatePoolV1ConfigNetworkConfig
	for i, d := range des {
		n := nw[i]
		items = append(items, *canonicalizeNewWorkerPoolPrivatePoolV1ConfigNetworkConfig(c, &d, &n))
	}

	return items
}

// canonicalizeWorkerPoolPrivatePoolV1ConfigPrivateServiceConnect merges the
// desired nested private-service-connect config with its initial counterpart.
func canonicalizeWorkerPoolPrivatePoolV1ConfigPrivateServiceConnect(des, initial *WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect, opts ...dcl.ApplyOption) *WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect {
	if des == nil {
		return initial
	}
	if des.empty {
		// Explicitly-empty desired object wins over the initial state.
		return des
	}

	if initial == nil {
		return des
	}

	cDes := &WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect{}

	if dcl.IsZeroValue(des.NetworkAttachment) || (dcl.IsEmptyValueIndirect(des.NetworkAttachment) && dcl.IsEmptyValueIndirect(initial.NetworkAttachment)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		cDes.NetworkAttachment = initial.NetworkAttachment
	} else {
		cDes.NetworkAttachment = des.NetworkAttachment
	}
	if dcl.BoolCanonicalize(des.PublicIPAddressDisabled, initial.PublicIPAddressDisabled) || dcl.IsZeroValue(des.PublicIPAddressDisabled) {
		cDes.PublicIPAddressDisabled = initial.PublicIPAddressDisabled
	} else {
		cDes.PublicIPAddressDisabled = des.PublicIPAddressDisabled
	}
	if dcl.BoolCanonicalize(des.RouteAllTraffic, initial.RouteAllTraffic) || dcl.IsZeroValue(des.RouteAllTraffic) {
		cDes.RouteAllTraffic = initial.RouteAllTraffic
	} else {
		cDes.RouteAllTraffic = des.RouteAllTraffic
	}

	return cDes
}

// canonicalizeWorkerPoolPrivatePoolV1ConfigPrivateServiceConnectSlice
// canonicalizes each desired element against the matching initial element;
// when lengths differ every desired element is canonicalized against nil.
func canonicalizeWorkerPoolPrivatePoolV1ConfigPrivateServiceConnectSlice(des, initial []WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect, opts ...dcl.ApplyOption) []WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect {
	if dcl.IsEmptyValueIndirect(des) {
		return initial
	}

	if len(des) != len(initial) {

		items := make([]WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect, 0, len(des))
		for _, d := range des {
			cd := canonicalizeWorkerPoolPrivatePoolV1ConfigPrivateServiceConnect(&d, nil, opts...)
			if cd != nil {
				items = append(items, *cd)
			}
		}
		return items
	}

	items := make([]WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect, 0, len(des))
	for i, d := range des {
		cd := canonicalizeWorkerPoolPrivatePoolV1ConfigPrivateServiceConnect(&d, &initial[i], opts...)
		if cd != nil {
			items = append(items, *cd)
		}
	}
	return items

}

// canonicalizeNewWorkerPoolPrivatePoolV1ConfigPrivateServiceConnect
// normalizes the server-returned object (nw) in place against the desired
// object; only the boolean fields are canonicalized here.
func canonicalizeNewWorkerPoolPrivatePoolV1ConfigPrivateServiceConnect(c *Client, des, nw *WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect) *WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect {

	if des == nil {
		return nw
	}

	if nw == nil {
		if dcl.IsEmptyValueIndirect(des) {
			c.Config.Logger.Info("Found explicitly empty value for WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect while comparing non-nil desired to nil actual. Returning desired object.")
			return des
		}
		return nil
	}

	if dcl.BoolCanonicalize(des.PublicIPAddressDisabled, nw.PublicIPAddressDisabled) {
		nw.PublicIPAddressDisabled = des.PublicIPAddressDisabled
	}
	if dcl.BoolCanonicalize(des.RouteAllTraffic, nw.RouteAllTraffic) {
		nw.RouteAllTraffic = des.RouteAllTraffic
	}

	return nw
}

// canonicalizeNewWorkerPoolPrivatePoolV1ConfigPrivateServiceConnectSet matches
// desired and actual elements by deep equality (set semantics).
func canonicalizeNewWorkerPoolPrivatePoolV1ConfigPrivateServiceConnectSet(c *Client, des, nw []WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect) []WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect {
	if des == nil {
		return nw
	}

	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
	var items []WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect
	for _, d := range des {
		matchedIndex := -1
		for i, n := range nw {
			if diffs, _ := compareWorkerPoolPrivatePoolV1ConfigPrivateServiceConnectNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
				matchedIndex = i
				break
			}
		}
		if matchedIndex != -1 {
			items = append(items, *canonicalizeNewWorkerPoolPrivatePoolV1ConfigPrivateServiceConnect(c, &d, &nw[matchedIndex]))
			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
		}
	}
	// Also include elements in nw that are not matched in des.
	items = append(items, nw...)

	return items
}

// canonicalizeNewWorkerPoolPrivatePoolV1ConfigPrivateServiceConnectSlice
// canonicalizes actual elements pairwise against desired elements.
func canonicalizeNewWorkerPoolPrivatePoolV1ConfigPrivateServiceConnectSlice(c *Client, des, nw []WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect) []WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect {
	if des == nil {
		return nw
	}

	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
	// Return the original array.
	if len(des) != len(nw) {
		return nw
	}

	var items []WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect
	for i, d := range des {
		n := nw[i]
		items = append(items, *canonicalizeNewWorkerPoolPrivatePoolV1ConfigPrivateServiceConnect(c, &d, &n))
	}

	return items
}

// canonicalizeWorkerPoolWorkerConfig merges the desired top-level (legacy)
// worker config with its initial counterpart, field by field.
func canonicalizeWorkerPoolWorkerConfig(des, initial *WorkerPoolWorkerConfig, opts ...dcl.ApplyOption) *WorkerPoolWorkerConfig {
	if des == nil {
		return initial
	}
	if des.empty {
		// Explicitly-empty desired object wins over the initial state.
		return des
	}

	if initial == nil {
		return des
	}

	cDes := &WorkerPoolWorkerConfig{}

	if dcl.StringCanonicalize(des.MachineType, initial.MachineType) || dcl.IsZeroValue(des.MachineType) {
		cDes.MachineType = initial.MachineType
	} else {
		cDes.MachineType = des.MachineType
	}
	if dcl.IsZeroValue(des.DiskSizeGb) || (dcl.IsEmptyValueIndirect(des.DiskSizeGb) && dcl.IsEmptyValueIndirect(initial.DiskSizeGb)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		cDes.DiskSizeGb = initial.DiskSizeGb
	} else {
		cDes.DiskSizeGb = des.DiskSizeGb
	}
	if dcl.BoolCanonicalize(des.EnableNestedVirtualization, initial.EnableNestedVirtualization) || dcl.IsZeroValue(des.EnableNestedVirtualization) {
		cDes.EnableNestedVirtualization = initial.EnableNestedVirtualization
	} else {
		cDes.EnableNestedVirtualization = des.EnableNestedVirtualization
	}
	if dcl.BoolCanonicalize(des.NoExternalIP, initial.NoExternalIP) || dcl.IsZeroValue(des.NoExternalIP) {
		cDes.NoExternalIP = initial.NoExternalIP
	} else {
		cDes.NoExternalIP = des.NoExternalIP
	}

	return cDes
}

// canonicalizeWorkerPoolWorkerConfigSlice canonicalizes each desired element
// against the matching initial element; when lengths differ every desired
// element is canonicalized against nil instead.
func canonicalizeWorkerPoolWorkerConfigSlice(des, initial []WorkerPoolWorkerConfig, opts ...dcl.ApplyOption) []WorkerPoolWorkerConfig {
	if dcl.IsEmptyValueIndirect(des) {
		return initial
	}

	if len(des) != len(initial) {

		items := make([]WorkerPoolWorkerConfig, 0, len(des))
		for _, d := range des {
			cd := canonicalizeWorkerPoolWorkerConfig(&d, nil, opts...)
			if cd != nil {
				items = append(items, *cd)
			}
		}
		return items
	}

	items := make([]WorkerPoolWorkerConfig, 0, len(des))
	for i, d := range des {
		cd := canonicalizeWorkerPoolWorkerConfig(&d, &initial[i], opts...)
		if cd != nil {
			items = append(items, *cd)
		}
	}
	return items

}

// canonicalizeNewWorkerPoolWorkerConfig normalizes the server-returned worker
// config (nw) in place against the desired object. DiskSizeGb is left as
// returned by the server.
func canonicalizeNewWorkerPoolWorkerConfig(c *Client, des, nw *WorkerPoolWorkerConfig) *WorkerPoolWorkerConfig {

	if des == nil {
		return nw
	}

	if nw == nil {
		if dcl.IsEmptyValueIndirect(des) {
			c.Config.Logger.Info("Found explicitly empty value for WorkerPoolWorkerConfig while comparing non-nil desired to nil actual. Returning desired object.")
			return des
		}
		return nil
	}

	if dcl.StringCanonicalize(des.MachineType, nw.MachineType) {
		nw.MachineType = des.MachineType
	}
	if dcl.BoolCanonicalize(des.EnableNestedVirtualization, nw.EnableNestedVirtualization) {
		nw.EnableNestedVirtualization = des.EnableNestedVirtualization
	}
	if dcl.BoolCanonicalize(des.NoExternalIP, nw.NoExternalIP) {
		nw.NoExternalIP = des.NoExternalIP
	}

	return nw
}

// canonicalizeNewWorkerPoolWorkerConfigSet matches desired and actual
// elements by deep equality (set semantics, order-insensitive).
func canonicalizeNewWorkerPoolWorkerConfigSet(c *Client, des, nw []WorkerPoolWorkerConfig) []WorkerPoolWorkerConfig {
	if des == nil {
		return nw
	}

	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
	var items []WorkerPoolWorkerConfig
	for _, d := range des {
		matchedIndex := -1
		for i, n := range nw {
			if diffs, _ := compareWorkerPoolWorkerConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
				matchedIndex = i
				break
			}
		}
		if matchedIndex != -1 {
			items = append(items, *canonicalizeNewWorkerPoolWorkerConfig(c, &d, &nw[matchedIndex]))
			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
		}
	}
	// Also include elements in nw that are not matched in des.
	items = append(items, nw...)

	return items
}

// canonicalizeNewWorkerPoolWorkerConfigSlice canonicalizes actual elements
// pairwise against desired elements (index semantics, order-sensitive).
func canonicalizeNewWorkerPoolWorkerConfigSlice(c *Client, des, nw []WorkerPoolWorkerConfig) []WorkerPoolWorkerConfig {
	if des == nil {
		return nw
	}

	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
	// Return the original array.
	if len(des) != len(nw) {
		return nw
	}

	var items []WorkerPoolWorkerConfig
	for i, d := range des {
		n := nw[i]
		items = append(items, *canonicalizeNewWorkerPoolWorkerConfig(c, &d, &n))
	}

	return items
}

// canonicalizeWorkerPoolNetworkConfig merges the desired top-level (legacy)
// network config with its initial counterpart, field by field.
func canonicalizeWorkerPoolNetworkConfig(des, initial *WorkerPoolNetworkConfig, opts ...dcl.ApplyOption) *WorkerPoolNetworkConfig {
	if des == nil {
		return initial
	}
	if des.empty {
		// Explicitly-empty desired object wins over the initial state.
		return des
	}

	if initial == nil {
		return des
	}

	cDes := &WorkerPoolNetworkConfig{}

	if dcl.IsZeroValue(des.PeeredNetwork) || (dcl.IsEmptyValueIndirect(des.PeeredNetwork) && dcl.IsEmptyValueIndirect(initial.PeeredNetwork)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		cDes.PeeredNetwork = initial.PeeredNetwork
	} else {
		cDes.PeeredNetwork = des.PeeredNetwork
	}
	if dcl.StringCanonicalize(des.PeeredNetworkIPRange, initial.PeeredNetworkIPRange) || dcl.IsZeroValue(des.PeeredNetworkIPRange) {
		cDes.PeeredNetworkIPRange = initial.PeeredNetworkIPRange
	} else {
		cDes.PeeredNetworkIPRange = des.PeeredNetworkIPRange
	}

	return cDes
}

// canonicalizeWorkerPoolNetworkConfigSlice canonicalizes each desired element
// against the matching initial element; when lengths differ every desired
// element is canonicalized against nil instead.
func canonicalizeWorkerPoolNetworkConfigSlice(des, initial []WorkerPoolNetworkConfig, opts ...dcl.ApplyOption) []WorkerPoolNetworkConfig {
	if dcl.IsEmptyValueIndirect(des) {
		return initial
	}

	if len(des) != len(initial) {

		items := make([]WorkerPoolNetworkConfig, 0, len(des))
		for _, d := range des {
			cd := canonicalizeWorkerPoolNetworkConfig(&d, nil, opts...)
			if cd != nil {
				items = append(items, *cd)
			}
		}
		return items
	}

	items := make([]WorkerPoolNetworkConfig, 0, len(des))
	for i, d := range des {
		cd := canonicalizeWorkerPoolNetworkConfig(&d, &initial[i], opts...)
		if cd != nil {
			items = append(items, *cd)
		}
	}
	return items

}

// canonicalizeNewWorkerPoolNetworkConfig normalizes the server-returned
// network config (nw) in place against the desired object; only the string
// field PeeredNetworkIPRange is canonicalized.
func canonicalizeNewWorkerPoolNetworkConfig(c *Client, des, nw *WorkerPoolNetworkConfig) *WorkerPoolNetworkConfig {

	if des == nil {
		return nw
	}

	if nw == nil {
		if dcl.IsEmptyValueIndirect(des) {
			c.Config.Logger.Info("Found explicitly empty value for WorkerPoolNetworkConfig while comparing non-nil desired to nil actual. Returning desired object.")
			return des
		}
		return nil
	}

	if dcl.StringCanonicalize(des.PeeredNetworkIPRange, nw.PeeredNetworkIPRange) {
		nw.PeeredNetworkIPRange = des.PeeredNetworkIPRange
	}

	return nw
}

// canonicalizeNewWorkerPoolNetworkConfigSet matches desired and actual
// elements by deep equality (set semantics, order-insensitive).
func canonicalizeNewWorkerPoolNetworkConfigSet(c *Client, des, nw []WorkerPoolNetworkConfig) []WorkerPoolNetworkConfig {
	if des == nil {
		return nw
	}

	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
	var items []WorkerPoolNetworkConfig
	for _, d := range des {
		matchedIndex := -1
		for i, n := range nw {
			if diffs, _ := compareWorkerPoolNetworkConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
				matchedIndex = i
				break
			}
		}
		if matchedIndex != -1 {
			items = append(items, *canonicalizeNewWorkerPoolNetworkConfig(c, &d, &nw[matchedIndex]))
			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
		}
	}
	// Also include elements in nw that are not matched in des.
	items = append(items, nw...)

	return items
}

// canonicalizeNewWorkerPoolNetworkConfigSlice canonicalizes actual elements
// pairwise against desired elements (index semantics, order-sensitive).
func canonicalizeNewWorkerPoolNetworkConfigSlice(c *Client, des, nw []WorkerPoolNetworkConfig) []WorkerPoolNetworkConfig {
	if des == nil {
		return nw
	}

	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
	// Return the original array.
	if len(des) != len(nw) {
		return nw
	}

	var items []WorkerPoolNetworkConfig
	for i, d := range des {
		n := nw[i]
		items = append(items, *canonicalizeNewWorkerPoolNetworkConfig(c, &d, &n))
	}

	return items
}

// canonicalizeWorkerPoolPrivateServiceConnect merges the desired top-level
// private-service-connect config with its initial counterpart. Unlike the
// PrivatePoolV1Config variant, this type carries no PublicIPAddressDisabled
// field.
func canonicalizeWorkerPoolPrivateServiceConnect(des, initial *WorkerPoolPrivateServiceConnect, opts ...dcl.ApplyOption) *WorkerPoolPrivateServiceConnect {
	if des == nil {
		return initial
	}
	if des.empty {
		// Explicitly-empty desired object wins over the initial state.
		return des
	}

	if initial == nil {
		return des
	}

	cDes := &WorkerPoolPrivateServiceConnect{}

	if dcl.IsZeroValue(des.NetworkAttachment) || (dcl.IsEmptyValueIndirect(des.NetworkAttachment) && dcl.IsEmptyValueIndirect(initial.NetworkAttachment)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		cDes.NetworkAttachment = initial.NetworkAttachment
	} else {
		cDes.NetworkAttachment = des.NetworkAttachment
	}
	if dcl.BoolCanonicalize(des.RouteAllTraffic, initial.RouteAllTraffic) || dcl.IsZeroValue(des.RouteAllTraffic) {
		cDes.RouteAllTraffic = initial.RouteAllTraffic
	} else {
		cDes.RouteAllTraffic = des.RouteAllTraffic
	}

	return cDes
}

// canonicalizeWorkerPoolPrivateServiceConnectSlice canonicalizes each desired
// element against the matching initial element; when lengths differ every
// desired element is canonicalized against nil instead.
func canonicalizeWorkerPoolPrivateServiceConnectSlice(des, initial []WorkerPoolPrivateServiceConnect, opts ...dcl.ApplyOption) []WorkerPoolPrivateServiceConnect {
	if dcl.IsEmptyValueIndirect(des) {
		return initial
	}

	if len(des) != len(initial) {

		items := make([]WorkerPoolPrivateServiceConnect, 0, len(des))
		for _, d := range des {
			cd := canonicalizeWorkerPoolPrivateServiceConnect(&d, nil, opts...)
			if cd != nil {
				items = append(items, *cd)
			}
		}
		return items
	}

	items := make([]WorkerPoolPrivateServiceConnect, 0, len(des))
	for i, d := range des {
		cd := canonicalizeWorkerPoolPrivateServiceConnect(&d, &initial[i], opts...)
		if cd != nil {
			items = append(items, *cd)
		}
	}
	return items

}

// canonicalizeNewWorkerPoolPrivateServiceConnect normalizes the
// server-returned object (nw) in place against the desired object; only the
// boolean RouteAllTraffic is canonicalized here.
func canonicalizeNewWorkerPoolPrivateServiceConnect(c *Client, des, nw *WorkerPoolPrivateServiceConnect) *WorkerPoolPrivateServiceConnect {

	if des == nil {
		return nw
	}

	if nw == nil {
		if dcl.IsEmptyValueIndirect(des) {
			c.Config.Logger.Info("Found explicitly empty value for WorkerPoolPrivateServiceConnect while comparing non-nil desired to nil actual. Returning desired object.")
			return des
		}
		return nil
	}

	if dcl.BoolCanonicalize(des.RouteAllTraffic, nw.RouteAllTraffic) {
		nw.RouteAllTraffic = des.RouteAllTraffic
	}

	return nw
}

// canonicalizeNewWorkerPoolPrivateServiceConnectSet matches desired and
// actual elements by deep equality (set semantics, order-insensitive).
func canonicalizeNewWorkerPoolPrivateServiceConnectSet(c *Client, des, nw []WorkerPoolPrivateServiceConnect) []WorkerPoolPrivateServiceConnect {
	if des == nil {
		return nw
	}

	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
	var items []WorkerPoolPrivateServiceConnect
	for _, d := range des {
		matchedIndex := -1
		for i, n := range nw {
			if diffs, _ := compareWorkerPoolPrivateServiceConnectNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
				matchedIndex = i
				break
			}
		}
		if matchedIndex != -1 {
			items = append(items, *canonicalizeNewWorkerPoolPrivateServiceConnect(c, &d, &nw[matchedIndex]))
			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
		}
	}
	// Also include elements in nw that are not matched in des.
	items = append(items, nw...)

	return items
}

// canonicalizeNewWorkerPoolPrivateServiceConnectSlice canonicalizes actual
// elements pairwise against desired elements (index semantics).
func canonicalizeNewWorkerPoolPrivateServiceConnectSlice(c *Client, des, nw []WorkerPoolPrivateServiceConnect) []WorkerPoolPrivateServiceConnect {
	if des == nil {
		return nw
	}

	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
	// Return the original array.
	if len(des) != len(nw) {
		return nw
	}

	var items []WorkerPoolPrivateServiceConnect
	for i, d := range des {
		n := nw[i]
		items = append(items, *canonicalizeNewWorkerPoolPrivateServiceConnect(c, &d, &n))
	}

	return items
}

// The differ returns a list of diffs, along with a list of operations that should be taken
// to remedy them. Right now, it does not attempt to consolidate operations - if several
// fields can be fixed with a patch update, it will perform the patch several times.
// Diffs on some fields will be ignored if the `desired` state has an empty (nil)
// value. This empty value indicates that the user does not care about the state for
// the field. Empty fields on the actual object will cause diffs.
// TODO(magic-modules-eng): for efficiency in some resources, add batching.
func diffWorkerPool(c *Client, desired, actual *WorkerPool, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) {
	if desired == nil || actual == nil {
		return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual)
	}

	c.Config.Logger.Infof("Diff function called with desired state: %v", desired)
	c.Config.Logger.Infof("Diff function called with actual state: %v", actual)

	var fn dcl.FieldName
	var newDiffs []*dcl.FieldDiff
	// New style diffs.
	// Name changes force recreation of the worker pool.
	if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	// DisplayName changes are applied via the update operation.
	if ds, err := dcl.Diff(desired.DisplayName, actual.DisplayName, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateWorkerPoolUpdateWorkerPoolOperation")}, fn.AddNest("DisplayName")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
+ } + + if ds, err := dcl.Diff(desired.Uid, actual.Uid, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Uid")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Annotations, actual.Annotations, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateWorkerPoolUpdateWorkerPoolOperation")}, fn.AddNest("Annotations")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.CreateTime, actual.CreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.DeleteTime, actual.DeleteTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DeleteTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.State, actual.State, dcl.DiffInfo{OutputOnly: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("State")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.PrivatePoolV1Config, actual.PrivatePoolV1Config, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareWorkerPoolPrivatePoolV1ConfigNewStyle, EmptyObject: EmptyWorkerPoolPrivatePoolV1Config, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PrivatePoolV1Config")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Etag, actual.Etag, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Etag")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.WorkerConfig, actual.WorkerConfig, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareWorkerPoolWorkerConfigNewStyle, EmptyObject: EmptyWorkerPoolWorkerConfig, OperationSelector: dcl.TriggersOperation("updateWorkerPoolUpdateWorkerPoolOperation")}, fn.AddNest("WorkerConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.NetworkConfig, actual.NetworkConfig, dcl.DiffInfo{ObjectFunction: compareWorkerPoolNetworkConfigNewStyle, EmptyObject: EmptyWorkerPoolNetworkConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NetworkConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.PrivateServiceConnect, actual.PrivateServiceConnect, dcl.DiffInfo{ObjectFunction: compareWorkerPoolPrivateServiceConnectNewStyle, EmptyObject: EmptyWorkerPoolPrivateServiceConnect, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PrivateServiceConnect")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Location, actual.Location, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Location")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if len(newDiffs) > 0 { + c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs) + } + return newDiffs, nil +} +func compareWorkerPoolPrivatePoolV1ConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkerPoolPrivatePoolV1Config) + if !ok { + desiredNotPointer, ok := d.(WorkerPoolPrivatePoolV1Config) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkerPoolPrivatePoolV1Config or *WorkerPoolPrivatePoolV1Config", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkerPoolPrivatePoolV1Config) + if !ok { + actualNotPointer, ok := a.(WorkerPoolPrivatePoolV1Config) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkerPoolPrivatePoolV1Config", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.WorkerConfig, actual.WorkerConfig, dcl.DiffInfo{ObjectFunction: compareWorkerPoolPrivatePoolV1ConfigWorkerConfigNewStyle, EmptyObject: EmptyWorkerPoolPrivatePoolV1ConfigWorkerConfig, OperationSelector: dcl.TriggersOperation("updateWorkerPoolUpdateWorkerPoolOperation")}, fn.AddNest("WorkerConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.NetworkConfig, actual.NetworkConfig, dcl.DiffInfo{ObjectFunction: compareWorkerPoolPrivatePoolV1ConfigNetworkConfigNewStyle, EmptyObject: EmptyWorkerPoolPrivatePoolV1ConfigNetworkConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NetworkConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.PrivateServiceConnect, actual.PrivateServiceConnect, dcl.DiffInfo{ObjectFunction: compareWorkerPoolPrivatePoolV1ConfigPrivateServiceConnectNewStyle, EmptyObject: EmptyWorkerPoolPrivatePoolV1ConfigPrivateServiceConnect, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PrivateServiceConnect")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareWorkerPoolPrivatePoolV1ConfigWorkerConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkerPoolPrivatePoolV1ConfigWorkerConfig) + if !ok { + desiredNotPointer, ok := d.(WorkerPoolPrivatePoolV1ConfigWorkerConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkerPoolPrivatePoolV1ConfigWorkerConfig or *WorkerPoolPrivatePoolV1ConfigWorkerConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkerPoolPrivatePoolV1ConfigWorkerConfig) + if !ok { + actualNotPointer, ok := a.(WorkerPoolPrivatePoolV1ConfigWorkerConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkerPoolPrivatePoolV1ConfigWorkerConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.MachineType, actual.MachineType, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateWorkerPoolUpdateWorkerPoolOperation")}, fn.AddNest("MachineType")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.DiskSizeGb, actual.DiskSizeGb, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateWorkerPoolUpdateWorkerPoolOperation")}, fn.AddNest("DiskSizeGb")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.EnableNestedVirtualization, actual.EnableNestedVirtualization, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateWorkerPoolUpdateWorkerPoolOperation")}, fn.AddNest("EnableNestedVirtualization")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareWorkerPoolPrivatePoolV1ConfigNetworkConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkerPoolPrivatePoolV1ConfigNetworkConfig) + if !ok { + desiredNotPointer, ok := d.(WorkerPoolPrivatePoolV1ConfigNetworkConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkerPoolPrivatePoolV1ConfigNetworkConfig or *WorkerPoolPrivatePoolV1ConfigNetworkConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkerPoolPrivatePoolV1ConfigNetworkConfig) + if !ok { + actualNotPointer, ok := a.(WorkerPoolPrivatePoolV1ConfigNetworkConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkerPoolPrivatePoolV1ConfigNetworkConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.PeeredNetwork, actual.PeeredNetwork, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PeeredNetwork")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.PeeredNetworkIPRange, actual.PeeredNetworkIPRange, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PeeredNetworkIpRange")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.EgressOption, actual.EgressOption, dcl.DiffInfo{ServerDefault: true, Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateWorkerPoolUpdateWorkerPoolOperation")}, fn.AddNest("EgressOption")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareWorkerPoolPrivatePoolV1ConfigPrivateServiceConnectNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect) + if !ok { + desiredNotPointer, ok := d.(WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect or *WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect) + if !ok { + actualNotPointer, ok := a.(WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.NetworkAttachment, actual.NetworkAttachment, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NetworkAttachment")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.PublicIPAddressDisabled, actual.PublicIPAddressDisabled, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PublicIpAddressDisabled")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.RouteAllTraffic, actual.RouteAllTraffic, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("RouteAllTraffic")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareWorkerPoolWorkerConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkerPoolWorkerConfig) + if !ok { + desiredNotPointer, ok := d.(WorkerPoolWorkerConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkerPoolWorkerConfig or *WorkerPoolWorkerConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkerPoolWorkerConfig) + if !ok { + actualNotPointer, ok := a.(WorkerPoolWorkerConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkerPoolWorkerConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.MachineType, actual.MachineType, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateWorkerPoolUpdateWorkerPoolOperation")}, fn.AddNest("MachineType")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.DiskSizeGb, actual.DiskSizeGb, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateWorkerPoolUpdateWorkerPoolOperation")}, fn.AddNest("DiskSizeGb")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.EnableNestedVirtualization, actual.EnableNestedVirtualization, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateWorkerPoolUpdateWorkerPoolOperation")}, fn.AddNest("EnableNestedVirtualization")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.NoExternalIP, actual.NoExternalIP, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.TriggersOperation("updateWorkerPoolUpdateWorkerPoolOperation")}, fn.AddNest("NoExternalIp")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareWorkerPoolNetworkConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkerPoolNetworkConfig) + if !ok { + desiredNotPointer, ok := d.(WorkerPoolNetworkConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkerPoolNetworkConfig or *WorkerPoolNetworkConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkerPoolNetworkConfig) + if !ok { + actualNotPointer, ok := a.(WorkerPoolNetworkConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkerPoolNetworkConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.PeeredNetwork, actual.PeeredNetwork, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PeeredNetwork")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.PeeredNetworkIPRange, actual.PeeredNetworkIPRange, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PeeredNetworkIpRange")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkerPoolPrivateServiceConnectNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkerPoolPrivateServiceConnect) + if !ok { + desiredNotPointer, ok := d.(WorkerPoolPrivateServiceConnect) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkerPoolPrivateServiceConnect or *WorkerPoolPrivateServiceConnect", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkerPoolPrivateServiceConnect) + if !ok { + actualNotPointer, ok := a.(WorkerPoolPrivateServiceConnect) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkerPoolPrivateServiceConnect", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.NetworkAttachment, actual.NetworkAttachment, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NetworkAttachment")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.RouteAllTraffic, actual.RouteAllTraffic, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("RouteAllTraffic")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +// urlNormalized returns a copy of the resource struct with values normalized +// for URL substitutions. For instance, it converts long-form self-links to +// short-form so they can be substituted in. 
+func (r *WorkerPool) urlNormalized() *WorkerPool {
+	normalized := dcl.Copy(*r).(WorkerPool)
+	normalized.Name = dcl.SelfLinkToName(r.Name)
+	normalized.DisplayName = dcl.SelfLinkToName(r.DisplayName)
+	normalized.Uid = dcl.SelfLinkToName(r.Uid)
+	normalized.Etag = dcl.SelfLinkToName(r.Etag)
+	normalized.Project = dcl.SelfLinkToName(r.Project)
+	normalized.Location = dcl.SelfLinkToName(r.Location)
+	return &normalized
+}
+
+// updateURL returns the request URL for the named update method on this
+// resource. Only the "UpdateWorkerPool" update name is recognized; any other
+// name yields an error.
+func (r *WorkerPool) updateURL(userBasePath, updateName string) (string, error) {
+	nr := r.urlNormalized()
+	if updateName == "UpdateWorkerPool" {
+		fields := map[string]interface{}{
+			"project":  dcl.ValueOrEmptyString(nr.Project),
+			"location": dcl.ValueOrEmptyString(nr.Location),
+			"name":     dcl.ValueOrEmptyString(nr.Name),
+		}
+		// NOTE(review): the {{ "{{" }} / {{ "}}" }} sequences are text/template
+		// escapes in this .tmpl file; they render as literal {{project}} etc.,
+		// which dcl.URL then substitutes from `fields` — confirm when regenerating.
+		return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/workerPools/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, fields), nil
+
+	}
+
+	return "", fmt.Errorf("unknown update name: %s", updateName)
+}
+
+// marshal encodes the WorkerPool resource into JSON for a Create request, and
+// performs transformations from the resource schema to the API schema if
+// necessary.
+func (r *WorkerPool) marshal(c *Client) ([]byte, error) {
+	m, err := expandWorkerPool(c, r)
+	if err != nil {
+		return nil, fmt.Errorf("error marshalling WorkerPool: %w", err)
+	}
+
+	return json.Marshal(m)
+}
+
+// unmarshalWorkerPool decodes JSON responses into the WorkerPool resource schema.
+func unmarshalWorkerPool(b []byte, c *Client, res *WorkerPool) (*WorkerPool, error) {
+	var m map[string]interface{}
+	if err := json.Unmarshal(b, &m); err != nil {
+		return nil, err
+	}
+	return unmarshalMapWorkerPool(m, c, res)
+}
+
+// unmarshalMapWorkerPool converts an already-decoded JSON map into a
+// WorkerPool, erroring when the map flattens to nothing.
+func unmarshalMapWorkerPool(m map[string]interface{}, c *Client, res *WorkerPool) (*WorkerPool, error) {
+
+	flattened := flattenWorkerPool(c, m, res)
+	if flattened == nil {
+		return nil, fmt.Errorf("attempted to flatten empty json object")
+	}
+	return flattened, nil
+}
+
+// expandWorkerPool expands WorkerPool into a JSON request object.
+func expandWorkerPool(c *Client, f *WorkerPool) (map[string]interface{}, error) {
+	m := make(map[string]interface{})
+	res := f
+	// Blank assignment keeps res referenced even if no nested expander below uses it.
+	_ = res
+	if v, err := dcl.DeriveField("projects/%s/locations/%s/workerPools/%s", f.Name, dcl.SelfLinkToName(f.Project), dcl.SelfLinkToName(f.Location), dcl.SelfLinkToName(f.Name)); err != nil {
+		return nil, fmt.Errorf("error expanding Name into name: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["name"] = v
+	}
+	if v := f.DisplayName; dcl.ValueShouldBeSent(v) {
+		m["displayName"] = v
+	}
+	if v := f.Annotations; dcl.ValueShouldBeSent(v) {
+		m["annotations"] = v
+	}
+	if v, err := expandWorkerPoolPrivatePoolV1Config(c, f.PrivatePoolV1Config, res); err != nil {
+		return nil, fmt.Errorf("error expanding PrivatePoolV1Config into privatePoolV1Config: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["privatePoolV1Config"] = v
+	}
+	if v, err := expandWorkerPoolWorkerConfig(c, f.WorkerConfig, res); err != nil {
+		return nil, fmt.Errorf("error expanding WorkerConfig into workerConfig: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["workerConfig"] = v
+	}
+	if v, err := expandWorkerPoolNetworkConfig(c, f.NetworkConfig, res); err != nil {
+		return nil, fmt.Errorf("error expanding NetworkConfig into networkConfig: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["networkConfig"] = v
+	}
+	if v, err := expandWorkerPoolPrivateServiceConnect(c, f.PrivateServiceConnect, res); err != nil {
+		return nil, fmt.Errorf("error expanding PrivateServiceConnect into privateServiceConnect: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["privateServiceConnect"] = v
+	}
+	// NOTE(review): dcl.EmptyValue() means project/location are never placed in
+	// the request body — presumably they are URL-only parameters; confirm
+	// against the dcl expanders before changing.
+	if v, err := dcl.EmptyValue(); err != nil {
+		return nil, fmt.Errorf("error expanding Project into project: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["project"] = v
+	}
+	if v, err := dcl.EmptyValue(); err != nil {
+		return nil, fmt.Errorf("error expanding Location into location: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["location"] = v
+	}
+
+	return m, nil
+}
+
+// flattenWorkerPool flattens WorkerPool from a JSON request object into the
+// WorkerPool type.
+func flattenWorkerPool(c *Client, i interface{}, res *WorkerPool) *WorkerPool {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+	if len(m) == 0 {
+		return nil
+	}
+
+	resultRes := &WorkerPool{}
+	resultRes.Name = dcl.FlattenString(m["name"])
+	resultRes.DisplayName = dcl.FlattenString(m["displayName"])
+	resultRes.Uid = dcl.FlattenString(m["uid"])
+	resultRes.Annotations = dcl.FlattenKeyValuePairs(m["annotations"])
+	resultRes.CreateTime = dcl.FlattenString(m["createTime"])
+	resultRes.UpdateTime = dcl.FlattenString(m["updateTime"])
+	resultRes.DeleteTime = dcl.FlattenString(m["deleteTime"])
+	resultRes.State = flattenWorkerPoolStateEnum(m["state"])
+	resultRes.PrivatePoolV1Config = flattenWorkerPoolPrivatePoolV1Config(c, m["privatePoolV1Config"], res)
+	resultRes.Etag = dcl.FlattenString(m["etag"])
+	resultRes.WorkerConfig = flattenWorkerPoolWorkerConfig(c, m["workerConfig"], res)
+	resultRes.NetworkConfig = flattenWorkerPoolNetworkConfig(c, m["networkConfig"], res)
+	resultRes.PrivateServiceConnect = flattenWorkerPoolPrivateServiceConnect(c, m["privateServiceConnect"], res)
+	resultRes.Project = dcl.FlattenString(m["project"])
+	resultRes.Location = dcl.FlattenString(m["location"])
+
+	return resultRes
+}
+
+// expandWorkerPoolPrivatePoolV1ConfigMap expands the contents of WorkerPoolPrivatePoolV1Config into a JSON
+// request object.
+func expandWorkerPoolPrivatePoolV1ConfigMap(c *Client, f map[string]WorkerPoolPrivatePoolV1Config, res *WorkerPool) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandWorkerPoolPrivatePoolV1Config(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandWorkerPoolPrivatePoolV1ConfigSlice expands the contents of WorkerPoolPrivatePoolV1Config into a JSON
+// request object.
+func expandWorkerPoolPrivatePoolV1ConfigSlice(c *Client, f []WorkerPoolPrivatePoolV1Config, res *WorkerPool) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandWorkerPoolPrivatePoolV1Config(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenWorkerPoolPrivatePoolV1ConfigMap flattens the contents of WorkerPoolPrivatePoolV1Config from a JSON
+// response object.
+func flattenWorkerPoolPrivatePoolV1ConfigMap(c *Client, i interface{}, res *WorkerPool) map[string]WorkerPoolPrivatePoolV1Config {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]WorkerPoolPrivatePoolV1Config{}
+	}
+
+	if len(a) == 0 {
+		return map[string]WorkerPoolPrivatePoolV1Config{}
+	}
+
+	items := make(map[string]WorkerPoolPrivatePoolV1Config)
+	for k, item := range a {
+		// Unchecked type assertion: panics if a value is not a JSON object.
+		items[k] = *flattenWorkerPoolPrivatePoolV1Config(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenWorkerPoolPrivatePoolV1ConfigSlice flattens the contents of WorkerPoolPrivatePoolV1Config from a JSON
+// response object.
+func flattenWorkerPoolPrivatePoolV1ConfigSlice(c *Client, i interface{}, res *WorkerPool) []WorkerPoolPrivatePoolV1Config {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []WorkerPoolPrivatePoolV1Config{}
+	}
+
+	if len(a) == 0 {
+		return []WorkerPoolPrivatePoolV1Config{}
+	}
+
+	items := make([]WorkerPoolPrivatePoolV1Config, 0, len(a))
+	for _, item := range a {
+		// Unchecked type assertion: panics if an element is not a JSON object.
+		items = append(items, *flattenWorkerPoolPrivatePoolV1Config(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandWorkerPoolPrivatePoolV1Config expands an instance of WorkerPoolPrivatePoolV1Config into a JSON
+// request object.
+func expandWorkerPoolPrivatePoolV1Config(c *Client, f *WorkerPoolPrivatePoolV1Config, res *WorkerPool) (map[string]interface{}, error) {
+	// A nil/empty config is omitted from the request entirely.
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v, err := expandWorkerPoolPrivatePoolV1ConfigWorkerConfig(c, f.WorkerConfig, res); err != nil {
+		return nil, fmt.Errorf("error expanding WorkerConfig into workerConfig: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["workerConfig"] = v
+	}
+	if v, err := expandWorkerPoolPrivatePoolV1ConfigNetworkConfig(c, f.NetworkConfig, res); err != nil {
+		return nil, fmt.Errorf("error expanding NetworkConfig into networkConfig: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["networkConfig"] = v
+	}
+	if v, err := expandWorkerPoolPrivatePoolV1ConfigPrivateServiceConnect(c, f.PrivateServiceConnect, res); err != nil {
+		return nil, fmt.Errorf("error expanding PrivateServiceConnect into privateServiceConnect: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["privateServiceConnect"] = v
+	}
+
+	return m, nil
+}
+
+// flattenWorkerPoolPrivatePoolV1Config flattens an instance of WorkerPoolPrivatePoolV1Config from a JSON
+// response object.
+func flattenWorkerPoolPrivatePoolV1Config(c *Client, i interface{}, res *WorkerPool) *WorkerPoolPrivatePoolV1Config {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &WorkerPoolPrivatePoolV1Config{}
+
+	// Explicitly-empty objects return the shared Empty* sentinel rather than nil.
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyWorkerPoolPrivatePoolV1Config
+	}
+	r.WorkerConfig = flattenWorkerPoolPrivatePoolV1ConfigWorkerConfig(c, m["workerConfig"], res)
+	r.NetworkConfig = flattenWorkerPoolPrivatePoolV1ConfigNetworkConfig(c, m["networkConfig"], res)
+	r.PrivateServiceConnect = flattenWorkerPoolPrivatePoolV1ConfigPrivateServiceConnect(c, m["privateServiceConnect"], res)
+
+	return r
+}
+
+// expandWorkerPoolPrivatePoolV1ConfigWorkerConfigMap expands the contents of WorkerPoolPrivatePoolV1ConfigWorkerConfig into a JSON
+// request object.
+func expandWorkerPoolPrivatePoolV1ConfigWorkerConfigMap(c *Client, f map[string]WorkerPoolPrivatePoolV1ConfigWorkerConfig, res *WorkerPool) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandWorkerPoolPrivatePoolV1ConfigWorkerConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandWorkerPoolPrivatePoolV1ConfigWorkerConfigSlice expands the contents of WorkerPoolPrivatePoolV1ConfigWorkerConfig into a JSON
+// request object.
+func expandWorkerPoolPrivatePoolV1ConfigWorkerConfigSlice(c *Client, f []WorkerPoolPrivatePoolV1ConfigWorkerConfig, res *WorkerPool) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandWorkerPoolPrivatePoolV1ConfigWorkerConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenWorkerPoolPrivatePoolV1ConfigWorkerConfigMap flattens the contents of WorkerPoolPrivatePoolV1ConfigWorkerConfig from a JSON
+// response object.
+func flattenWorkerPoolPrivatePoolV1ConfigWorkerConfigMap(c *Client, i interface{}, res *WorkerPool) map[string]WorkerPoolPrivatePoolV1ConfigWorkerConfig {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]WorkerPoolPrivatePoolV1ConfigWorkerConfig{}
+	}
+
+	if len(a) == 0 {
+		return map[string]WorkerPoolPrivatePoolV1ConfigWorkerConfig{}
+	}
+
+	items := make(map[string]WorkerPoolPrivatePoolV1ConfigWorkerConfig)
+	for k, item := range a {
+		// Unchecked type assertion: panics if a value is not a JSON object.
+		items[k] = *flattenWorkerPoolPrivatePoolV1ConfigWorkerConfig(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenWorkerPoolPrivatePoolV1ConfigWorkerConfigSlice flattens the contents of WorkerPoolPrivatePoolV1ConfigWorkerConfig from a JSON
+// response object.
+func flattenWorkerPoolPrivatePoolV1ConfigWorkerConfigSlice(c *Client, i interface{}, res *WorkerPool) []WorkerPoolPrivatePoolV1ConfigWorkerConfig {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []WorkerPoolPrivatePoolV1ConfigWorkerConfig{}
+	}
+
+	if len(a) == 0 {
+		return []WorkerPoolPrivatePoolV1ConfigWorkerConfig{}
+	}
+
+	items := make([]WorkerPoolPrivatePoolV1ConfigWorkerConfig, 0, len(a))
+	for _, item := range a {
+		// Unchecked type assertion: panics if an element is not a JSON object.
+		items = append(items, *flattenWorkerPoolPrivatePoolV1ConfigWorkerConfig(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandWorkerPoolPrivatePoolV1ConfigWorkerConfig expands an instance of WorkerPoolPrivatePoolV1ConfigWorkerConfig into a JSON
+// request object.
+func expandWorkerPoolPrivatePoolV1ConfigWorkerConfig(c *Client, f *WorkerPoolPrivatePoolV1ConfigWorkerConfig, res *WorkerPool) (map[string]interface{}, error) {
+	// A nil/empty config is omitted from the request entirely.
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.MachineType; !dcl.IsEmptyValueIndirect(v) {
+		m["machineType"] = v
+	}
+	if v := f.DiskSizeGb; !dcl.IsEmptyValueIndirect(v) {
+		m["diskSizeGb"] = v
+	}
+	if v := f.EnableNestedVirtualization; !dcl.IsEmptyValueIndirect(v) {
+		m["enableNestedVirtualization"] = v
+	}
+
+	return m, nil
+}
+
+// flattenWorkerPoolPrivatePoolV1ConfigWorkerConfig flattens an instance of WorkerPoolPrivatePoolV1ConfigWorkerConfig from a JSON
+// response object.
+func flattenWorkerPoolPrivatePoolV1ConfigWorkerConfig(c *Client, i interface{}, res *WorkerPool) *WorkerPoolPrivatePoolV1ConfigWorkerConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkerPoolPrivatePoolV1ConfigWorkerConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkerPoolPrivatePoolV1ConfigWorkerConfig + } + r.MachineType = dcl.FlattenString(m["machineType"]) + r.DiskSizeGb = dcl.FlattenInteger(m["diskSizeGb"]) + r.EnableNestedVirtualization = dcl.FlattenBool(m["enableNestedVirtualization"]) + + return r +} + +// expandWorkerPoolPrivatePoolV1ConfigNetworkConfigMap expands the contents of WorkerPoolPrivatePoolV1ConfigNetworkConfig into a JSON +// request object. +func expandWorkerPoolPrivatePoolV1ConfigNetworkConfigMap(c *Client, f map[string]WorkerPoolPrivatePoolV1ConfigNetworkConfig, res *WorkerPool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkerPoolPrivatePoolV1ConfigNetworkConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkerPoolPrivatePoolV1ConfigNetworkConfigSlice expands the contents of WorkerPoolPrivatePoolV1ConfigNetworkConfig into a JSON +// request object. +func expandWorkerPoolPrivatePoolV1ConfigNetworkConfigSlice(c *Client, f []WorkerPoolPrivatePoolV1ConfigNetworkConfig, res *WorkerPool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkerPoolPrivatePoolV1ConfigNetworkConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkerPoolPrivatePoolV1ConfigNetworkConfigMap flattens the contents of WorkerPoolPrivatePoolV1ConfigNetworkConfig from a JSON +// response object. 
+func flattenWorkerPoolPrivatePoolV1ConfigNetworkConfigMap(c *Client, i interface{}, res *WorkerPool) map[string]WorkerPoolPrivatePoolV1ConfigNetworkConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkerPoolPrivatePoolV1ConfigNetworkConfig{} + } + + if len(a) == 0 { + return map[string]WorkerPoolPrivatePoolV1ConfigNetworkConfig{} + } + + items := make(map[string]WorkerPoolPrivatePoolV1ConfigNetworkConfig) + for k, item := range a { + items[k] = *flattenWorkerPoolPrivatePoolV1ConfigNetworkConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkerPoolPrivatePoolV1ConfigNetworkConfigSlice flattens the contents of WorkerPoolPrivatePoolV1ConfigNetworkConfig from a JSON +// response object. +func flattenWorkerPoolPrivatePoolV1ConfigNetworkConfigSlice(c *Client, i interface{}, res *WorkerPool) []WorkerPoolPrivatePoolV1ConfigNetworkConfig { + a, ok := i.([]interface{}) + if !ok { + return []WorkerPoolPrivatePoolV1ConfigNetworkConfig{} + } + + if len(a) == 0 { + return []WorkerPoolPrivatePoolV1ConfigNetworkConfig{} + } + + items := make([]WorkerPoolPrivatePoolV1ConfigNetworkConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkerPoolPrivatePoolV1ConfigNetworkConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkerPoolPrivatePoolV1ConfigNetworkConfig expands an instance of WorkerPoolPrivatePoolV1ConfigNetworkConfig into a JSON +// request object. 
+func expandWorkerPoolPrivatePoolV1ConfigNetworkConfig(c *Client, f *WorkerPoolPrivatePoolV1ConfigNetworkConfig, res *WorkerPool) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.PeeredNetwork; !dcl.IsEmptyValueIndirect(v) { + m["peeredNetwork"] = v + } + if v := f.PeeredNetworkIPRange; !dcl.IsEmptyValueIndirect(v) { + m["peeredNetworkIpRange"] = v + } + if v := f.EgressOption; !dcl.IsEmptyValueIndirect(v) { + m["egressOption"] = v + } + + return m, nil +} + +// flattenWorkerPoolPrivatePoolV1ConfigNetworkConfig flattens an instance of WorkerPoolPrivatePoolV1ConfigNetworkConfig from a JSON +// response object. +func flattenWorkerPoolPrivatePoolV1ConfigNetworkConfig(c *Client, i interface{}, res *WorkerPool) *WorkerPoolPrivatePoolV1ConfigNetworkConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkerPoolPrivatePoolV1ConfigNetworkConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkerPoolPrivatePoolV1ConfigNetworkConfig + } + r.PeeredNetwork = dcl.FlattenString(m["peeredNetwork"]) + r.PeeredNetworkIPRange = dcl.FlattenString(m["peeredNetworkIpRange"]) + r.EgressOption = flattenWorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnum(m["egressOption"]) + + return r +} + +// expandWorkerPoolPrivatePoolV1ConfigPrivateServiceConnectMap expands the contents of WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect into a JSON +// request object. 
+func expandWorkerPoolPrivatePoolV1ConfigPrivateServiceConnectMap(c *Client, f map[string]WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect, res *WorkerPool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkerPoolPrivatePoolV1ConfigPrivateServiceConnect(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkerPoolPrivatePoolV1ConfigPrivateServiceConnectSlice expands the contents of WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect into a JSON +// request object. +func expandWorkerPoolPrivatePoolV1ConfigPrivateServiceConnectSlice(c *Client, f []WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect, res *WorkerPool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkerPoolPrivatePoolV1ConfigPrivateServiceConnect(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkerPoolPrivatePoolV1ConfigPrivateServiceConnectMap flattens the contents of WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect from a JSON +// response object. 
+func flattenWorkerPoolPrivatePoolV1ConfigPrivateServiceConnectMap(c *Client, i interface{}, res *WorkerPool) map[string]WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect{} + } + + if len(a) == 0 { + return map[string]WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect{} + } + + items := make(map[string]WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect) + for k, item := range a { + items[k] = *flattenWorkerPoolPrivatePoolV1ConfigPrivateServiceConnect(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkerPoolPrivatePoolV1ConfigPrivateServiceConnectSlice flattens the contents of WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect from a JSON +// response object. +func flattenWorkerPoolPrivatePoolV1ConfigPrivateServiceConnectSlice(c *Client, i interface{}, res *WorkerPool) []WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect { + a, ok := i.([]interface{}) + if !ok { + return []WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect{} + } + + if len(a) == 0 { + return []WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect{} + } + + items := make([]WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkerPoolPrivatePoolV1ConfigPrivateServiceConnect(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkerPoolPrivatePoolV1ConfigPrivateServiceConnect expands an instance of WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect into a JSON +// request object. 
+func expandWorkerPoolPrivatePoolV1ConfigPrivateServiceConnect(c *Client, f *WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect, res *WorkerPool) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.NetworkAttachment; !dcl.IsEmptyValueIndirect(v) { + m["networkAttachment"] = v + } + if v := f.PublicIPAddressDisabled; !dcl.IsEmptyValueIndirect(v) { + m["publicIpAddressDisabled"] = v + } + if v := f.RouteAllTraffic; !dcl.IsEmptyValueIndirect(v) { + m["routeAllTraffic"] = v + } + + return m, nil +} + +// flattenWorkerPoolPrivatePoolV1ConfigPrivateServiceConnect flattens an instance of WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect from a JSON +// response object. +func flattenWorkerPoolPrivatePoolV1ConfigPrivateServiceConnect(c *Client, i interface{}, res *WorkerPool) *WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkerPoolPrivatePoolV1ConfigPrivateServiceConnect + } + r.NetworkAttachment = dcl.FlattenString(m["networkAttachment"]) + r.PublicIPAddressDisabled = dcl.FlattenBool(m["publicIpAddressDisabled"]) + r.RouteAllTraffic = dcl.FlattenBool(m["routeAllTraffic"]) + + return r +} + +// expandWorkerPoolWorkerConfigMap expands the contents of WorkerPoolWorkerConfig into a JSON +// request object. 
+func expandWorkerPoolWorkerConfigMap(c *Client, f map[string]WorkerPoolWorkerConfig, res *WorkerPool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkerPoolWorkerConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkerPoolWorkerConfigSlice expands the contents of WorkerPoolWorkerConfig into a JSON +// request object. +func expandWorkerPoolWorkerConfigSlice(c *Client, f []WorkerPoolWorkerConfig, res *WorkerPool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkerPoolWorkerConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkerPoolWorkerConfigMap flattens the contents of WorkerPoolWorkerConfig from a JSON +// response object. +func flattenWorkerPoolWorkerConfigMap(c *Client, i interface{}, res *WorkerPool) map[string]WorkerPoolWorkerConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkerPoolWorkerConfig{} + } + + if len(a) == 0 { + return map[string]WorkerPoolWorkerConfig{} + } + + items := make(map[string]WorkerPoolWorkerConfig) + for k, item := range a { + items[k] = *flattenWorkerPoolWorkerConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkerPoolWorkerConfigSlice flattens the contents of WorkerPoolWorkerConfig from a JSON +// response object. 
+func flattenWorkerPoolWorkerConfigSlice(c *Client, i interface{}, res *WorkerPool) []WorkerPoolWorkerConfig { + a, ok := i.([]interface{}) + if !ok { + return []WorkerPoolWorkerConfig{} + } + + if len(a) == 0 { + return []WorkerPoolWorkerConfig{} + } + + items := make([]WorkerPoolWorkerConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkerPoolWorkerConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkerPoolWorkerConfig expands an instance of WorkerPoolWorkerConfig into a JSON +// request object. +func expandWorkerPoolWorkerConfig(c *Client, f *WorkerPoolWorkerConfig, res *WorkerPool) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.MachineType; !dcl.IsEmptyValueIndirect(v) { + m["machineType"] = v + } + if v := f.DiskSizeGb; !dcl.IsEmptyValueIndirect(v) { + m["diskSizeGb"] = v + } + if v := f.EnableNestedVirtualization; !dcl.IsEmptyValueIndirect(v) { + m["enableNestedVirtualization"] = v + } + if v := f.NoExternalIP; !dcl.IsEmptyValueIndirect(v) { + m["noExternalIp"] = v + } + + return m, nil +} + +// flattenWorkerPoolWorkerConfig flattens an instance of WorkerPoolWorkerConfig from a JSON +// response object. +func flattenWorkerPoolWorkerConfig(c *Client, i interface{}, res *WorkerPool) *WorkerPoolWorkerConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkerPoolWorkerConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkerPoolWorkerConfig + } + r.MachineType = dcl.FlattenString(m["machineType"]) + r.DiskSizeGb = dcl.FlattenInteger(m["diskSizeGb"]) + r.EnableNestedVirtualization = dcl.FlattenBool(m["enableNestedVirtualization"]) + r.NoExternalIP = dcl.FlattenBool(m["noExternalIp"]) + + return r +} + +// expandWorkerPoolNetworkConfigMap expands the contents of WorkerPoolNetworkConfig into a JSON +// request object. 
+func expandWorkerPoolNetworkConfigMap(c *Client, f map[string]WorkerPoolNetworkConfig, res *WorkerPool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkerPoolNetworkConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkerPoolNetworkConfigSlice expands the contents of WorkerPoolNetworkConfig into a JSON +// request object. +func expandWorkerPoolNetworkConfigSlice(c *Client, f []WorkerPoolNetworkConfig, res *WorkerPool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkerPoolNetworkConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkerPoolNetworkConfigMap flattens the contents of WorkerPoolNetworkConfig from a JSON +// response object. +func flattenWorkerPoolNetworkConfigMap(c *Client, i interface{}, res *WorkerPool) map[string]WorkerPoolNetworkConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkerPoolNetworkConfig{} + } + + if len(a) == 0 { + return map[string]WorkerPoolNetworkConfig{} + } + + items := make(map[string]WorkerPoolNetworkConfig) + for k, item := range a { + items[k] = *flattenWorkerPoolNetworkConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkerPoolNetworkConfigSlice flattens the contents of WorkerPoolNetworkConfig from a JSON +// response object. 
+func flattenWorkerPoolNetworkConfigSlice(c *Client, i interface{}, res *WorkerPool) []WorkerPoolNetworkConfig { + a, ok := i.([]interface{}) + if !ok { + return []WorkerPoolNetworkConfig{} + } + + if len(a) == 0 { + return []WorkerPoolNetworkConfig{} + } + + items := make([]WorkerPoolNetworkConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkerPoolNetworkConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkerPoolNetworkConfig expands an instance of WorkerPoolNetworkConfig into a JSON +// request object. +func expandWorkerPoolNetworkConfig(c *Client, f *WorkerPoolNetworkConfig, res *WorkerPool) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.PeeredNetwork; !dcl.IsEmptyValueIndirect(v) { + m["peeredNetwork"] = v + } + if v := f.PeeredNetworkIPRange; !dcl.IsEmptyValueIndirect(v) { + m["peeredNetworkIpRange"] = v + } + + return m, nil +} + +// flattenWorkerPoolNetworkConfig flattens an instance of WorkerPoolNetworkConfig from a JSON +// response object. +func flattenWorkerPoolNetworkConfig(c *Client, i interface{}, res *WorkerPool) *WorkerPoolNetworkConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkerPoolNetworkConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkerPoolNetworkConfig + } + r.PeeredNetwork = dcl.FlattenString(m["peeredNetwork"]) + r.PeeredNetworkIPRange = dcl.FlattenString(m["peeredNetworkIpRange"]) + + return r +} + +// expandWorkerPoolPrivateServiceConnectMap expands the contents of WorkerPoolPrivateServiceConnect into a JSON +// request object. 
+func expandWorkerPoolPrivateServiceConnectMap(c *Client, f map[string]WorkerPoolPrivateServiceConnect, res *WorkerPool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkerPoolPrivateServiceConnect(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkerPoolPrivateServiceConnectSlice expands the contents of WorkerPoolPrivateServiceConnect into a JSON +// request object. +func expandWorkerPoolPrivateServiceConnectSlice(c *Client, f []WorkerPoolPrivateServiceConnect, res *WorkerPool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkerPoolPrivateServiceConnect(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkerPoolPrivateServiceConnectMap flattens the contents of WorkerPoolPrivateServiceConnect from a JSON +// response object. +func flattenWorkerPoolPrivateServiceConnectMap(c *Client, i interface{}, res *WorkerPool) map[string]WorkerPoolPrivateServiceConnect { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkerPoolPrivateServiceConnect{} + } + + if len(a) == 0 { + return map[string]WorkerPoolPrivateServiceConnect{} + } + + items := make(map[string]WorkerPoolPrivateServiceConnect) + for k, item := range a { + items[k] = *flattenWorkerPoolPrivateServiceConnect(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkerPoolPrivateServiceConnectSlice flattens the contents of WorkerPoolPrivateServiceConnect from a JSON +// response object. 
+func flattenWorkerPoolPrivateServiceConnectSlice(c *Client, i interface{}, res *WorkerPool) []WorkerPoolPrivateServiceConnect { + a, ok := i.([]interface{}) + if !ok { + return []WorkerPoolPrivateServiceConnect{} + } + + if len(a) == 0 { + return []WorkerPoolPrivateServiceConnect{} + } + + items := make([]WorkerPoolPrivateServiceConnect, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkerPoolPrivateServiceConnect(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkerPoolPrivateServiceConnect expands an instance of WorkerPoolPrivateServiceConnect into a JSON +// request object. +func expandWorkerPoolPrivateServiceConnect(c *Client, f *WorkerPoolPrivateServiceConnect, res *WorkerPool) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.NetworkAttachment; !dcl.IsEmptyValueIndirect(v) { + m["networkAttachment"] = v + } + if v := f.RouteAllTraffic; !dcl.IsEmptyValueIndirect(v) { + m["routeAllTraffic"] = v + } + + return m, nil +} + +// flattenWorkerPoolPrivateServiceConnect flattens an instance of WorkerPoolPrivateServiceConnect from a JSON +// response object. +func flattenWorkerPoolPrivateServiceConnect(c *Client, i interface{}, res *WorkerPool) *WorkerPoolPrivateServiceConnect { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkerPoolPrivateServiceConnect{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkerPoolPrivateServiceConnect + } + r.NetworkAttachment = dcl.FlattenString(m["networkAttachment"]) + r.RouteAllTraffic = dcl.FlattenBool(m["routeAllTraffic"]) + + return r +} + +// flattenWorkerPoolStateEnumMap flattens the contents of WorkerPoolStateEnum from a JSON +// response object. 
+func flattenWorkerPoolStateEnumMap(c *Client, i interface{}, res *WorkerPool) map[string]WorkerPoolStateEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkerPoolStateEnum{} + } + + if len(a) == 0 { + return map[string]WorkerPoolStateEnum{} + } + + items := make(map[string]WorkerPoolStateEnum) + for k, item := range a { + items[k] = *flattenWorkerPoolStateEnum(item.(interface{})) + } + + return items +} + +// flattenWorkerPoolStateEnumSlice flattens the contents of WorkerPoolStateEnum from a JSON +// response object. +func flattenWorkerPoolStateEnumSlice(c *Client, i interface{}, res *WorkerPool) []WorkerPoolStateEnum { + a, ok := i.([]interface{}) + if !ok { + return []WorkerPoolStateEnum{} + } + + if len(a) == 0 { + return []WorkerPoolStateEnum{} + } + + items := make([]WorkerPoolStateEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkerPoolStateEnum(item.(interface{}))) + } + + return items +} + +// flattenWorkerPoolStateEnum asserts that an interface is a string, and returns a +// pointer to a *WorkerPoolStateEnum with the same value as that string. +func flattenWorkerPoolStateEnum(i interface{}) *WorkerPoolStateEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return WorkerPoolStateEnumRef(s) +} + +// flattenWorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnumMap flattens the contents of WorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnum from a JSON +// response object. 
+func flattenWorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnumMap(c *Client, i interface{}, res *WorkerPool) map[string]WorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnum{} + } + + if len(a) == 0 { + return map[string]WorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnum{} + } + + items := make(map[string]WorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnum) + for k, item := range a { + items[k] = *flattenWorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnum(item.(interface{})) + } + + return items +} + +// flattenWorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnumSlice flattens the contents of WorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnum from a JSON +// response object. +func flattenWorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnumSlice(c *Client, i interface{}, res *WorkerPool) []WorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnum { + a, ok := i.([]interface{}) + if !ok { + return []WorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnum{} + } + + if len(a) == 0 { + return []WorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnum{} + } + + items := make([]WorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnum(item.(interface{}))) + } + + return items +} + +// flattenWorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnum asserts that an interface is a string, and returns a +// pointer to a *WorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnum with the same value as that string. 
+func flattenWorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnum(i interface{}) *WorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return WorkerPoolPrivatePoolV1ConfigNetworkConfigEgressOptionEnumRef(s) +} + +// This function returns a matcher that checks whether a serialized resource matches this resource +// in its parameters (as defined by the fields in a Get, which definitionally define resource +// identity). This is useful in extracting the element from a List call. +func (r *WorkerPool) matcher(c *Client) func([]byte) bool { + return func(b []byte) bool { + cr, err := unmarshalWorkerPool(b, c, r) + if err != nil { + c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.") + return false + } + nr := r.urlNormalized() + ncr := cr.urlNormalized() + c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr) + + if nr.Project == nil && ncr.Project == nil { + c.Config.Logger.Info("Both Project fields null - considering equal.") + } else if nr.Project == nil || ncr.Project == nil { + c.Config.Logger.Info("Only one Project field is null - considering unequal.") + return false + } else if *nr.Project != *ncr.Project { + return false + } + if nr.Location == nil && ncr.Location == nil { + c.Config.Logger.Info("Both Location fields null - considering equal.") + } else if nr.Location == nil || ncr.Location == nil { + c.Config.Logger.Info("Only one Location field is null - considering unequal.") + return false + } else if *nr.Location != *ncr.Location { + return false + } + if nr.Name == nil && ncr.Name == nil { + c.Config.Logger.Info("Both Name fields null - considering equal.") + } else if nr.Name == nil || ncr.Name == nil { + c.Config.Logger.Info("Only one Name field is null - considering unequal.") + return false + } else if *nr.Name != *ncr.Name { + return false + } + return true + } +} + +type workerPoolDiff struct { + // The diff should include one or the other of 
RequiresRecreate or UpdateOp. + RequiresRecreate bool + UpdateOp workerPoolApiOperation + FieldName string // used for error logging +} + +func convertFieldDiffsToWorkerPoolDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]workerPoolDiff, error) { + opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff) + // Map each operation name to the field diffs associated with it. + for _, fd := range fds { + for _, ro := range fd.ResultingOperation { + if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok { + fieldDiffs = append(fieldDiffs, fd) + opNamesToFieldDiffs[ro] = fieldDiffs + } else { + config.Logger.Infof("%s required due to diff: %v", ro, fd) + opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd} + } + } + } + var diffs []workerPoolDiff + // For each operation name, create a workerPoolDiff which contains the operation. + for opName, fieldDiffs := range opNamesToFieldDiffs { + // Use the first field diff's field name for logging required recreate error. + diff := workerPoolDiff{FieldName: fieldDiffs[0].FieldName} + if opName == "Recreate" { + diff.RequiresRecreate = true + } else { + apiOp, err := convertOpNameToWorkerPoolApiOperation(opName, fieldDiffs, opts...) 
+ if err != nil { + return diffs, err + } + diff.UpdateOp = apiOp + } + diffs = append(diffs, diff) + } + return diffs, nil +} + +func convertOpNameToWorkerPoolApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (workerPoolApiOperation, error) { + switch opName { + + case "updateWorkerPoolUpdateWorkerPoolOperation": + return &updateWorkerPoolUpdateWorkerPoolOperation{FieldDiffs: fieldDiffs}, nil + + default: + return nil, fmt.Errorf("no such operation with name: %v", opName) + } +} + +func extractWorkerPoolFields(r *WorkerPool) error { + if dcl.IsEmptyValueIndirect(r.PrivatePoolV1Config) { + r.PrivatePoolV1Config = betaToGaPrivatePool(r, r.PrivatePoolV1Config) + } + vPrivatePoolV1Config := r.PrivatePoolV1Config + if vPrivatePoolV1Config == nil { + // note: explicitly not the empty object. + vPrivatePoolV1Config = &WorkerPoolPrivatePoolV1Config{} + } + if err := extractWorkerPoolPrivatePoolV1ConfigFields(r, vPrivatePoolV1Config); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vPrivatePoolV1Config) { + r.PrivatePoolV1Config = vPrivatePoolV1Config + } + vWorkerConfig := r.WorkerConfig + if vWorkerConfig == nil { + // note: explicitly not the empty object. + vWorkerConfig = &WorkerPoolWorkerConfig{} + } + if err := extractWorkerPoolWorkerConfigFields(r, vWorkerConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vWorkerConfig) { + r.WorkerConfig = vWorkerConfig + } + vNetworkConfig := r.NetworkConfig + if vNetworkConfig == nil { + // note: explicitly not the empty object. + vNetworkConfig = &WorkerPoolNetworkConfig{} + } + if err := extractWorkerPoolNetworkConfigFields(r, vNetworkConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vNetworkConfig) { + r.NetworkConfig = vNetworkConfig + } + vPrivateServiceConnect := r.PrivateServiceConnect + if vPrivateServiceConnect == nil { + // note: explicitly not the empty object. 
+ vPrivateServiceConnect = &WorkerPoolPrivateServiceConnect{} + } + if err := extractWorkerPoolPrivateServiceConnectFields(r, vPrivateServiceConnect); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vPrivateServiceConnect) { + r.PrivateServiceConnect = vPrivateServiceConnect + } + return nil +} +func extractWorkerPoolPrivatePoolV1ConfigFields(r *WorkerPool, o *WorkerPoolPrivatePoolV1Config) error { + vWorkerConfig := o.WorkerConfig + if vWorkerConfig == nil { + // note: explicitly not the empty object. + vWorkerConfig = &WorkerPoolPrivatePoolV1ConfigWorkerConfig{} + } + if err := extractWorkerPoolPrivatePoolV1ConfigWorkerConfigFields(r, vWorkerConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vWorkerConfig) { + o.WorkerConfig = vWorkerConfig + } + vNetworkConfig := o.NetworkConfig + if vNetworkConfig == nil { + // note: explicitly not the empty object. + vNetworkConfig = &WorkerPoolPrivatePoolV1ConfigNetworkConfig{} + } + if err := extractWorkerPoolPrivatePoolV1ConfigNetworkConfigFields(r, vNetworkConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vNetworkConfig) { + o.NetworkConfig = vNetworkConfig + } + vPrivateServiceConnect := o.PrivateServiceConnect + if vPrivateServiceConnect == nil { + // note: explicitly not the empty object. 
+		vPrivateServiceConnect = &WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect{}
+	}
+	if err := extractWorkerPoolPrivatePoolV1ConfigPrivateServiceConnectFields(r, vPrivateServiceConnect); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vPrivateServiceConnect) { // only write back a non-empty nested object
+		o.PrivateServiceConnect = vPrivateServiceConnect
+	}
+	return nil
+}
+func extractWorkerPoolPrivatePoolV1ConfigWorkerConfigFields(r *WorkerPool, o *WorkerPoolPrivatePoolV1ConfigWorkerConfig) error { // generated no-op: this type has no fields needing extraction (same for the stubs below)
+	return nil
+}
+func extractWorkerPoolPrivatePoolV1ConfigNetworkConfigFields(r *WorkerPool, o *WorkerPoolPrivatePoolV1ConfigNetworkConfig) error {
+	return nil
+}
+func extractWorkerPoolPrivatePoolV1ConfigPrivateServiceConnectFields(r *WorkerPool, o *WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect) error {
+	return nil
+}
+func extractWorkerPoolWorkerConfigFields(r *WorkerPool, o *WorkerPoolWorkerConfig) error {
+	return nil
+}
+func extractWorkerPoolNetworkConfigFields(r *WorkerPool, o *WorkerPoolNetworkConfig) error {
+	return nil
+}
+func extractWorkerPoolPrivateServiceConnectFields(r *WorkerPool, o *WorkerPoolPrivateServiceConnect) error {
+	return nil
+}
+
+func postReadExtractWorkerPoolFields(r *WorkerPool) error { // post-Read counterpart of the extract pass above; additionally normalizes the private pool config first
+
+	r.PrivatePoolV1Config = gaToBetaPrivatePool(r, r.PrivatePoolV1Config) // NOTE(review): gaToBetaPrivatePool is defined elsewhere in this file — presumably maps GA-shaped config into the beta-style struct; confirm
+	vPrivatePoolV1Config := r.PrivatePoolV1Config
+	if vPrivatePoolV1Config == nil {
+		// note: explicitly not the empty object.
+		vPrivatePoolV1Config = &WorkerPoolPrivatePoolV1Config{}
+	}
+	if err := postReadExtractWorkerPoolPrivatePoolV1ConfigFields(r, vPrivatePoolV1Config); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vPrivatePoolV1Config) { // only write back a non-empty nested object
+		r.PrivatePoolV1Config = vPrivatePoolV1Config
+	}
+	vWorkerConfig := r.WorkerConfig
+	if vWorkerConfig == nil {
+		// note: explicitly not the empty object.
+		vWorkerConfig = &WorkerPoolWorkerConfig{}
+	}
+	if err := postReadExtractWorkerPoolWorkerConfigFields(r, vWorkerConfig); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vWorkerConfig) {
+		r.WorkerConfig = vWorkerConfig
+	}
+	vNetworkConfig := r.NetworkConfig
+	if vNetworkConfig == nil {
+		// note: explicitly not the empty object.
+		vNetworkConfig = &WorkerPoolNetworkConfig{}
+	}
+	if err := postReadExtractWorkerPoolNetworkConfigFields(r, vNetworkConfig); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vNetworkConfig) {
+		r.NetworkConfig = vNetworkConfig
+	}
+	vPrivateServiceConnect := r.PrivateServiceConnect
+	if vPrivateServiceConnect == nil {
+		// note: explicitly not the empty object.
+		vPrivateServiceConnect = &WorkerPoolPrivateServiceConnect{}
+	}
+	if err := postReadExtractWorkerPoolPrivateServiceConnectFields(r, vPrivateServiceConnect); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vPrivateServiceConnect) {
+		r.PrivateServiceConnect = vPrivateServiceConnect
+	}
+	return nil
+}
+func postReadExtractWorkerPoolPrivatePoolV1ConfigFields(r *WorkerPool, o *WorkerPoolPrivatePoolV1Config) error { // delegates each nested block to the per-type extract helpers (all currently no-ops)
+	vWorkerConfig := o.WorkerConfig
+	if vWorkerConfig == nil {
+		// note: explicitly not the empty object.
+		vWorkerConfig = &WorkerPoolPrivatePoolV1ConfigWorkerConfig{}
+	}
+	if err := extractWorkerPoolPrivatePoolV1ConfigWorkerConfigFields(r, vWorkerConfig); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vWorkerConfig) {
+		o.WorkerConfig = vWorkerConfig
+	}
+	vNetworkConfig := o.NetworkConfig
+	if vNetworkConfig == nil {
+		// note: explicitly not the empty object.
+		vNetworkConfig = &WorkerPoolPrivatePoolV1ConfigNetworkConfig{}
+	}
+	if err := extractWorkerPoolPrivatePoolV1ConfigNetworkConfigFields(r, vNetworkConfig); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vNetworkConfig) {
+		o.NetworkConfig = vNetworkConfig
+	}
+	vPrivateServiceConnect := o.PrivateServiceConnect
+	if vPrivateServiceConnect == nil {
+		// note: explicitly not the empty object.
+		vPrivateServiceConnect = &WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect{}
+	}
+	if err := extractWorkerPoolPrivatePoolV1ConfigPrivateServiceConnectFields(r, vPrivateServiceConnect); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vPrivateServiceConnect) {
+		o.PrivateServiceConnect = vPrivateServiceConnect
+	}
+	return nil
+}
+func postReadExtractWorkerPoolPrivatePoolV1ConfigWorkerConfigFields(r *WorkerPool, o *WorkerPoolPrivatePoolV1ConfigWorkerConfig) error { // generated no-op: no post-Read extraction for this type (same for the stubs below)
+	return nil
+}
+func postReadExtractWorkerPoolPrivatePoolV1ConfigNetworkConfigFields(r *WorkerPool, o *WorkerPoolPrivatePoolV1ConfigNetworkConfig) error {
+	return nil
+}
+func postReadExtractWorkerPoolPrivatePoolV1ConfigPrivateServiceConnectFields(r *WorkerPool, o *WorkerPoolPrivatePoolV1ConfigPrivateServiceConnect) error {
+	return nil
+}
+func postReadExtractWorkerPoolWorkerConfigFields(r *WorkerPool, o *WorkerPoolWorkerConfig) error {
+	return nil
+}
+func postReadExtractWorkerPoolNetworkConfigFields(r *WorkerPool, o *WorkerPoolNetworkConfig) error {
+	return nil
+}
+func postReadExtractWorkerPoolPrivateServiceConnectFields(r *WorkerPool, o *WorkerPoolPrivateServiceConnect) error {
+	return nil
+}
diff --git a/mmv1/third_party/terraform/services/cloudbuild/worker_pool_schema.go.tmpl b/mmv1/third_party/terraform/services/cloudbuild/worker_pool_schema.go.tmpl
new file mode 100644
index 000000000000..2d536198254e
--- /dev/null
+++ b/mmv1/third_party/terraform/services/cloudbuild/worker_pool_schema.go.tmpl
@@ -0,0 +1,296 @@
+package cloudbuild
+
+import (
+	dcl
"github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +func DCLWorkerPoolSchema() *dcl.Schema { + return &dcl.Schema{ + Info: &dcl.Info{ + Title: "CloudBuild/WorkerPool", + Description: "The CloudBuild WorkerPool resource", + StructName: "WorkerPool", + }, + Paths: &dcl.Paths{ + Get: &dcl.Path{ + Description: "The function used to get information about a WorkerPool", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "workerPool", + Required: true, + Description: "A full instance of a WorkerPool", + }, + }, + }, + Apply: &dcl.Path{ + Description: "The function used to apply information about a WorkerPool", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "workerPool", + Required: true, + Description: "A full instance of a WorkerPool", + }, + }, + }, + Delete: &dcl.Path{ + Description: "The function used to delete a WorkerPool", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "workerPool", + Required: true, + Description: "A full instance of a WorkerPool", + }, + }, + }, + DeleteAll: &dcl.Path{ + Description: "The function used to delete all WorkerPool", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "project", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + dcl.PathParameters{ + Name: "location", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + }, + }, + List: &dcl.Path{ + Description: "The function used to list information about many WorkerPool", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "project", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + dcl.PathParameters{ + Name: "location", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + }, + }, + }, + Components: &dcl.Components{ + Schemas: map[string]*dcl.Component{ + "WorkerPool": &dcl.Component{ + Title: "WorkerPool", + ID: "projects/{{ "{{" }}project{{ "}}" 
}}/locations/{{ "{{" }}location{{ "}}" }}/workerPools/{{ "{{" }}name{{ "}}" }}", + ParentContainer: "project", + HasCreate: true, + SchemaProperty: dcl.Property{ + Type: "object", + Required: []string{ + "name", + "project", + "location", + }, + Properties: map[string]*dcl.Property{ + "annotations": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "Annotations", + Description: "User specified annotations. See https://google.aip.dev/128#annotations for more details such as format and size limitations.", + }, + "createTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "CreateTime", + ReadOnly: true, + Description: "Output only. Time at which the request to create the `WorkerPool` was received.", + Immutable: true, + }, + "deleteTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "DeleteTime", + ReadOnly: true, + Description: "Output only. Time at which the request to delete the `WorkerPool` was received.", + Immutable: true, + }, + "displayName": &dcl.Property{ + Type: "string", + GoName: "DisplayName", + Description: "A user-specified, human-readable name for the `WorkerPool`. If provided, this value must be 1-63 characters.", + }, + "etag": &dcl.Property{ + Type: "string", + GoName: "Etag", + ReadOnly: true, + Description: "Output only. Checksum computed by the server. 
May be sent on update and delete requests to ensure that the client has an up-to-date value before proceeding.", + Immutable: true, + }, + "location": &dcl.Property{ + Type: "string", + GoName: "Location", + Description: "The location for the resource", + Immutable: true, + Parameter: true, + }, + "name": &dcl.Property{ + Type: "string", + GoName: "Name", + Description: "User-defined name of the `WorkerPool`.", + Immutable: true, + HasLongForm: true, + }, + "networkConfig": &dcl.Property{ + Type: "object", + GoName: "NetworkConfig", + GoType: "WorkerPoolNetworkConfig", + Description: "Network configuration for the `WorkerPool`.", + Immutable: true, + Conflicts: []string{ + "privateServiceConnect", + }, + Required: []string{ + "peeredNetwork", + }, + Properties: map[string]*dcl.Property{ + "peeredNetwork": &dcl.Property{ + Type: "string", + GoName: "PeeredNetwork", + Description: "Required. Immutable. The network definition that the workers are peered to. If this section is left empty, the workers will be peered to `WorkerPool.project_id` on the service producer network. Must be in the format `projects/{project}/global/networks/{network}`, where `{project}` is a project number, such as `12345`, and `{network}` is the name of a VPC network in the project. See [Understanding network configuration options](https://cloud.google.com/cloud-build/docs/custom-workers/set-up-custom-worker-pool-environment#understanding_the_network_configuration_options)", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Compute/Network", + Field: "selfLink", + }, + }, + }, + "peeredNetworkIPRange": &dcl.Property{ + Type: "string", + GoName: "PeeredNetworkIPRange", + Description: "Optional. Immutable. Subnet IP range within the peered network. This is specified in CIDR notation with a slash and the subnet prefix size. You can optionally specify an IP address before the subnet prefix value. e.g. 
`192.168.0.0/29` would specify an IP range starting at 192.168.0.0 with a prefix size of 29 bits. `/16` would specify a prefix size of 16 bits, with an automatically determined IP within the peered VPC. If unspecified, a value of `/24` will be used.", + Immutable: true, + }, + }, + }, + "privateServiceConnect": &dcl.Property{ + Type: "object", + GoName: "PrivateServiceConnect", + GoType: "WorkerPoolPrivateServiceConnect", + Description: "Private Service Connect configuration for the pool.", + Immutable: true, + Conflicts: []string{ + "networkConfig", + }, + Required: []string{ + "networkAttachment", + }, + Properties: map[string]*dcl.Property{ + "networkAttachment": &dcl.Property{ + Type: "string", + GoName: "NetworkAttachment", + Description: "Required. Immutable. The network attachment that the worker network interface is connected to. Must be in the format `projects/{project}/regions/{region}/networkAttachments/{networkAttachment}`. The region of network attachment must be the same as the worker pool. See [Network Attachments](https://cloud.google.com/vpc/docs/about-network-attachments)", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Compute/NetworkAttachment", + Field: "selfLink", + }, + }, + }, + "routeAllTraffic": &dcl.Property{ + Type: "boolean", + GoName: "RouteAllTraffic", + Description: "Immutable. Route all traffic through PSC interface. Enable this if you want full control of traffic in the private pool. Configure Cloud NAT for the subnet of network attachment if you need to access public Internet. If false, Only route private IPs, e.g. 
10.0.0.0/8, 172.16.0.0/12, and 192.168.0.0/16 through PSC interface.", + Immutable: true, + }, + }, + }, + "project": &dcl.Property{ + Type: "string", + GoName: "Project", + Description: "The project for the resource", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Cloudresourcemanager/Project", + Field: "name", + Parent: true, + }, + }, + Parameter: true, + }, + "state": &dcl.Property{ + Type: "string", + GoName: "State", + GoType: "WorkerPoolStateEnum", + ReadOnly: true, + Description: "Output only. `WorkerPool` state. Possible values: STATE_UNSPECIFIED, PENDING, APPROVED, REJECTED, CANCELLED", + Immutable: true, + Enum: []string{ + "STATE_UNSPECIFIED", + "PENDING", + "APPROVED", + "REJECTED", + "CANCELLED", + }, + }, + "uid": &dcl.Property{ + Type: "string", + GoName: "Uid", + ReadOnly: true, + Description: "Output only. A unique identifier for the `WorkerPool`.", + Immutable: true, + }, + "updateTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "UpdateTime", + ReadOnly: true, + Description: "Output only. Time at which the request to update the `WorkerPool` was received.", + Immutable: true, + }, + "workerConfig": &dcl.Property{ + Type: "object", + GoName: "WorkerConfig", + GoType: "WorkerPoolWorkerConfig", + Description: "Configuration to be used for a creating workers in the `WorkerPool`.", + ServerDefault: true, + Properties: map[string]*dcl.Property{ + "diskSizeGb": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "DiskSizeGb", + Description: "Size of the disk attached to the worker, in GB. See [Worker pool config file](https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). Specify a value of up to 1000. 
If `0` is specified, Cloud Build will use a standard disk size.", + }, + "enableNestedVirtualization": &dcl.Property{ + Type: "boolean", + GoName: "EnableNestedVirtualization", + Description: "Enable nested virtualization on the worker, if supported by the machine type. See [Worker pool config file](https://cloud.google.com/build/docs/private-pools/worker-pool-config-file-schema). If left blank, Cloud Build will set this to false.", + }, + "machineType": &dcl.Property{ + Type: "string", + GoName: "MachineType", + Description: "Machine type of a worker, such as `n1-standard-1`. See [Worker pool config file](https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). If left blank, Cloud Build will use `n1-standard-1`.", + }, + "noExternalIP": &dcl.Property{ + Type: "boolean", + GoName: "NoExternalIP", + Description: "If true, workers are created without any public address, which prevents network egress to public IPs.", + ServerDefault: true, + }, + }, + }, + }, + }, + }, + }, + }, + } +} diff --git a/mmv1/third_party/terraform/services/clouddeploy/client.go.tmpl b/mmv1/third_party/terraform/services/clouddeploy/client.go.tmpl new file mode 100644 index 000000000000..72b5cafe657f --- /dev/null +++ b/mmv1/third_party/terraform/services/clouddeploy/client.go.tmpl @@ -0,0 +1,18 @@ +package clouddeploy + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +// The Client is the base struct of all operations. This will receive the +// Get, Delete, List, and Apply operations on all resources. +type Client struct { + Config *dcl.Config +} + +// NewClient creates a client that retries all operations a few times each. 
+func NewClient(c *dcl.Config) *Client { // NOTE(review): the doc comment above promises retries, but none are wired up here — presumably retry behavior lives in dcl.Config; confirm
+	return &Client{
+		Config: c,
+	}
+}
diff --git a/mmv1/third_party/terraform/services/clouddeploy/delivery_pipeline.go.tmpl b/mmv1/third_party/terraform/services/clouddeploy/delivery_pipeline.go.tmpl
new file mode 100644
index 000000000000..40a981ea38fa
--- /dev/null
+++ b/mmv1/third_party/terraform/services/clouddeploy/delivery_pipeline.go.tmpl
@@ -0,0 +1,1658 @@
+package clouddeploy
+
+import (
+	"bytes"
+	"context"
+	"crypto/sha256"
+	"encoding/json"
+	"fmt"
+	"time"
+
+{{- if ne $.TargetVersionName "ga" }}
+	"google.golang.org/api/googleapi"
+{{- end }}
+	dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource"
+{{- if eq $.TargetVersionName "ga" }}
+	"google.golang.org/api/googleapi"
+{{- end }}
+)
+
+type DeliveryPipeline struct { // pointer/map fields let the DCL distinguish unset from zero values
+	Name           *string                         `json:"name"`
+	Uid            *string                         `json:"uid"`
+	Description    *string                         `json:"description"`
+	Annotations    map[string]string               `json:"annotations"`
+	Labels         map[string]string               `json:"labels"`
+	CreateTime     *string                         `json:"createTime"`
+	UpdateTime     *string                         `json:"updateTime"`
+	SerialPipeline *DeliveryPipelineSerialPipeline `json:"serialPipeline"`
+	Condition      *DeliveryPipelineCondition      `json:"condition"`
+	Etag           *string                         `json:"etag"`
+	Project        *string                         `json:"project"`
+	Location       *string                         `json:"location"`
+	Suspended      *bool                           `json:"suspended"`
+}
+
+func (r *DeliveryPipeline) String() string {
+	return dcl.SprintResource(r)
+}
+
+type DeliveryPipelineSerialPipeline struct {
+	empty  bool                                   `json:"-"` // sentinel flag; set only via the Empty* singleton below
+	Stages []DeliveryPipelineSerialPipelineStages `json:"stages"`
+}
+
+type jsonDeliveryPipelineSerialPipeline DeliveryPipelineSerialPipeline // tag-compatible alias; decoding into it avoids recursing back into UnmarshalJSON
+
+func (r *DeliveryPipelineSerialPipeline) UnmarshalJSON(data []byte) error {
+	var res jsonDeliveryPipelineSerialPipeline
+	if err := json.Unmarshal(data, &res); err != nil {
+		return err
+	}
+
+	var m map[string]interface{}
+	json.Unmarshal(data, &m) // error intentionally ignored: m is only used for the empty-object check below
+
+	if len(m) == 0 {
+		*r = *EmptyDeliveryPipelineSerialPipeline
+	} else {
+
+		r.Stages =
res.Stages + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineSerialPipeline is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyDeliveryPipelineSerialPipeline *DeliveryPipelineSerialPipeline = &DeliveryPipelineSerialPipeline{empty: true} + +func (r *DeliveryPipelineSerialPipeline) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineSerialPipeline) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineSerialPipeline) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineSerialPipelineStages struct { + empty bool `json:"-"` + TargetId *string `json:"targetId"` + Profiles []string `json:"profiles"` + Strategy *DeliveryPipelineSerialPipelineStagesStrategy `json:"strategy"` + DeployParameters []DeliveryPipelineSerialPipelineStagesDeployParameters `json:"deployParameters"` +} + +type jsonDeliveryPipelineSerialPipelineStages DeliveryPipelineSerialPipelineStages + +func (r *DeliveryPipelineSerialPipelineStages) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineSerialPipelineStages + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineSerialPipelineStages + } else { + + r.TargetId = res.TargetId + + r.Profiles = res.Profiles + + r.Strategy = res.Strategy + + r.DeployParameters = res.DeployParameters + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineSerialPipelineStages is +// empty. Go lacks global const objects, but this object should be treated +// as one. 
Modifying this object will have undesirable results. +var EmptyDeliveryPipelineSerialPipelineStages *DeliveryPipelineSerialPipelineStages = &DeliveryPipelineSerialPipelineStages{empty: true} + +func (r *DeliveryPipelineSerialPipelineStages) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineSerialPipelineStages) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineSerialPipelineStages) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineSerialPipelineStagesStrategy struct { + empty bool `json:"-"` + Standard *DeliveryPipelineSerialPipelineStagesStrategyStandard `json:"standard"` + Canary *DeliveryPipelineSerialPipelineStagesStrategyCanary `json:"canary"` +} + +type jsonDeliveryPipelineSerialPipelineStagesStrategy DeliveryPipelineSerialPipelineStagesStrategy + +func (r *DeliveryPipelineSerialPipelineStagesStrategy) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineSerialPipelineStagesStrategy + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineSerialPipelineStagesStrategy + } else { + + r.Standard = res.Standard + + r.Canary = res.Canary + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineSerialPipelineStagesStrategy is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyDeliveryPipelineSerialPipelineStagesStrategy *DeliveryPipelineSerialPipelineStagesStrategy = &DeliveryPipelineSerialPipelineStagesStrategy{empty: true} + +func (r *DeliveryPipelineSerialPipelineStagesStrategy) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategy) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategy) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineSerialPipelineStagesStrategyStandard struct { + empty bool `json:"-"` + Verify *bool `json:"verify"` + Predeploy *DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy `json:"predeploy"` + Postdeploy *DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy `json:"postdeploy"` +} + +type jsonDeliveryPipelineSerialPipelineStagesStrategyStandard DeliveryPipelineSerialPipelineStagesStrategyStandard + +func (r *DeliveryPipelineSerialPipelineStagesStrategyStandard) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineSerialPipelineStagesStrategyStandard + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineSerialPipelineStagesStrategyStandard + } else { + + r.Verify = res.Verify + + r.Predeploy = res.Predeploy + + r.Postdeploy = res.Postdeploy + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineSerialPipelineStagesStrategyStandard is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyDeliveryPipelineSerialPipelineStagesStrategyStandard *DeliveryPipelineSerialPipelineStagesStrategyStandard = &DeliveryPipelineSerialPipelineStagesStrategyStandard{empty: true} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyStandard) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyStandard) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyStandard) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy struct { + empty bool `json:"-"` + Actions []string `json:"actions"` +} + +type jsonDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy + +func (r *DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy + } else { + + r.Actions = res.Actions + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy *DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy = &DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy{empty: true} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy struct { + empty bool `json:"-"` + Actions []string `json:"actions"` +} + +type jsonDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy + +func (r *DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy + } else { + + r.Actions = res.Actions + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy *DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy = &DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy{empty: true} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineSerialPipelineStagesStrategyCanary struct { + empty bool `json:"-"` + RuntimeConfig *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig `json:"runtimeConfig"` + CanaryDeployment *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment `json:"canaryDeployment"` + CustomCanaryDeployment *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment `json:"customCanaryDeployment"` +} + +type jsonDeliveryPipelineSerialPipelineStagesStrategyCanary DeliveryPipelineSerialPipelineStagesStrategyCanary + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanary) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineSerialPipelineStagesStrategyCanary + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineSerialPipelineStagesStrategyCanary + } else { + + r.RuntimeConfig = res.RuntimeConfig + + r.CanaryDeployment = res.CanaryDeployment + + r.CustomCanaryDeployment = res.CustomCanaryDeployment + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineSerialPipelineStagesStrategyCanary is +// empty. 
Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyDeliveryPipelineSerialPipelineStagesStrategyCanary *DeliveryPipelineSerialPipelineStagesStrategyCanary = &DeliveryPipelineSerialPipelineStagesStrategyCanary{empty: true} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanary) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanary) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanary) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig struct { + empty bool `json:"-"` + Kubernetes *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes `json:"kubernetes"` + CloudRun *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun `json:"cloudRun"` +} + +type jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig + } else { + + r.Kubernetes = res.Kubernetes + + r.CloudRun = res.CloudRun + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig is +// empty. 
Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig = &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig{empty: true} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes struct { + empty bool `json:"-"` + GatewayServiceMesh *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh `json:"gatewayServiceMesh"` + ServiceNetworking *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking `json:"serviceNetworking"` +} + +type jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes + } else { + + r.GatewayServiceMesh = res.GatewayServiceMesh + + r.ServiceNetworking = 
res.ServiceNetworking + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes = &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes{empty: true} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh struct { + empty bool `json:"-"` + HttpRoute *string `json:"httpRoute"` + Service *string `json:"service"` + Deployment *string `json:"deployment"` + RouteUpdateWaitTime *string `json:"routeUpdateWaitTime"` + StableCutbackDuration *string `json:"stableCutbackDuration"` + PodSelectorLabel *string `json:"podSelectorLabel"` + RouteDestinations *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations `json:"routeDestinations"` +} + +type jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh + +func (r 
*DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh + } else { + + r.HttpRoute = res.HttpRoute + + r.Service = res.Service + + r.Deployment = res.Deployment + + r.RouteUpdateWaitTime = res.RouteUpdateWaitTime + + r.StableCutbackDuration = res.StableCutbackDuration + + r.PodSelectorLabel = res.PodSelectorLabel + + r.RouteDestinations = res.RouteDestinations + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh = &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh{empty: true} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations struct { + empty bool `json:"-"` + DestinationIds []string `json:"destinationIds"` + PropagateService *bool `json:"propagateService"` +} + +type jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations + } else { + + r.DestinationIds 
= res.DestinationIds + + r.PropagateService = res.PropagateService + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations = &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations{empty: true} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking struct { + empty bool `json:"-"` + Service *string `json:"service"` + Deployment *string `json:"deployment"` + DisablePodOverprovisioning *bool `json:"disablePodOverprovisioning"` + PodSelectorLabel *string `json:"podSelectorLabel"` +} + +type jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking + +func (r 
*DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking + } else { + + r.Service = res.Service + + r.Deployment = res.Deployment + + r.DisablePodOverprovisioning = res.DisablePodOverprovisioning + + r.PodSelectorLabel = res.PodSelectorLabel + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking = &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking{empty: true} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun 
struct { + empty bool `json:"-"` + AutomaticTrafficControl *bool `json:"automaticTrafficControl"` + CanaryRevisionTags []string `json:"canaryRevisionTags"` + PriorRevisionTags []string `json:"priorRevisionTags"` + StableRevisionTags []string `json:"stableRevisionTags"` +} + +type jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun + } else { + + r.AutomaticTrafficControl = res.AutomaticTrafficControl + + r.CanaryRevisionTags = res.CanaryRevisionTags + + r.PriorRevisionTags = res.PriorRevisionTags + + r.StableRevisionTags = res.StableRevisionTags + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun = &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun{empty: true} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment struct { + empty bool `json:"-"` + Percentages []int64 `json:"percentages"` + Verify *bool `json:"verify"` + Predeploy *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy `json:"predeploy"` + Postdeploy *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy `json:"postdeploy"` +} + +type jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment + } else { + + r.Percentages = res.Percentages + + r.Verify = res.Verify + + r.Predeploy = res.Predeploy + + r.Postdeploy = res.Postdeploy + + } + return nil +} + +// This object is used to assert a desired state 
where this DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment = &DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment{empty: true} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy struct { + empty bool `json:"-"` + Actions []string `json:"actions"` +} + +type jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy + } else { + + r.Actions = res.Actions + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy is +// empty. 
Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy = &DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy{empty: true} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy struct { + empty bool `json:"-"` + Actions []string `json:"actions"` +} + +type jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy + } else { + + r.Actions = res.Actions + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy is +// empty. 
Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy = &DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy{empty: true} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment struct { + empty bool `json:"-"` + PhaseConfigs []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs `json:"phaseConfigs"` +} + +type jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment + } else { + + r.PhaseConfigs = res.PhaseConfigs + + } + return nil +} + +// This object is used to assert a desired state where this 
DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment = &DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment{empty: true} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs struct { + empty bool `json:"-"` + PhaseId *string `json:"phaseId"` + Percentage *int64 `json:"percentage"` + Profiles []string `json:"profiles"` + Verify *bool `json:"verify"` + Predeploy *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy `json:"predeploy"` + Postdeploy *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy `json:"postdeploy"` +} + +type jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs + if err := 
json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs + } else { + + r.PhaseId = res.PhaseId + + r.Percentage = res.Percentage + + r.Profiles = res.Profiles + + r.Verify = res.Verify + + r.Predeploy = res.Predeploy + + r.Postdeploy = res.Postdeploy + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs = &DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs{empty: true} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy struct { + empty bool `json:"-"` + Actions []string `json:"actions"` +} + +type jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy 
DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy + } else { + + r.Actions = res.Actions + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy = &DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy{empty: true} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type 
DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy struct { + empty bool `json:"-"` + Actions []string `json:"actions"` +} + +type jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy + } else { + + r.Actions = res.Actions + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy = &DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy{empty: true} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineSerialPipelineStagesDeployParameters struct { + empty bool `json:"-"` + Values map[string]string `json:"values"` + MatchTargetLabels map[string]string `json:"matchTargetLabels"` +} + +type jsonDeliveryPipelineSerialPipelineStagesDeployParameters DeliveryPipelineSerialPipelineStagesDeployParameters + +func (r *DeliveryPipelineSerialPipelineStagesDeployParameters) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineSerialPipelineStagesDeployParameters + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineSerialPipelineStagesDeployParameters + } else { + + r.Values = res.Values + + r.MatchTargetLabels = res.MatchTargetLabels + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineSerialPipelineStagesDeployParameters is +// empty. Go lacks global const objects, but this object should be treated +// as one. 
Modifying this object will have undesirable results. +var EmptyDeliveryPipelineSerialPipelineStagesDeployParameters *DeliveryPipelineSerialPipelineStagesDeployParameters = &DeliveryPipelineSerialPipelineStagesDeployParameters{empty: true} + +func (r *DeliveryPipelineSerialPipelineStagesDeployParameters) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineSerialPipelineStagesDeployParameters) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineSerialPipelineStagesDeployParameters) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineCondition struct { + empty bool `json:"-"` + PipelineReadyCondition *DeliveryPipelineConditionPipelineReadyCondition `json:"pipelineReadyCondition"` + TargetsPresentCondition *DeliveryPipelineConditionTargetsPresentCondition `json:"targetsPresentCondition"` + TargetsTypeCondition *DeliveryPipelineConditionTargetsTypeCondition `json:"targetsTypeCondition"` +} + +type jsonDeliveryPipelineCondition DeliveryPipelineCondition + +func (r *DeliveryPipelineCondition) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineCondition + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineCondition + } else { + + r.PipelineReadyCondition = res.PipelineReadyCondition + + r.TargetsPresentCondition = res.TargetsPresentCondition + + r.TargetsTypeCondition = res.TargetsTypeCondition + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineCondition is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyDeliveryPipelineCondition *DeliveryPipelineCondition = &DeliveryPipelineCondition{empty: true} + +func (r *DeliveryPipelineCondition) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineCondition) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineCondition) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineConditionPipelineReadyCondition struct { + empty bool `json:"-"` + Status *bool `json:"status"` + UpdateTime *string `json:"updateTime"` +} + +type jsonDeliveryPipelineConditionPipelineReadyCondition DeliveryPipelineConditionPipelineReadyCondition + +func (r *DeliveryPipelineConditionPipelineReadyCondition) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineConditionPipelineReadyCondition + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineConditionPipelineReadyCondition + } else { + + r.Status = res.Status + + r.UpdateTime = res.UpdateTime + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineConditionPipelineReadyCondition is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyDeliveryPipelineConditionPipelineReadyCondition *DeliveryPipelineConditionPipelineReadyCondition = &DeliveryPipelineConditionPipelineReadyCondition{empty: true} + +func (r *DeliveryPipelineConditionPipelineReadyCondition) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineConditionPipelineReadyCondition) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineConditionPipelineReadyCondition) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineConditionTargetsPresentCondition struct { + empty bool `json:"-"` + Status *bool `json:"status"` + MissingTargets []string `json:"missingTargets"` + UpdateTime *string `json:"updateTime"` +} + +type jsonDeliveryPipelineConditionTargetsPresentCondition DeliveryPipelineConditionTargetsPresentCondition + +func (r *DeliveryPipelineConditionTargetsPresentCondition) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineConditionTargetsPresentCondition + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineConditionTargetsPresentCondition + } else { + + r.Status = res.Status + + r.MissingTargets = res.MissingTargets + + r.UpdateTime = res.UpdateTime + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineConditionTargetsPresentCondition is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyDeliveryPipelineConditionTargetsPresentCondition *DeliveryPipelineConditionTargetsPresentCondition = &DeliveryPipelineConditionTargetsPresentCondition{empty: true} + +func (r *DeliveryPipelineConditionTargetsPresentCondition) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineConditionTargetsPresentCondition) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineConditionTargetsPresentCondition) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineConditionTargetsTypeCondition struct { + empty bool `json:"-"` + Status *bool `json:"status"` + ErrorDetails *string `json:"errorDetails"` +} + +type jsonDeliveryPipelineConditionTargetsTypeCondition DeliveryPipelineConditionTargetsTypeCondition + +func (r *DeliveryPipelineConditionTargetsTypeCondition) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineConditionTargetsTypeCondition + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineConditionTargetsTypeCondition + } else { + + r.Status = res.Status + + r.ErrorDetails = res.ErrorDetails + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineConditionTargetsTypeCondition is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyDeliveryPipelineConditionTargetsTypeCondition *DeliveryPipelineConditionTargetsTypeCondition = &DeliveryPipelineConditionTargetsTypeCondition{empty: true} + +func (r *DeliveryPipelineConditionTargetsTypeCondition) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineConditionTargetsTypeCondition) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineConditionTargetsTypeCondition) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +// Describe returns a simple description of this resource to ensure that automated tools +// can identify it. +func (r *DeliveryPipeline) Describe() dcl.ServiceTypeVersion { + return dcl.ServiceTypeVersion{ + Service: "clouddeploy", + Type: "DeliveryPipeline", +{{- if ne $.TargetVersionName "ga" }} + Version: "beta", +{{- else }} + Version: "clouddeploy", +{{- end }} + } +} + +func (r *DeliveryPipeline) ID() (string, error) { + if err := extractDeliveryPipelineFields(r); err != nil { + return "", err + } + nr := r.urlNormalized() + params := map[string]interface{}{ + "name": dcl.ValueOrEmptyString(nr.Name), + "uid": dcl.ValueOrEmptyString(nr.Uid), + "description": dcl.ValueOrEmptyString(nr.Description), + "annotations": dcl.ValueOrEmptyString(nr.Annotations), + "labels": dcl.ValueOrEmptyString(nr.Labels), + "create_time": dcl.ValueOrEmptyString(nr.CreateTime), + "update_time": dcl.ValueOrEmptyString(nr.UpdateTime), + "serial_pipeline": dcl.ValueOrEmptyString(nr.SerialPipeline), + "condition": dcl.ValueOrEmptyString(nr.Condition), + "etag": dcl.ValueOrEmptyString(nr.Etag), + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "suspended": dcl.ValueOrEmptyString(nr.Suspended), + } + return dcl.Nprintf("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" 
}}/deliveryPipelines/{{ "{{" }}name{{ "}}" }}", params), nil +} + +const DeliveryPipelineMaxPage = -1 + +type DeliveryPipelineList struct { + Items []*DeliveryPipeline + + nextToken string + + pageSize int32 + + resource *DeliveryPipeline +} + +func (l *DeliveryPipelineList) HasNext() bool { + return l.nextToken != "" +} + +func (l *DeliveryPipelineList) Next(ctx context.Context, c *Client) error { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if !l.HasNext() { + return fmt.Errorf("no next page") + } + items, token, err := c.listDeliveryPipeline(ctx, l.resource, l.nextToken, l.pageSize) + if err != nil { + return err + } + l.Items = items + l.nextToken = token + return err +} + +func (c *Client) ListDeliveryPipeline(ctx context.Context, project, location string) (*DeliveryPipelineList, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + return c.ListDeliveryPipelineWithMaxResults(ctx, project, location, DeliveryPipelineMaxPage) + +} + +func (c *Client) ListDeliveryPipelineWithMaxResults(ctx context.Context, project, location string, pageSize int32) (*DeliveryPipelineList, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // Create a resource object so that we can use proper url normalization methods. 
+	r := &DeliveryPipeline{
+		Project:  &project,
+		Location: &location,
+	}
+	items, token, err := c.listDeliveryPipeline(ctx, r, "", pageSize)
+	if err != nil {
+		return nil, err
+	}
+	return &DeliveryPipelineList{
+		Items:     items,
+		nextToken: token,
+		pageSize:  pageSize,
+		resource:  r,
+	}, nil
+}
+
+// GetDeliveryPipeline fetches the named resource, canonicalizes the raw API
+// response against the requested state, and returns a 404 *googleapi.Error
+// when the resource does not exist.
+func (c *Client) GetDeliveryPipeline(ctx context.Context, r *DeliveryPipeline) (*DeliveryPipeline, error) {
+	ctx = dcl.ContextWithRequestID(ctx)
+	ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second))
+	defer cancel()
+
+	// This is *purposefully* suppressing errors.
+	// This function is used with url-normalized values + not URL normalized values.
+	// URL Normalized values will throw unintentional errors, since those values are not of the proper parent form.
+	extractDeliveryPipelineFields(r)
+
+	b, err := c.getDeliveryPipelineRaw(ctx, r)
+	if err != nil {
+		if dcl.IsNotFound(err) {
+			return nil, &googleapi.Error{
+				Code:    404,
+				Message: err.Error(),
+			}
+		}
+		return nil, err
+	}
+	result, err := unmarshalDeliveryPipeline(b, c, r)
+	if err != nil {
+		return nil, err
+	}
+	// Identity fields are taken from the request, not the response body.
+	result.Project = r.Project
+	result.Location = r.Location
+	result.Name = r.Name
+
+	c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result)
+	c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r)
+	result, err = canonicalizeDeliveryPipelineNewState(c, result, r)
+	if err != nil {
+		return nil, err
+	}
+	if err := postReadExtractDeliveryPipelineFields(result); err != nil {
+		return result, err
+	}
+	c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result)
+
+	return result, nil
+}
+
+// DeleteDeliveryPipeline deletes the given resource, waiting for the
+// long-running delete operation to finish.
+func (c *Client) DeleteDeliveryPipeline(ctx context.Context, r *DeliveryPipeline) error {
+	ctx = dcl.ContextWithRequestID(ctx)
+	ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second))
+	defer cancel()
+
+	if r == nil {
+		return fmt.Errorf("DeliveryPipeline resource is nil")
+	}
+
c.Config.Logger.InfoWithContext(ctx, "Deleting DeliveryPipeline...") + deleteOp := deleteDeliveryPipelineOperation{} + return deleteOp.do(ctx, r, c) +} + +// DeleteAllDeliveryPipeline deletes all resources that the filter functions returns true on. +func (c *Client) DeleteAllDeliveryPipeline(ctx context.Context, project, location string, filter func(*DeliveryPipeline) bool) error { + listObj, err := c.ListDeliveryPipeline(ctx, project, location) + if err != nil { + return err + } + + err = c.deleteAllDeliveryPipeline(ctx, filter, listObj.Items) + if err != nil { + return err + } + for listObj.HasNext() { + err = listObj.Next(ctx, c) + if err != nil { + return nil + } + err = c.deleteAllDeliveryPipeline(ctx, filter, listObj.Items) + if err != nil { + return err + } + } + return nil +} + +func (c *Client) ApplyDeliveryPipeline(ctx context.Context, rawDesired *DeliveryPipeline, opts ...dcl.ApplyOption) (*DeliveryPipeline, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + ctx = dcl.ContextWithRequestID(ctx) + var resultNewState *DeliveryPipeline + err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + newState, err := applyDeliveryPipelineHelper(c, ctx, rawDesired, opts...) + resultNewState = newState + if err != nil { + // If the error is 409, there is conflict in resource update. + // Here we want to apply changes based on latest state. 
+ if dcl.IsConflictError(err) { + return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} + } + return nil, err + } + return nil, nil + }, c.Config.RetryProvider) + return resultNewState, err +} + +func applyDeliveryPipelineHelper(c *Client, ctx context.Context, rawDesired *DeliveryPipeline, opts ...dcl.ApplyOption) (*DeliveryPipeline, error) { + c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyDeliveryPipeline...") + c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) + + // 1.1: Validation of user-specified fields in desired state. + if err := rawDesired.validate(); err != nil { + return nil, err + } + + if err := extractDeliveryPipelineFields(rawDesired); err != nil { + return nil, err + } + + initial, desired, fieldDiffs, err := c.deliveryPipelineDiffsForRawDesired(ctx, rawDesired, opts...) + if err != nil { + return nil, fmt.Errorf("failed to create a diff: %w", err) + } + + diffs, err := convertFieldDiffsToDeliveryPipelineDiffs(c.Config, fieldDiffs, opts) + if err != nil { + return nil, err + } + + // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). 
+ + // 2.3: Lifecycle Directive Check + var create bool + lp := dcl.FetchLifecycleParams(opts) + if initial == nil { + if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} + } + create = true + } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), + } + } else { + for _, d := range diffs { + if d.RequiresRecreate { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), + } + } + if dcl.HasLifecycleParam(lp, dcl.BlockModification) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} + } + } + } + + // 2.4 Imperative Request Planning + var ops []deliveryPipelineApiOperation + if create { + ops = append(ops, &createDeliveryPipelineOperation{}) + } else { + for _, d := range diffs { + ops = append(ops, d.UpdateOp) + } + } + c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) + + // 2.5 Request Actuation + for _, op := range ops { + c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) + if err := op.do(ctx, desired, c); err != nil { + c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", op, op, err) + return nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) + } + return applyDeliveryPipelineDiff(c, ctx, desired, rawDesired, ops, opts...) 
+} + +func applyDeliveryPipelineDiff(c *Client, ctx context.Context, desired *DeliveryPipeline, rawDesired *DeliveryPipeline, ops []deliveryPipelineApiOperation, opts ...dcl.ApplyOption) (*DeliveryPipeline, error) { + // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") + rawNew, err := c.GetDeliveryPipeline(ctx, desired) + if err != nil { + return nil, err + } + // Get additional values from the first response. + // These values should be merged into the newState above. + if len(ops) > 0 { + lastOp := ops[len(ops)-1] + if o, ok := lastOp.(*createDeliveryPipelineOperation); ok { + if r, hasR := o.FirstResponse(); hasR { + + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") + + fullResp, err := unmarshalMapDeliveryPipeline(r, c, rawDesired) + if err != nil { + return nil, err + } + + rawNew, err = canonicalizeDeliveryPipelineNewState(c, rawNew, fullResp) + if err != nil { + return nil, err + } + } + } + } + + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) + // 3.2b Canonicalization of raw new state using raw desired state + newState, err := canonicalizeDeliveryPipelineNewState(c, rawNew, rawDesired) + if err != nil { + return rawNew, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) + // 3.3 Comparison of the new state and raw desired state. + // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE + newDesired, err := canonicalizeDeliveryPipelineDesiredState(rawDesired, newState) + if err != nil { + return newState, err + } + + if err := postReadExtractDeliveryPipelineFields(newState); err != nil { + return newState, err + } + + // Need to ensure any transformations made here match acceptably in differ. 
+ if err := postReadExtractDeliveryPipelineFields(newDesired); err != nil { + return newState, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) + newDiffs, err := diffDeliveryPipeline(c, newDesired, newState) + if err != nil { + return newState, err + } + + if len(newDiffs) == 0 { + c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") + } else { + c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) + diffMessages := make([]string, len(newDiffs)) + for i, d := range newDiffs { + diffMessages[i] = fmt.Sprintf("%v", d) + } + return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} + } + c.Config.Logger.InfoWithContext(ctx, "Done Apply.") + return newState, nil +} + +func (r *DeliveryPipeline) GetPolicy(basePath string) (string, string, *bytes.Buffer, error) { + u := r.getPolicyURL(basePath) + body := &bytes.Buffer{} + u, err := dcl.AddQueryParams(u, map[string]string{"optionsRequestedPolicyVersion": fmt.Sprintf("%d", r.IAMPolicyVersion())}) + if err != nil { + return "", "", nil, err + } + return u, "", body, nil +} diff --git a/mmv1/third_party/terraform/services/clouddeploy/delivery_pipeline_internal.go.tmpl b/mmv1/third_party/terraform/services/clouddeploy/delivery_pipeline_internal.go.tmpl new file mode 100644 index 000000000000..fec0faa4833f --- /dev/null +++ b/mmv1/third_party/terraform/services/clouddeploy/delivery_pipeline_internal.go.tmpl @@ -0,0 +1,9009 @@ +package clouddeploy + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "strings" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource/operations" +) + +func (r *DeliveryPipeline) validate() error { + + if err := dcl.RequiredParameter(r.Name, "Name"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Project, "Project"); err != nil { + return err + } + if err := 
dcl.RequiredParameter(r.Location, "Location"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.SerialPipeline) { + if err := r.SerialPipeline.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.Condition) { + if err := r.Condition.validate(); err != nil { + return err + } + } + return nil +} +func (r *DeliveryPipelineSerialPipeline) validate() error { + return nil +} +func (r *DeliveryPipelineSerialPipelineStages) validate() error { + if !dcl.IsEmptyValueIndirect(r.Strategy) { + if err := r.Strategy.validate(); err != nil { + return err + } + } + return nil +} +func (r *DeliveryPipelineSerialPipelineStagesStrategy) validate() error { + if !dcl.IsEmptyValueIndirect(r.Standard) { + if err := r.Standard.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.Canary) { + if err := r.Canary.validate(); err != nil { + return err + } + } + return nil +} +func (r *DeliveryPipelineSerialPipelineStagesStrategyStandard) validate() error { + if !dcl.IsEmptyValueIndirect(r.Predeploy) { + if err := r.Predeploy.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.Postdeploy) { + if err := r.Postdeploy.validate(); err != nil { + return err + } + } + return nil +} +func (r *DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy) validate() error { + return nil +} +func (r *DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy) validate() error { + return nil +} +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanary) validate() error { + if err := dcl.ValidateAtMostOneOfFieldsSet([]string{"CanaryDeployment", "CustomCanaryDeployment"}, r.CanaryDeployment, r.CustomCanaryDeployment); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.RuntimeConfig) { + if err := r.RuntimeConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.CanaryDeployment) { + if err := r.CanaryDeployment.validate(); err != nil { + return err + } + } + if 
!dcl.IsEmptyValueIndirect(r.CustomCanaryDeployment) { + if err := r.CustomCanaryDeployment.validate(); err != nil { + return err + } + } + return nil +} +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig) validate() error { + if err := dcl.ValidateAtMostOneOfFieldsSet([]string{"Kubernetes", "CloudRun"}, r.Kubernetes, r.CloudRun); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.Kubernetes) { + if err := r.Kubernetes.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.CloudRun) { + if err := r.CloudRun.validate(); err != nil { + return err + } + } + return nil +} +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes) validate() error { + if err := dcl.ValidateAtMostOneOfFieldsSet([]string{"GatewayServiceMesh", "ServiceNetworking"}, r.GatewayServiceMesh, r.ServiceNetworking); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.GatewayServiceMesh) { + if err := r.GatewayServiceMesh.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.ServiceNetworking) { + if err := r.ServiceNetworking.validate(); err != nil { + return err + } + } + return nil +} +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh) validate() error { + if err := dcl.Required(r, "httpRoute"); err != nil { + return err + } + if err := dcl.Required(r, "service"); err != nil { + return err + } + if err := dcl.Required(r, "deployment"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.RouteDestinations) { + if err := r.RouteDestinations.validate(); err != nil { + return err + } + } + return nil +} +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations) validate() error { + if err := dcl.Required(r, "destinationIds"); err != nil { + return err + } + return nil +} +func (r 
*DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking) validate() error { + if err := dcl.Required(r, "service"); err != nil { + return err + } + if err := dcl.Required(r, "deployment"); err != nil { + return err + } + return nil +} +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun) validate() error { + return nil +} +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment) validate() error { + if err := dcl.Required(r, "percentages"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.Predeploy) { + if err := r.Predeploy.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.Postdeploy) { + if err := r.Postdeploy.validate(); err != nil { + return err + } + } + return nil +} +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy) validate() error { + return nil +} +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy) validate() error { + return nil +} +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment) validate() error { + if err := dcl.Required(r, "phaseConfigs"); err != nil { + return err + } + return nil +} +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs) validate() error { + if err := dcl.Required(r, "phaseId"); err != nil { + return err + } + if err := dcl.Required(r, "percentage"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.Predeploy) { + if err := r.Predeploy.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.Postdeploy) { + if err := r.Postdeploy.validate(); err != nil { + return err + } + } + return nil +} +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy) validate() error { + return nil +} +func (r 
*DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy) validate() error { + return nil +} +func (r *DeliveryPipelineSerialPipelineStagesDeployParameters) validate() error { + if err := dcl.Required(r, "values"); err != nil { + return err + } + return nil +} +func (r *DeliveryPipelineCondition) validate() error { + if !dcl.IsEmptyValueIndirect(r.PipelineReadyCondition) { + if err := r.PipelineReadyCondition.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.TargetsPresentCondition) { + if err := r.TargetsPresentCondition.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.TargetsTypeCondition) { + if err := r.TargetsTypeCondition.validate(); err != nil { + return err + } + } + return nil +} +func (r *DeliveryPipelineConditionPipelineReadyCondition) validate() error { + return nil +} +func (r *DeliveryPipelineConditionTargetsPresentCondition) validate() error { + return nil +} +func (r *DeliveryPipelineConditionTargetsTypeCondition) validate() error { + return nil +} +func (r *DeliveryPipeline) basePath() string { + params := map[string]interface{}{} + return dcl.Nprintf("https://clouddeploy.googleapis.com/v1/", params) +} + +func (r *DeliveryPipeline) getURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/deliveryPipelines/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +func (r *DeliveryPipeline) listURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" 
}}/locations/{{ "{{" }}location{{ "}}" }}/deliveryPipelines", nr.basePath(), userBasePath, params), nil + +} + +func (r *DeliveryPipeline) createURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/deliveryPipelines?deliveryPipelineId={{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil + +} + +func (r *DeliveryPipeline) deleteURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/deliveryPipelines/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +func (r *DeliveryPipeline) SetPolicyURL(userBasePath string) string { + nr := r.urlNormalized() + fields := map[string]interface{}{} + return dcl.URL("", nr.basePath(), userBasePath, fields) +} + +func (r *DeliveryPipeline) SetPolicyVerb() string { + return "" +} + +func (r *DeliveryPipeline) getPolicyURL(userBasePath string) string { + nr := r.urlNormalized() + fields := map[string]interface{}{} + return dcl.URL("", nr.basePath(), userBasePath, fields) +} + +func (r *DeliveryPipeline) IAMPolicyVersion() int { + return 3 +} + +// deliveryPipelineApiOperation represents a mutable operation in the underlying REST +// API such as Create, Update, or Delete. 
+type deliveryPipelineApiOperation interface { + do(context.Context, *DeliveryPipeline, *Client) error +} + +// newUpdateDeliveryPipelineUpdateDeliveryPipelineRequest creates a request for an +// DeliveryPipeline resource's UpdateDeliveryPipeline update type by filling in the update +// fields based on the intended state of the resource. +func newUpdateDeliveryPipelineUpdateDeliveryPipelineRequest(ctx context.Context, f *DeliveryPipeline, c *Client) (map[string]interface{}, error) { + req := map[string]interface{}{} + res := f + _ = res + + if v := f.Description; !dcl.IsEmptyValueIndirect(v) { + req["description"] = v + } + if v := f.Annotations; !dcl.IsEmptyValueIndirect(v) { + req["annotations"] = v + } + if v := f.Labels; !dcl.IsEmptyValueIndirect(v) { + req["labels"] = v + } + if v, err := expandDeliveryPipelineSerialPipeline(c, f.SerialPipeline, res); err != nil { + return nil, fmt.Errorf("error expanding SerialPipeline into serialPipeline: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["serialPipeline"] = v + } + if v, err := expandDeliveryPipelineCondition(c, f.Condition, res); err != nil { + return nil, fmt.Errorf("error expanding Condition into condition: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["condition"] = v + } + if v := f.Suspended; !dcl.IsEmptyValueIndirect(v) { + req["suspended"] = v + } + b, err := c.getDeliveryPipelineRaw(ctx, f) + if err != nil { + return nil, err + } + var m map[string]interface{} + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + rawEtag, err := dcl.GetMapEntry( + m, + []string{"etag"}, + ) + if err != nil { + c.Config.Logger.WarningWithContextf(ctx, "Failed to fetch from JSON Path: %v", err) + } else { + req["etag"] = rawEtag.(string) + } + req["name"] = fmt.Sprintf("projects/%s/locations/%s/deliveryPipelines/%s", *f.Project, *f.Location, *f.Name) + + return req, nil +} + +// marshalUpdateDeliveryPipelineUpdateDeliveryPipelineRequest converts the update into +// the final 
JSON request body. +func marshalUpdateDeliveryPipelineUpdateDeliveryPipelineRequest(c *Client, m map[string]interface{}) ([]byte, error) { + + return json.Marshal(m) +} + +type updateDeliveryPipelineUpdateDeliveryPipelineOperation struct { + // If the update operation has the REQUIRES_APPLY_OPTIONS trait, this will be populated. + // Usually it will be nil - this is to prevent us from accidentally depending on apply + // options, which should usually be unnecessary. + ApplyOptions []dcl.ApplyOption + FieldDiffs []*dcl.FieldDiff +} + +// do creates a request and sends it to the appropriate URL. In most operations, +// do will transcribe a subset of the resource into a request object and send a +// PUT request to a single URL. + +func (op *updateDeliveryPipelineUpdateDeliveryPipelineOperation) do(ctx context.Context, r *DeliveryPipeline, c *Client) error { + _, err := c.GetDeliveryPipeline(ctx, r) + if err != nil { + return err + } + + u, err := r.updateURL(c.Config.BasePath, "UpdateDeliveryPipeline") + if err != nil { + return err + } + mask := dcl.UpdateMask(op.FieldDiffs) + u, err = dcl.AddQueryParams(u, map[string]string{"updateMask": mask}) + if err != nil { + return err + } + + req, err := newUpdateDeliveryPipelineUpdateDeliveryPipelineRequest(ctx, r, c) + if err != nil { + return err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created update: %#v", req) + body, err := marshalUpdateDeliveryPipelineUpdateDeliveryPipelineRequest(c, req) + if err != nil { + return err + } + resp, err := dcl.SendRequest(ctx, c.Config, "PATCH", u, bytes.NewBuffer(body), c.Config.RetryProvider) + if err != nil { + return err + } + + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + err = o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET") + + if err != nil { + return err + } + + return nil +} + +func (c *Client) listDeliveryPipelineRaw(ctx context.Context, r 
*DeliveryPipeline, pageToken string, pageSize int32) ([]byte, error) { + u, err := r.urlNormalized().listURL(c.Config.BasePath) + if err != nil { + return nil, err + } + + m := make(map[string]string) + if pageToken != "" { + m["pageToken"] = pageToken + } + + if pageSize != DeliveryPipelineMaxPage { + m["pageSize"] = fmt.Sprintf("%v", pageSize) + } + + u, err = dcl.AddQueryParams(u, m) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + return ioutil.ReadAll(resp.Response.Body) +} + +type listDeliveryPipelineOperation struct { + DeliveryPipelines []map[string]interface{} `json:"deliveryPipelines"` + Token string `json:"nextPageToken"` +} + +func (c *Client) listDeliveryPipeline(ctx context.Context, r *DeliveryPipeline, pageToken string, pageSize int32) ([]*DeliveryPipeline, string, error) { + b, err := c.listDeliveryPipelineRaw(ctx, r, pageToken, pageSize) + if err != nil { + return nil, "", err + } + + var m listDeliveryPipelineOperation + if err := json.Unmarshal(b, &m); err != nil { + return nil, "", err + } + + var l []*DeliveryPipeline + for _, v := range m.DeliveryPipelines { + res, err := unmarshalMapDeliveryPipeline(v, c, r) + if err != nil { + return nil, m.Token, err + } + res.Project = r.Project + res.Location = r.Location + l = append(l, res) + } + + return l, m.Token, nil +} + +func (c *Client) deleteAllDeliveryPipeline(ctx context.Context, f func(*DeliveryPipeline) bool, resources []*DeliveryPipeline) error { + var errors []string + for _, res := range resources { + if f(res) { + // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. 
+ err := c.DeleteDeliveryPipeline(ctx, res) + if err != nil { + errors = append(errors, err.Error()) + } + } + } + if len(errors) > 0 { + return fmt.Errorf("%v", strings.Join(errors, "\n")) + } else { + return nil + } +} + +type deleteDeliveryPipelineOperation struct{} + +func (op *deleteDeliveryPipelineOperation) do(ctx context.Context, r *DeliveryPipeline, c *Client) error { + r, err := c.GetDeliveryPipeline(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + c.Config.Logger.InfoWithContextf(ctx, "DeliveryPipeline not found, returning. Original error: %v", err) + return nil + } + c.Config.Logger.WarningWithContextf(ctx, "GetDeliveryPipeline checking for existence. error: %v", err) + return err + } + + u, err := r.deleteURL(c.Config.BasePath) + if err != nil { + return err + } + + u, err = dcl.AddQueryParams(u, map[string]string{"force": "true"}) + if err != nil { + return err + } + + // Delete should never have a body + body := &bytes.Buffer{} + resp, err := dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider) + if err != nil { + return err + } + + // wait for object to be deleted. + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { + return err + } + + // We saw a race condition where for some successful delete operation, the Get calls returned resources for a short duration. + // This is the reason we are adding retry to handle that case. 
+ retriesRemaining := 10 + dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + _, err := c.GetDeliveryPipeline(ctx, r) + if dcl.IsNotFound(err) { + return nil, nil + } + if retriesRemaining > 0 { + retriesRemaining-- + return &dcl.RetryDetails{}, dcl.OperationNotDone{} + } + return nil, dcl.NotDeletedError{ExistingResource: r} + }, c.Config.RetryProvider) + return nil +} + +// Create operations are similar to Update operations, although they do not have +// specific request objects. The Create request object is the json encoding of +// the resource, which is modified by res.marshal to form the base request body. +type createDeliveryPipelineOperation struct { + response map[string]interface{} +} + +func (op *createDeliveryPipelineOperation) FirstResponse() (map[string]interface{}, bool) { + return op.response, len(op.response) > 0 +} + +func (op *createDeliveryPipelineOperation) do(ctx context.Context, r *DeliveryPipeline, c *Client) error { + c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r) + u, err := r.createURL(c.Config.BasePath) + if err != nil { + return err + } + + req, err := r.marshal(c) + if err != nil { + return err + } + resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider) + if err != nil { + return err + } + // wait for object to be created. 
+ var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { + c.Config.Logger.Warningf("Creation failed after waiting for operation: %v", err) + return err + } + c.Config.Logger.InfoWithContextf(ctx, "Successfully waited for operation") + op.response, _ = o.FirstResponse() + + if _, err := c.GetDeliveryPipeline(ctx, r); err != nil { + c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) + return err + } + + return nil +} + +func (c *Client) getDeliveryPipelineRaw(ctx context.Context, r *DeliveryPipeline) ([]byte, error) { + + u, err := r.getURL(c.Config.BasePath) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + b, err := ioutil.ReadAll(resp.Response.Body) + if err != nil { + return nil, err + } + + return b, nil +} + +func (c *Client) deliveryPipelineDiffsForRawDesired(ctx context.Context, rawDesired *DeliveryPipeline, opts ...dcl.ApplyOption) (initial, desired *DeliveryPipeline, diffs []*dcl.FieldDiff, err error) { + c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") + // First, let us see if the user provided a state hint. If they did, we will start fetching based on that. 
+ var fetchState *DeliveryPipeline + if sh := dcl.FetchStateHint(opts); sh != nil { + if r, ok := sh.(*DeliveryPipeline); !ok { + c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected DeliveryPipeline, got %T", sh) + } else { + fetchState = r + } + } + if fetchState == nil { + fetchState = rawDesired + } + + // 1.2: Retrieval of raw initial state from API + rawInitial, err := c.GetDeliveryPipeline(ctx, fetchState) + if rawInitial == nil { + if !dcl.IsNotFound(err) { + c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a DeliveryPipeline resource already exists: %s", err) + return nil, nil, nil, fmt.Errorf("failed to retrieve DeliveryPipeline resource: %v", err) + } + c.Config.Logger.InfoWithContext(ctx, "Found that DeliveryPipeline resource did not exist.") + // Perform canonicalization to pick up defaults. + desired, err = canonicalizeDeliveryPipelineDesiredState(rawDesired, rawInitial) + return nil, desired, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Found initial state for DeliveryPipeline: %v", rawInitial) + c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for DeliveryPipeline: %v", rawDesired) + + // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. + if err := extractDeliveryPipelineFields(rawInitial); err != nil { + return nil, nil, nil, err + } + + // 1.3: Canonicalize raw initial state into initial state. + initial, err = canonicalizeDeliveryPipelineInitialState(rawInitial, rawDesired) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for DeliveryPipeline: %v", initial) + + // 1.4: Canonicalize raw desired state into desired state. + desired, err = canonicalizeDeliveryPipelineDesiredState(rawDesired, rawInitial, opts...) 
+ if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for DeliveryPipeline: %v", desired) + + // 2.1: Comparison of initial and desired state. + diffs, err = diffDeliveryPipeline(c, desired, initial, opts...) + return initial, desired, diffs, err +} + +func canonicalizeDeliveryPipelineInitialState(rawInitial, rawDesired *DeliveryPipeline) (*DeliveryPipeline, error) { + // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. + return rawInitial, nil +} + +/* +* Canonicalizers +* +* These are responsible for converting either a user-specified config or a +* GCP API response to a standard format that can be used for difference checking. +* */ + +func canonicalizeDeliveryPipelineDesiredState(rawDesired, rawInitial *DeliveryPipeline, opts ...dcl.ApplyOption) (*DeliveryPipeline, error) { + + if rawInitial == nil { + // Since the initial state is empty, the desired state is all we have. + // We canonicalize the remaining nested objects with nil to pick up defaults. + rawDesired.SerialPipeline = canonicalizeDeliveryPipelineSerialPipeline(rawDesired.SerialPipeline, nil, opts...) + rawDesired.Condition = canonicalizeDeliveryPipelineCondition(rawDesired.Condition, nil, opts...) + + return rawDesired, nil + } + canonicalDesired := &DeliveryPipeline{} + if dcl.NameToSelfLink(rawDesired.Name, rawInitial.Name) { + canonicalDesired.Name = rawInitial.Name + } else { + canonicalDesired.Name = rawDesired.Name + } + if dcl.StringCanonicalize(rawDesired.Description, rawInitial.Description) { + canonicalDesired.Description = rawInitial.Description + } else { + canonicalDesired.Description = rawDesired.Description + } + if dcl.IsZeroValue(rawDesired.Annotations) || (dcl.IsEmptyValueIndirect(rawDesired.Annotations) && dcl.IsEmptyValueIndirect(rawInitial.Annotations)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ canonicalDesired.Annotations = rawInitial.Annotations + } else { + canonicalDesired.Annotations = rawDesired.Annotations + } + if dcl.IsZeroValue(rawDesired.Labels) || (dcl.IsEmptyValueIndirect(rawDesired.Labels) && dcl.IsEmptyValueIndirect(rawInitial.Labels)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + canonicalDesired.Labels = rawInitial.Labels + } else { + canonicalDesired.Labels = rawDesired.Labels + } + canonicalDesired.SerialPipeline = canonicalizeDeliveryPipelineSerialPipeline(rawDesired.SerialPipeline, rawInitial.SerialPipeline, opts...) + if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) { + canonicalDesired.Project = rawInitial.Project + } else { + canonicalDesired.Project = rawDesired.Project + } + if dcl.NameToSelfLink(rawDesired.Location, rawInitial.Location) { + canonicalDesired.Location = rawInitial.Location + } else { + canonicalDesired.Location = rawDesired.Location + } + if dcl.BoolCanonicalize(rawDesired.Suspended, rawInitial.Suspended) { + canonicalDesired.Suspended = rawInitial.Suspended + } else { + canonicalDesired.Suspended = rawDesired.Suspended + } + return canonicalDesired, nil +} + +func canonicalizeDeliveryPipelineNewState(c *Client, rawNew, rawDesired *DeliveryPipeline) (*DeliveryPipeline, error) { + + rawNew.Name = rawDesired.Name + + if dcl.IsEmptyValueIndirect(rawNew.Uid) && dcl.IsEmptyValueIndirect(rawDesired.Uid) { + rawNew.Uid = rawDesired.Uid + } else { + if dcl.StringCanonicalize(rawDesired.Uid, rawNew.Uid) { + rawNew.Uid = rawDesired.Uid + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Description) && dcl.IsEmptyValueIndirect(rawDesired.Description) { + rawNew.Description = rawDesired.Description + } else { + if dcl.StringCanonicalize(rawDesired.Description, rawNew.Description) { + rawNew.Description = rawDesired.Description + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Annotations) && dcl.IsEmptyValueIndirect(rawDesired.Annotations) { + rawNew.Annotations = 
rawDesired.Annotations + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.Labels) && dcl.IsEmptyValueIndirect(rawDesired.Labels) { + rawNew.Labels = rawDesired.Labels + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.CreateTime) && dcl.IsEmptyValueIndirect(rawDesired.CreateTime) { + rawNew.CreateTime = rawDesired.CreateTime + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.UpdateTime) && dcl.IsEmptyValueIndirect(rawDesired.UpdateTime) { + rawNew.UpdateTime = rawDesired.UpdateTime + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.SerialPipeline) && dcl.IsEmptyValueIndirect(rawDesired.SerialPipeline) { + rawNew.SerialPipeline = rawDesired.SerialPipeline + } else { + rawNew.SerialPipeline = canonicalizeNewDeliveryPipelineSerialPipeline(c, rawDesired.SerialPipeline, rawNew.SerialPipeline) + } + + if dcl.IsEmptyValueIndirect(rawNew.Condition) && dcl.IsEmptyValueIndirect(rawDesired.Condition) { + rawNew.Condition = rawDesired.Condition + } else { + rawNew.Condition = canonicalizeNewDeliveryPipelineCondition(c, rawDesired.Condition, rawNew.Condition) + } + + if dcl.IsEmptyValueIndirect(rawNew.Etag) && dcl.IsEmptyValueIndirect(rawDesired.Etag) { + rawNew.Etag = rawDesired.Etag + } else { + if dcl.StringCanonicalize(rawDesired.Etag, rawNew.Etag) { + rawNew.Etag = rawDesired.Etag + } + } + + rawNew.Project = rawDesired.Project + + rawNew.Location = rawDesired.Location + + if dcl.IsEmptyValueIndirect(rawNew.Suspended) && dcl.IsEmptyValueIndirect(rawDesired.Suspended) { + rawNew.Suspended = rawDesired.Suspended + } else { + if dcl.BoolCanonicalize(rawDesired.Suspended, rawNew.Suspended) { + rawNew.Suspended = rawDesired.Suspended + } + } + + return rawNew, nil +} + +func canonicalizeDeliveryPipelineSerialPipeline(des, initial *DeliveryPipelineSerialPipeline, opts ...dcl.ApplyOption) *DeliveryPipelineSerialPipeline { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := 
&DeliveryPipelineSerialPipeline{}

	// Stages are canonicalized element-wise against the initial state.
	cDes.Stages = canonicalizeDeliveryPipelineSerialPipelineStagesSlice(des.Stages, initial.Stages, opts...)

	return cDes
}

// canonicalizeDeliveryPipelineSerialPipelineSlice canonicalizes each desired
// element against the initial element at the same index. When the lengths
// differ, each desired element is canonicalized against nil so that only
// defaults are applied; nil results are dropped from the returned slice.
func canonicalizeDeliveryPipelineSerialPipelineSlice(des, initial []DeliveryPipelineSerialPipeline, opts ...dcl.ApplyOption) []DeliveryPipelineSerialPipeline {
	if dcl.IsEmptyValueIndirect(des) {
		return initial
	}

	if len(des) != len(initial) {

		items := make([]DeliveryPipelineSerialPipeline, 0, len(des))
		for _, d := range des {
			cd := canonicalizeDeliveryPipelineSerialPipeline(&d, nil, opts...)
			if cd != nil {
				items = append(items, *cd)
			}
		}
		return items
	}

	items := make([]DeliveryPipelineSerialPipeline, 0, len(des))
	for i, d := range des {
		cd := canonicalizeDeliveryPipelineSerialPipeline(&d, &initial[i], opts...)
		if cd != nil {
			items = append(items, *cd)
		}
	}
	return items

}

// canonicalizeNewDeliveryPipelineSerialPipeline reconciles the API-returned
// value (nw) with the desired value (des): a nil desired value passes nw
// through, an explicitly-empty desired value is preserved when the API
// returned nothing, and otherwise nested Stages are canonicalized in place
// on nw.
func canonicalizeNewDeliveryPipelineSerialPipeline(c *Client, des, nw *DeliveryPipelineSerialPipeline) *DeliveryPipelineSerialPipeline {

	if des == nil {
		return nw
	}

	if nw == nil {
		if dcl.IsEmptyValueIndirect(des) {
			c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineSerialPipeline while comparing non-nil desired to nil actual. Returning desired object.")
			return des
		}
		return nil
	}

	nw.Stages = canonicalizeNewDeliveryPipelineSerialPipelineStagesSlice(c, des.Stages, nw.Stages)

	return nw
}

// canonicalizeNewDeliveryPipelineSerialPipelineSet matches desired elements to
// actual elements by zero-diff comparison rather than by index. (The function
// body continues below.)
func canonicalizeNewDeliveryPipelineSerialPipelineSet(c *Client, des, nw []DeliveryPipelineSerialPipeline) []DeliveryPipelineSerialPipeline {
	if des == nil {
		return nw
	}

	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
	// Tail of the set-matching loop: each desired element is paired with the
	// first actual element that produces no diffs; matched actual elements are
	// removed so they cannot match twice.
	var items []DeliveryPipelineSerialPipeline
	for _, d := range des {
		matchedIndex := -1
		for i, n := range nw {
			if diffs, _ := compareDeliveryPipelineSerialPipelineNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
				matchedIndex = i
				break
			}
		}
		if matchedIndex != -1 {
			items = append(items, *canonicalizeNewDeliveryPipelineSerialPipeline(c, &d, &nw[matchedIndex]))
			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
		}
	}
	// Also include elements in nw that are not matched in des.
	items = append(items, nw...)

	return items
}

// canonicalizeNewDeliveryPipelineSerialPipelineSlice canonicalizes actual
// elements against desired elements by index. Unequal lengths are returned
// untouched, since a diff will be reported later anyway.
func canonicalizeNewDeliveryPipelineSerialPipelineSlice(c *Client, des, nw []DeliveryPipelineSerialPipeline) []DeliveryPipelineSerialPipeline {
	if des == nil {
		return nw
	}

	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
	// Return the original array.
	if len(des) != len(nw) {
		return nw
	}

	var items []DeliveryPipelineSerialPipeline
	for i, d := range des {
		n := nw[i]
		items = append(items, *canonicalizeNewDeliveryPipelineSerialPipeline(c, &d, &n))
	}

	return items
}

// canonicalizeDeliveryPipelineSerialPipelineStages merges a desired stage with
// its initial state: scalar fields keep the initial value when the dcl helper
// deems them equivalent, otherwise the desired value wins; nested objects are
// canonicalized recursively. (The function body continues below this chunk.)
func canonicalizeDeliveryPipelineSerialPipelineStages(des, initial *DeliveryPipelineSerialPipelineStages, opts ...dcl.ApplyOption) *DeliveryPipelineSerialPipelineStages {
	if des == nil {
		return initial
	}
	if des.empty {
		return des
	}

	if initial == nil {
		return des
	}

	cDes := &DeliveryPipelineSerialPipelineStages{}

	if dcl.StringCanonicalize(des.TargetId, initial.TargetId) || dcl.IsZeroValue(des.TargetId) {
		cDes.TargetId = initial.TargetId
	} else {
		cDes.TargetId = des.TargetId
	}
	if dcl.StringArrayCanonicalize(des.Profiles, initial.Profiles) {
		cDes.Profiles = initial.Profiles
	} else {
		cDes.Profiles = des.Profiles
	}
	cDes.Strategy = canonicalizeDeliveryPipelineSerialPipelineStagesStrategy(des.Strategy, initial.Strategy, opts...)
+ cDes.DeployParameters = canonicalizeDeliveryPipelineSerialPipelineStagesDeployParametersSlice(des.DeployParameters, initial.DeployParameters, opts...) + + return cDes +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesSlice(des, initial []DeliveryPipelineSerialPipelineStages, opts ...dcl.ApplyOption) []DeliveryPipelineSerialPipelineStages { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineSerialPipelineStages, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStages(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineSerialPipelineStages, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStages(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStages(c *Client, des, nw *DeliveryPipelineSerialPipelineStages) *DeliveryPipelineSerialPipelineStages { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineSerialPipelineStages while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.TargetId, nw.TargetId) { + nw.TargetId = des.TargetId + } + if dcl.StringArrayCanonicalize(des.Profiles, nw.Profiles) { + nw.Profiles = des.Profiles + } + nw.Strategy = canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategy(c, des.Strategy, nw.Strategy) + nw.DeployParameters = canonicalizeNewDeliveryPipelineSerialPipelineStagesDeployParametersSlice(c, des.DeployParameters, nw.DeployParameters) + + return nw +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesSet(c *Client, des, nw []DeliveryPipelineSerialPipelineStages) []DeliveryPipelineSerialPipelineStages { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []DeliveryPipelineSerialPipelineStages + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineSerialPipelineStagesNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStages(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesSlice(c *Client, des, nw []DeliveryPipelineSerialPipelineStages) []DeliveryPipelineSerialPipelineStages { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineSerialPipelineStages + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStages(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategy(des, initial *DeliveryPipelineSerialPipelineStagesStrategy, opts ...dcl.ApplyOption) *DeliveryPipelineSerialPipelineStagesStrategy { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineSerialPipelineStagesStrategy{} + + cDes.Standard = canonicalizeDeliveryPipelineSerialPipelineStagesStrategyStandard(des.Standard, initial.Standard, opts...) + cDes.Canary = canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanary(des.Canary, initial.Canary, opts...) + + return cDes +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategySlice(des, initial []DeliveryPipelineSerialPipelineStagesStrategy, opts ...dcl.ApplyOption) []DeliveryPipelineSerialPipelineStagesStrategy { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineSerialPipelineStagesStrategy, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategy(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategy, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategy(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategy(c *Client, des, nw *DeliveryPipelineSerialPipelineStagesStrategy) *DeliveryPipelineSerialPipelineStagesStrategy { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineSerialPipelineStagesStrategy while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + nw.Standard = canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyStandard(c, des.Standard, nw.Standard) + nw.Canary = canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanary(c, des.Canary, nw.Canary) + + return nw +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategySet(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategy) []DeliveryPipelineSerialPipelineStagesStrategy { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []DeliveryPipelineSerialPipelineStagesStrategy + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineSerialPipelineStagesStrategyNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategy(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategySlice(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategy) []DeliveryPipelineSerialPipelineStagesStrategy { + if des == nil { + return nw + } + + // Lengths are unequal. 
A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineSerialPipelineStagesStrategy + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategy(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyStandard(des, initial *DeliveryPipelineSerialPipelineStagesStrategyStandard, opts ...dcl.ApplyOption) *DeliveryPipelineSerialPipelineStagesStrategyStandard { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineSerialPipelineStagesStrategyStandard{} + + if dcl.BoolCanonicalize(des.Verify, initial.Verify) || dcl.IsZeroValue(des.Verify) { + cDes.Verify = initial.Verify + } else { + cDes.Verify = des.Verify + } + cDes.Predeploy = canonicalizeDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy(des.Predeploy, initial.Predeploy, opts...) + cDes.Postdeploy = canonicalizeDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy(des.Postdeploy, initial.Postdeploy, opts...) + + return cDes +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyStandardSlice(des, initial []DeliveryPipelineSerialPipelineStagesStrategyStandard, opts ...dcl.ApplyOption) []DeliveryPipelineSerialPipelineStagesStrategyStandard { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyStandard, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyStandard(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyStandard, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyStandard(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyStandard(c *Client, des, nw *DeliveryPipelineSerialPipelineStagesStrategyStandard) *DeliveryPipelineSerialPipelineStagesStrategyStandard { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineSerialPipelineStagesStrategyStandard while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.BoolCanonicalize(des.Verify, nw.Verify) { + nw.Verify = des.Verify + } + nw.Predeploy = canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy(c, des.Predeploy, nw.Predeploy) + nw.Postdeploy = canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy(c, des.Postdeploy, nw.Postdeploy) + + return nw +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyStandardSet(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyStandard) []DeliveryPipelineSerialPipelineStagesStrategyStandard { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []DeliveryPipelineSerialPipelineStagesStrategyStandard + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineSerialPipelineStagesStrategyStandardNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyStandard(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyStandardSlice(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyStandard) []DeliveryPipelineSerialPipelineStagesStrategyStandard { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineSerialPipelineStagesStrategyStandard + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyStandard(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy(des, initial *DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy, opts ...dcl.ApplyOption) *DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy{} + + if dcl.StringArrayCanonicalize(des.Actions, initial.Actions) { + cDes.Actions = initial.Actions + } else { + cDes.Actions = des.Actions + } + + return cDes +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploySlice(des, initial 
[]DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy, opts ...dcl.ApplyOption) []DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy {
	if dcl.IsEmptyValueIndirect(des) {
		return initial
	}

	// Unequal lengths: canonicalize each desired element against nil so only
	// defaults are applied; nil results are dropped.
	if len(des) != len(initial) {

		items := make([]DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy, 0, len(des))
		for _, d := range des {
			cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy(&d, nil, opts...)
			if cd != nil {
				items = append(items, *cd)
			}
		}
		return items
	}

	items := make([]DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy, 0, len(des))
	for i, d := range des {
		cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy(&d, &initial[i], opts...)
		if cd != nil {
			items = append(items, *cd)
		}
	}
	return items

}

// canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy
// reconciles the API-returned predeploy config (nw) with the desired one
// (des): nil desired passes nw through, an explicitly-empty desired value is
// preserved when the API returned nothing, and equivalent Actions keep the
// desired representation.
func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy(c *Client, des, nw *DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy) *DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy {

	if des == nil {
		return nw
	}

	if nw == nil {
		if dcl.IsEmptyValueIndirect(des) {
			c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy while comparing non-nil desired to nil actual. Returning desired object.")
			return des
		}
		return nil
	}

	if dcl.StringArrayCanonicalize(des.Actions, nw.Actions) {
		nw.Actions = des.Actions
	}

	return nw
}

// canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploySet
// matches desired elements to actual elements by zero-diff comparison rather
// than by index. (The function body continues below this chunk.)
func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploySet(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy) []DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy {
	if des == nil {
		return nw
	}

	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
+ var items []DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineSerialPipelineStagesStrategyStandardPredeployNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploySlice(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy) []DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy(des, initial *DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy, opts ...dcl.ApplyOption) *DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy{} + + if dcl.StringArrayCanonicalize(des.Actions, initial.Actions) { + cDes.Actions = initial.Actions + } else { + cDes.Actions = des.Actions + } + + return cDes +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploySlice(des, initial []DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy, opts ...dcl.ApplyOption) []DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy(c *Client, des, nw *DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy) *DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringArrayCanonicalize(des.Actions, nw.Actions) { + nw.Actions = des.Actions + } + + return nw +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploySet(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy) []DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeployNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) 
+ + return items +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploySlice(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy) []DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanary(des, initial *DeliveryPipelineSerialPipelineStagesStrategyCanary, opts ...dcl.ApplyOption) *DeliveryPipelineSerialPipelineStagesStrategyCanary { + if des == nil { + return initial + } + if des.empty { + return des + } + + if des.CanaryDeployment != nil || (initial != nil && initial.CanaryDeployment != nil) { + // Check if anything else is set. + if dcl.AnySet(des.CustomCanaryDeployment) { + des.CanaryDeployment = nil + if initial != nil { + initial.CanaryDeployment = nil + } + } + } + + if des.CustomCanaryDeployment != nil || (initial != nil && initial.CustomCanaryDeployment != nil) { + // Check if anything else is set. + if dcl.AnySet(des.CanaryDeployment) { + des.CustomCanaryDeployment = nil + if initial != nil { + initial.CustomCanaryDeployment = nil + } + } + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineSerialPipelineStagesStrategyCanary{} + + cDes.RuntimeConfig = canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(des.RuntimeConfig, initial.RuntimeConfig, opts...) + cDes.CanaryDeployment = canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(des.CanaryDeployment, initial.CanaryDeployment, opts...) 
+ cDes.CustomCanaryDeployment = canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(des.CustomCanaryDeployment, initial.CustomCanaryDeployment, opts...) + + return cDes +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanarySlice(des, initial []DeliveryPipelineSerialPipelineStagesStrategyCanary, opts ...dcl.ApplyOption) []DeliveryPipelineSerialPipelineStagesStrategyCanary { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanary, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanary(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanary, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanary(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanary(c *Client, des, nw *DeliveryPipelineSerialPipelineStagesStrategyCanary) *DeliveryPipelineSerialPipelineStagesStrategyCanary { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineSerialPipelineStagesStrategyCanary while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + nw.RuntimeConfig = canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(c, des.RuntimeConfig, nw.RuntimeConfig) + nw.CanaryDeployment = canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(c, des.CanaryDeployment, nw.CanaryDeployment) + nw.CustomCanaryDeployment = canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(c, des.CustomCanaryDeployment, nw.CustomCanaryDeployment) + + return nw +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanarySet(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanary) []DeliveryPipelineSerialPipelineStagesStrategyCanary { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []DeliveryPipelineSerialPipelineStagesStrategyCanary + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineSerialPipelineStagesStrategyCanaryNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanary(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanarySlice(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanary) []DeliveryPipelineSerialPipelineStagesStrategyCanary { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineSerialPipelineStagesStrategyCanary + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanary(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(des, initial *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig, opts ...dcl.ApplyOption) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if des.Kubernetes != nil || (initial != nil && initial.Kubernetes != nil) { + // Check if anything else is set. + if dcl.AnySet(des.CloudRun) { + des.Kubernetes = nil + if initial != nil { + initial.Kubernetes = nil + } + } + } + + if des.CloudRun != nil || (initial != nil && initial.CloudRun != nil) { + // Check if anything else is set. + if dcl.AnySet(des.Kubernetes) { + des.CloudRun = nil + if initial != nil { + initial.CloudRun = nil + } + } + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig{} + + cDes.Kubernetes = canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(des.Kubernetes, initial.Kubernetes, opts...) + cDes.CloudRun = canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(des.CloudRun, initial.CloudRun, opts...) 
+ + return cDes +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigSlice(des, initial []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig, opts ...dcl.ApplyOption) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(c *Client, des, nw *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + nw.Kubernetes = canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(c, des.Kubernetes, nw.Kubernetes) + nw.CloudRun = canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(c, des.CloudRun, nw.CloudRun) + + return nw +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigSet(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigSlice(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(des, initial *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes, opts ...dcl.ApplyOption) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes { + if des == nil { + return initial + } + if des.empty { + return des + } + + if des.GatewayServiceMesh != nil || (initial != nil && initial.GatewayServiceMesh != nil) { + // Check if anything else is set. + if dcl.AnySet(des.ServiceNetworking) { + des.GatewayServiceMesh = nil + if initial != nil { + initial.GatewayServiceMesh = nil + } + } + } + + if des.ServiceNetworking != nil || (initial != nil && initial.ServiceNetworking != nil) { + // Check if anything else is set. + if dcl.AnySet(des.GatewayServiceMesh) { + des.ServiceNetworking = nil + if initial != nil { + initial.ServiceNetworking = nil + } + } + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes{} + + cDes.GatewayServiceMesh = canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(des.GatewayServiceMesh, initial.GatewayServiceMesh, opts...) + cDes.ServiceNetworking = canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(des.ServiceNetworking, initial.ServiceNetworking, opts...) 
+ + return cDes +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesSlice(des, initial []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes, opts ...dcl.ApplyOption) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(c *Client, des, nw *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + nw.GatewayServiceMesh = canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(c, des.GatewayServiceMesh, nw.GatewayServiceMesh) + nw.ServiceNetworking = canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(c, des.ServiceNetworking, nw.ServiceNetworking) + + return nw +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesSet(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesSlice(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(des, initial *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh, opts ...dcl.ApplyOption) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh{} + + if dcl.StringCanonicalize(des.HttpRoute, initial.HttpRoute) || dcl.IsZeroValue(des.HttpRoute) { + cDes.HttpRoute = initial.HttpRoute + } else { + cDes.HttpRoute = des.HttpRoute + } + if dcl.StringCanonicalize(des.Service, initial.Service) || dcl.IsZeroValue(des.Service) { + cDes.Service = initial.Service + } else { + cDes.Service = des.Service + } + if dcl.StringCanonicalize(des.Deployment, initial.Deployment) || dcl.IsZeroValue(des.Deployment) { + cDes.Deployment = initial.Deployment + } else { + cDes.Deployment = des.Deployment + } + if dcl.StringCanonicalize(des.RouteUpdateWaitTime, initial.RouteUpdateWaitTime) || dcl.IsZeroValue(des.RouteUpdateWaitTime) { + cDes.RouteUpdateWaitTime = initial.RouteUpdateWaitTime + } else { + cDes.RouteUpdateWaitTime = des.RouteUpdateWaitTime + } + if dcl.StringCanonicalize(des.StableCutbackDuration, initial.StableCutbackDuration) || dcl.IsZeroValue(des.StableCutbackDuration) { + cDes.StableCutbackDuration = initial.StableCutbackDuration + } else { + cDes.StableCutbackDuration = des.StableCutbackDuration + } + if dcl.StringCanonicalize(des.PodSelectorLabel, 
initial.PodSelectorLabel) || dcl.IsZeroValue(des.PodSelectorLabel) { + cDes.PodSelectorLabel = initial.PodSelectorLabel + } else { + cDes.PodSelectorLabel = des.PodSelectorLabel + } + cDes.RouteDestinations = canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations(des.RouteDestinations, initial.RouteDestinations, opts...) + + return cDes +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshSlice(des, initial []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh, opts ...dcl.ApplyOption) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(c *Client, des, nw *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.HttpRoute, nw.HttpRoute) { + nw.HttpRoute = des.HttpRoute + } + if dcl.StringCanonicalize(des.Service, nw.Service) { + nw.Service = des.Service + } + if dcl.StringCanonicalize(des.Deployment, nw.Deployment) { + nw.Deployment = des.Deployment + } + if dcl.StringCanonicalize(des.RouteUpdateWaitTime, nw.RouteUpdateWaitTime) { + nw.RouteUpdateWaitTime = des.RouteUpdateWaitTime + } + if dcl.StringCanonicalize(des.StableCutbackDuration, nw.StableCutbackDuration) { + nw.StableCutbackDuration = des.StableCutbackDuration + } + if dcl.StringCanonicalize(des.PodSelectorLabel, nw.PodSelectorLabel) { + nw.PodSelectorLabel = des.PodSelectorLabel + } + nw.RouteDestinations = canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations(c, des.RouteDestinations, nw.RouteDestinations) + + return nw +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshSet(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh { + if des == nil { + return nw + 
} + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshSlice(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations(des, initial *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations, opts ...dcl.ApplyOption) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations{} + + if dcl.StringArrayCanonicalize(des.DestinationIds, initial.DestinationIds) { + cDes.DestinationIds = initial.DestinationIds + } else { + cDes.DestinationIds = des.DestinationIds + } + if dcl.BoolCanonicalize(des.PropagateService, initial.PropagateService) || dcl.IsZeroValue(des.PropagateService) { + cDes.PropagateService = initial.PropagateService + } else { + cDes.PropagateService = des.PropagateService + } + + return cDes +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinationsSlice(des, initial []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations, opts ...dcl.ApplyOption) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := 
make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations(c *Client, des, nw *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringArrayCanonicalize(des.DestinationIds, nw.DestinationIds) { + nw.DestinationIds = des.DestinationIds + } + if dcl.BoolCanonicalize(des.PropagateService, nw.PropagateService) { + nw.PropagateService = des.PropagateService + } + + return nw +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinationsSet(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinationsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) 
+ + return items +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinationsSlice(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(des, initial *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking, opts ...dcl.ApplyOption) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking{} + + if dcl.StringCanonicalize(des.Service, initial.Service) || dcl.IsZeroValue(des.Service) { + cDes.Service = initial.Service + } else { + cDes.Service = des.Service + } + if dcl.StringCanonicalize(des.Deployment, initial.Deployment) || dcl.IsZeroValue(des.Deployment) { + cDes.Deployment = initial.Deployment + } else { + cDes.Deployment = des.Deployment + } + if dcl.BoolCanonicalize(des.DisablePodOverprovisioning, initial.DisablePodOverprovisioning) || 
dcl.IsZeroValue(des.DisablePodOverprovisioning) { + cDes.DisablePodOverprovisioning = initial.DisablePodOverprovisioning + } else { + cDes.DisablePodOverprovisioning = des.DisablePodOverprovisioning + } + if dcl.StringCanonicalize(des.PodSelectorLabel, initial.PodSelectorLabel) || dcl.IsZeroValue(des.PodSelectorLabel) { + cDes.PodSelectorLabel = initial.PodSelectorLabel + } else { + cDes.PodSelectorLabel = des.PodSelectorLabel + } + + return cDes +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingSlice(des, initial []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking, opts ...dcl.ApplyOption) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(c *Client, des, nw *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Service, nw.Service) { + nw.Service = des.Service + } + if dcl.StringCanonicalize(des.Deployment, nw.Deployment) { + nw.Deployment = des.Deployment + } + if dcl.BoolCanonicalize(des.DisablePodOverprovisioning, nw.DisablePodOverprovisioning) { + nw.DisablePodOverprovisioning = des.DisablePodOverprovisioning + } + if dcl.StringCanonicalize(des.PodSelectorLabel, nw.PodSelectorLabel) { + nw.PodSelectorLabel = des.PodSelectorLabel + } + + return nw +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingSet(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingSlice(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(des, initial *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun, opts ...dcl.ApplyOption) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun{} + + if dcl.BoolCanonicalize(des.AutomaticTrafficControl, initial.AutomaticTrafficControl) || dcl.IsZeroValue(des.AutomaticTrafficControl) { + cDes.AutomaticTrafficControl = initial.AutomaticTrafficControl + } else { + cDes.AutomaticTrafficControl = des.AutomaticTrafficControl + } + if dcl.StringArrayCanonicalize(des.CanaryRevisionTags, initial.CanaryRevisionTags) { + cDes.CanaryRevisionTags = initial.CanaryRevisionTags + } else { + cDes.CanaryRevisionTags = des.CanaryRevisionTags + } + if dcl.StringArrayCanonicalize(des.PriorRevisionTags, initial.PriorRevisionTags) { + cDes.PriorRevisionTags = initial.PriorRevisionTags + } else { + cDes.PriorRevisionTags = des.PriorRevisionTags + } + if dcl.StringArrayCanonicalize(des.StableRevisionTags, initial.StableRevisionTags) { + cDes.StableRevisionTags = initial.StableRevisionTags + } else { + cDes.StableRevisionTags = des.StableRevisionTags + } + + return cDes +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunSlice(des, initial []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun, opts ...dcl.ApplyOption) 
[]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(c *Client, des, nw *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.BoolCanonicalize(des.AutomaticTrafficControl, nw.AutomaticTrafficControl) { + nw.AutomaticTrafficControl = des.AutomaticTrafficControl + } + if dcl.StringArrayCanonicalize(des.CanaryRevisionTags, nw.CanaryRevisionTags) { + nw.CanaryRevisionTags = des.CanaryRevisionTags + } + if dcl.StringArrayCanonicalize(des.PriorRevisionTags, nw.PriorRevisionTags) { + nw.PriorRevisionTags = des.PriorRevisionTags + } + if dcl.StringArrayCanonicalize(des.StableRevisionTags, nw.StableRevisionTags) { + nw.StableRevisionTags = des.StableRevisionTags + } + + return nw +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunSet(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) 
+ + return items +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunSlice(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(des, initial *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment, opts ...dcl.ApplyOption) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment{} + + if dcl.IsZeroValue(des.Percentages) || (dcl.IsEmptyValueIndirect(des.Percentages) && dcl.IsEmptyValueIndirect(initial.Percentages)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Percentages = initial.Percentages + } else { + cDes.Percentages = des.Percentages + } + if dcl.BoolCanonicalize(des.Verify, initial.Verify) || dcl.IsZeroValue(des.Verify) { + cDes.Verify = initial.Verify + } else { + cDes.Verify = des.Verify + } + cDes.Predeploy = canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy(des.Predeploy, initial.Predeploy, opts...) 
+ cDes.Postdeploy = canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy(des.Postdeploy, initial.Postdeploy, opts...) + + return cDes +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentSlice(des, initial []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment, opts ...dcl.ApplyOption) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(c *Client, des, nw *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.BoolCanonicalize(des.Verify, nw.Verify) { + nw.Verify = des.Verify + } + nw.Predeploy = canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy(c, des.Predeploy, nw.Predeploy) + nw.Postdeploy = canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy(c, des.Postdeploy, nw.Postdeploy) + + return nw +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentSet(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentSlice(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy(des, initial *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy, opts ...dcl.ApplyOption) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy{} + + if dcl.StringArrayCanonicalize(des.Actions, initial.Actions) { + cDes.Actions = initial.Actions + } else { + cDes.Actions = des.Actions + } + + return cDes +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploySlice(des, initial []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy, opts ...dcl.ApplyOption) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy(c *Client, des, nw *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringArrayCanonicalize(des.Actions, nw.Actions) { + nw.Actions = des.Actions + } + + return nw +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploySet(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeployNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) 
+ + return items +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploySlice(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy(des, initial *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy, opts ...dcl.ApplyOption) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy{} + + if dcl.StringArrayCanonicalize(des.Actions, initial.Actions) { + cDes.Actions = initial.Actions + } else { + cDes.Actions = des.Actions + } + + return cDes +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploySlice(des, initial []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy, opts ...dcl.ApplyOption) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy, 0, len(des)) + for _, d := range des { + cd := 
canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy(c *Client, des, nw *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringArrayCanonicalize(des.Actions, nw.Actions) { + nw.Actions = des.Actions + } + + return nw +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploySet(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeployNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploySlice(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(des, initial *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment, opts ...dcl.ApplyOption) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment{} + + cDes.PhaseConfigs = canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsSlice(des.PhaseConfigs, initial.PhaseConfigs, opts...) + + return cDes +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentSlice(des, initial []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment, opts ...dcl.ApplyOption) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(c *Client, des, nw *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + nw.PhaseConfigs = canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsSlice(c, des.PhaseConfigs, nw.PhaseConfigs) + + return nw +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentSet(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) 
+ + return items +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentSlice(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs(des, initial *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs, opts ...dcl.ApplyOption) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs{} + + if dcl.StringCanonicalize(des.PhaseId, initial.PhaseId) || dcl.IsZeroValue(des.PhaseId) { + cDes.PhaseId = initial.PhaseId + } else { + cDes.PhaseId = des.PhaseId + } + if dcl.IsZeroValue(des.Percentage) || (dcl.IsEmptyValueIndirect(des.Percentage) && dcl.IsEmptyValueIndirect(initial.Percentage)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.Percentage = initial.Percentage + } else { + cDes.Percentage = des.Percentage + } + if dcl.StringArrayCanonicalize(des.Profiles, initial.Profiles) { + cDes.Profiles = initial.Profiles + } else { + cDes.Profiles = des.Profiles + } + if dcl.BoolCanonicalize(des.Verify, initial.Verify) || dcl.IsZeroValue(des.Verify) { + cDes.Verify = initial.Verify + } else { + cDes.Verify = des.Verify + } + cDes.Predeploy = canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy(des.Predeploy, initial.Predeploy, opts...) + cDes.Postdeploy = canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy(des.Postdeploy, initial.Postdeploy, opts...) + + return cDes +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsSlice(des, initial []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs, opts ...dcl.ApplyOption) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs(c *Client, des, nw *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.PhaseId, nw.PhaseId) { + nw.PhaseId = des.PhaseId + } + if dcl.StringArrayCanonicalize(des.Profiles, nw.Profiles) { + nw.Profiles = des.Profiles + } + if dcl.BoolCanonicalize(des.Verify, nw.Verify) { + nw.Verify = des.Verify + } + nw.Predeploy = canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy(c, des.Predeploy, nw.Predeploy) + nw.Postdeploy = canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy(c, des.Postdeploy, nw.Postdeploy) + + return nw +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsSet(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsSlice(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy(des, initial *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy, opts ...dcl.ApplyOption) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy{} + + if dcl.StringArrayCanonicalize(des.Actions, initial.Actions) { + cDes.Actions = initial.Actions + } else { + cDes.Actions = des.Actions + } + + return cDes +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploySlice(des, initial []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy, opts ...dcl.ApplyOption) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy(c *Client, des, nw *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringArrayCanonicalize(des.Actions, nw.Actions) { + nw.Actions = des.Actions + } + + return nw +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploySet(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeployNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploySlice(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy(des, initial *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy, opts ...dcl.ApplyOption) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy{} + + if dcl.StringArrayCanonicalize(des.Actions, initial.Actions) { + cDes.Actions = initial.Actions + } else { + cDes.Actions = des.Actions + } + + return cDes +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploySlice(des, initial []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy, opts ...dcl.ApplyOption) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy(c *Client, des, nw *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringArrayCanonicalize(des.Actions, nw.Actions) { + nw.Actions = des.Actions + } + + return nw +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploySet(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeployNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploySlice(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesDeployParameters(des, initial *DeliveryPipelineSerialPipelineStagesDeployParameters, opts ...dcl.ApplyOption) *DeliveryPipelineSerialPipelineStagesDeployParameters { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineSerialPipelineStagesDeployParameters{} + + if dcl.IsZeroValue(des.Values) || (dcl.IsEmptyValueIndirect(des.Values) && dcl.IsEmptyValueIndirect(initial.Values)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Values = initial.Values + } else { + cDes.Values = des.Values + } + if dcl.IsZeroValue(des.MatchTargetLabels) || (dcl.IsEmptyValueIndirect(des.MatchTargetLabels) && dcl.IsEmptyValueIndirect(initial.MatchTargetLabels)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.MatchTargetLabels = initial.MatchTargetLabels + } else { + cDes.MatchTargetLabels = des.MatchTargetLabels + } + + return cDes +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesDeployParametersSlice(des, initial []DeliveryPipelineSerialPipelineStagesDeployParameters, opts ...dcl.ApplyOption) []DeliveryPipelineSerialPipelineStagesDeployParameters { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineSerialPipelineStagesDeployParameters, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesDeployParameters(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineSerialPipelineStagesDeployParameters, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesDeployParameters(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesDeployParameters(c *Client, des, nw *DeliveryPipelineSerialPipelineStagesDeployParameters) *DeliveryPipelineSerialPipelineStagesDeployParameters { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineSerialPipelineStagesDeployParameters while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesDeployParametersSet(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesDeployParameters) []DeliveryPipelineSerialPipelineStagesDeployParameters { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []DeliveryPipelineSerialPipelineStagesDeployParameters + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineSerialPipelineStagesDeployParametersNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesDeployParameters(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) 
+ + return items +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesDeployParametersSlice(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesDeployParameters) []DeliveryPipelineSerialPipelineStagesDeployParameters { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineSerialPipelineStagesDeployParameters + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesDeployParameters(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineCondition(des, initial *DeliveryPipelineCondition, opts ...dcl.ApplyOption) *DeliveryPipelineCondition { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineCondition{} + + cDes.PipelineReadyCondition = canonicalizeDeliveryPipelineConditionPipelineReadyCondition(des.PipelineReadyCondition, initial.PipelineReadyCondition, opts...) + cDes.TargetsPresentCondition = canonicalizeDeliveryPipelineConditionTargetsPresentCondition(des.TargetsPresentCondition, initial.TargetsPresentCondition, opts...) + cDes.TargetsTypeCondition = canonicalizeDeliveryPipelineConditionTargetsTypeCondition(des.TargetsTypeCondition, initial.TargetsTypeCondition, opts...) + + return cDes +} + +func canonicalizeDeliveryPipelineConditionSlice(des, initial []DeliveryPipelineCondition, opts ...dcl.ApplyOption) []DeliveryPipelineCondition { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineCondition, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineCondition(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineCondition, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineCondition(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineCondition(c *Client, des, nw *DeliveryPipelineCondition) *DeliveryPipelineCondition { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineCondition while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + nw.PipelineReadyCondition = canonicalizeNewDeliveryPipelineConditionPipelineReadyCondition(c, des.PipelineReadyCondition, nw.PipelineReadyCondition) + nw.TargetsPresentCondition = canonicalizeNewDeliveryPipelineConditionTargetsPresentCondition(c, des.TargetsPresentCondition, nw.TargetsPresentCondition) + nw.TargetsTypeCondition = canonicalizeNewDeliveryPipelineConditionTargetsTypeCondition(c, des.TargetsTypeCondition, nw.TargetsTypeCondition) + + return nw +} + +func canonicalizeNewDeliveryPipelineConditionSet(c *Client, des, nw []DeliveryPipelineCondition) []DeliveryPipelineCondition { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []DeliveryPipelineCondition + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineConditionNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineCondition(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) 
+ + return items +} + +func canonicalizeNewDeliveryPipelineConditionSlice(c *Client, des, nw []DeliveryPipelineCondition) []DeliveryPipelineCondition { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineCondition + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineCondition(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineConditionPipelineReadyCondition(des, initial *DeliveryPipelineConditionPipelineReadyCondition, opts ...dcl.ApplyOption) *DeliveryPipelineConditionPipelineReadyCondition { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineConditionPipelineReadyCondition{} + + if dcl.BoolCanonicalize(des.Status, initial.Status) || dcl.IsZeroValue(des.Status) { + cDes.Status = initial.Status + } else { + cDes.Status = des.Status + } + if dcl.IsZeroValue(des.UpdateTime) || (dcl.IsEmptyValueIndirect(des.UpdateTime) && dcl.IsEmptyValueIndirect(initial.UpdateTime)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.UpdateTime = initial.UpdateTime + } else { + cDes.UpdateTime = des.UpdateTime + } + + return cDes +} + +func canonicalizeDeliveryPipelineConditionPipelineReadyConditionSlice(des, initial []DeliveryPipelineConditionPipelineReadyCondition, opts ...dcl.ApplyOption) []DeliveryPipelineConditionPipelineReadyCondition { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineConditionPipelineReadyCondition, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineConditionPipelineReadyCondition(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineConditionPipelineReadyCondition, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineConditionPipelineReadyCondition(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineConditionPipelineReadyCondition(c *Client, des, nw *DeliveryPipelineConditionPipelineReadyCondition) *DeliveryPipelineConditionPipelineReadyCondition { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineConditionPipelineReadyCondition while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.BoolCanonicalize(des.Status, nw.Status) { + nw.Status = des.Status + } + + return nw +} + +func canonicalizeNewDeliveryPipelineConditionPipelineReadyConditionSet(c *Client, des, nw []DeliveryPipelineConditionPipelineReadyCondition) []DeliveryPipelineConditionPipelineReadyCondition { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []DeliveryPipelineConditionPipelineReadyCondition + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineConditionPipelineReadyConditionNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineConditionPipelineReadyCondition(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) 
+ + return items +} + +func canonicalizeNewDeliveryPipelineConditionPipelineReadyConditionSlice(c *Client, des, nw []DeliveryPipelineConditionPipelineReadyCondition) []DeliveryPipelineConditionPipelineReadyCondition { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineConditionPipelineReadyCondition + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineConditionPipelineReadyCondition(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineConditionTargetsPresentCondition(des, initial *DeliveryPipelineConditionTargetsPresentCondition, opts ...dcl.ApplyOption) *DeliveryPipelineConditionTargetsPresentCondition { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineConditionTargetsPresentCondition{} + + if dcl.BoolCanonicalize(des.Status, initial.Status) || dcl.IsZeroValue(des.Status) { + cDes.Status = initial.Status + } else { + cDes.Status = des.Status + } + if dcl.StringArrayCanonicalize(des.MissingTargets, initial.MissingTargets) { + cDes.MissingTargets = initial.MissingTargets + } else { + cDes.MissingTargets = des.MissingTargets + } + if dcl.IsZeroValue(des.UpdateTime) || (dcl.IsEmptyValueIndirect(des.UpdateTime) && dcl.IsEmptyValueIndirect(initial.UpdateTime)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.UpdateTime = initial.UpdateTime + } else { + cDes.UpdateTime = des.UpdateTime + } + + return cDes +} + +func canonicalizeDeliveryPipelineConditionTargetsPresentConditionSlice(des, initial []DeliveryPipelineConditionTargetsPresentCondition, opts ...dcl.ApplyOption) []DeliveryPipelineConditionTargetsPresentCondition { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineConditionTargetsPresentCondition, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineConditionTargetsPresentCondition(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineConditionTargetsPresentCondition, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineConditionTargetsPresentCondition(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineConditionTargetsPresentCondition(c *Client, des, nw *DeliveryPipelineConditionTargetsPresentCondition) *DeliveryPipelineConditionTargetsPresentCondition { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineConditionTargetsPresentCondition while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.BoolCanonicalize(des.Status, nw.Status) { + nw.Status = des.Status + } + if dcl.StringArrayCanonicalize(des.MissingTargets, nw.MissingTargets) { + nw.MissingTargets = des.MissingTargets + } + + return nw +} + +func canonicalizeNewDeliveryPipelineConditionTargetsPresentConditionSet(c *Client, des, nw []DeliveryPipelineConditionTargetsPresentCondition) []DeliveryPipelineConditionTargetsPresentCondition { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. 
Remove matched elements from nw. + var items []DeliveryPipelineConditionTargetsPresentCondition + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineConditionTargetsPresentConditionNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineConditionTargetsPresentCondition(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewDeliveryPipelineConditionTargetsPresentConditionSlice(c *Client, des, nw []DeliveryPipelineConditionTargetsPresentCondition) []DeliveryPipelineConditionTargetsPresentCondition { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineConditionTargetsPresentCondition + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineConditionTargetsPresentCondition(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineConditionTargetsTypeCondition(des, initial *DeliveryPipelineConditionTargetsTypeCondition, opts ...dcl.ApplyOption) *DeliveryPipelineConditionTargetsTypeCondition { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineConditionTargetsTypeCondition{} + + if dcl.BoolCanonicalize(des.Status, initial.Status) || dcl.IsZeroValue(des.Status) { + cDes.Status = initial.Status + } else { + cDes.Status = des.Status + } + if dcl.StringCanonicalize(des.ErrorDetails, initial.ErrorDetails) || dcl.IsZeroValue(des.ErrorDetails) { + cDes.ErrorDetails = initial.ErrorDetails + } else { + cDes.ErrorDetails = 
des.ErrorDetails + } + + return cDes +} + +func canonicalizeDeliveryPipelineConditionTargetsTypeConditionSlice(des, initial []DeliveryPipelineConditionTargetsTypeCondition, opts ...dcl.ApplyOption) []DeliveryPipelineConditionTargetsTypeCondition { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineConditionTargetsTypeCondition, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineConditionTargetsTypeCondition(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineConditionTargetsTypeCondition, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineConditionTargetsTypeCondition(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineConditionTargetsTypeCondition(c *Client, des, nw *DeliveryPipelineConditionTargetsTypeCondition) *DeliveryPipelineConditionTargetsTypeCondition { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineConditionTargetsTypeCondition while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.BoolCanonicalize(des.Status, nw.Status) { + nw.Status = des.Status + } + if dcl.StringCanonicalize(des.ErrorDetails, nw.ErrorDetails) { + nw.ErrorDetails = des.ErrorDetails + } + + return nw +} + +func canonicalizeNewDeliveryPipelineConditionTargetsTypeConditionSet(c *Client, des, nw []DeliveryPipelineConditionTargetsTypeCondition) []DeliveryPipelineConditionTargetsTypeCondition { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []DeliveryPipelineConditionTargetsTypeCondition + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineConditionTargetsTypeConditionNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineConditionTargetsTypeCondition(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewDeliveryPipelineConditionTargetsTypeConditionSlice(c *Client, des, nw []DeliveryPipelineConditionTargetsTypeCondition) []DeliveryPipelineConditionTargetsTypeCondition { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineConditionTargetsTypeCondition + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineConditionTargetsTypeCondition(c, &d, &n)) + } + + return items +} + +// The differ returns a list of diffs, along with a list of operations that should be taken +// to remedy them. Right now, it does not attempt to consolidate operations - if several +// fields can be fixed with a patch update, it will perform the patch several times. +// Diffs on some fields will be ignored if the `desired` state has an empty (nil) +// value. This empty value indicates that the user does not care about the state for +// the field. Empty fields on the actual object will cause diffs. +// TODO(magic-modules-eng): for efficiency in some resources, add batching. 
+func diffDeliveryPipeline(c *Client, desired, actual *DeliveryPipeline, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) { + if desired == nil || actual == nil { + return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual) + } + + c.Config.Logger.Infof("Diff function called with desired state: %v", desired) + c.Config.Logger.Infof("Diff function called with actual state: %v", actual) + + var fn dcl.FieldName + var newDiffs []*dcl.FieldDiff + // New style diffs. + if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Uid, actual.Uid, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Uid")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Description, actual.Description, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Description")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Annotations, actual.Annotations, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Annotations")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Labels, actual.Labels, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Labels")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.CreateTime, actual.CreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.SerialPipeline, actual.SerialPipeline, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipeline, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("SerialPipeline")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Condition, actual.Condition, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareDeliveryPipelineConditionNewStyle, EmptyObject: EmptyDeliveryPipelineCondition, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Condition")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Etag, actual.Etag, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Etag")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Location, actual.Location, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Location")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Suspended, actual.Suspended, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Suspended")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if len(newDiffs) > 0 { + c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs) + } + return newDiffs, nil +} +func compareDeliveryPipelineSerialPipelineNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*DeliveryPipelineSerialPipeline) + if !ok { + desiredNotPointer, ok := d.(DeliveryPipelineSerialPipeline) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipeline or *DeliveryPipelineSerialPipeline", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*DeliveryPipelineSerialPipeline) + if !ok { + actualNotPointer, ok := a.(DeliveryPipelineSerialPipeline) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipeline", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Stages, actual.Stages, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineStagesNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipelineStages, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Stages")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareDeliveryPipelineSerialPipelineStagesNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*DeliveryPipelineSerialPipelineStages) + if !ok { + desiredNotPointer, ok := d.(DeliveryPipelineSerialPipelineStages) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStages or *DeliveryPipelineSerialPipelineStages", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*DeliveryPipelineSerialPipelineStages) + if !ok { + actualNotPointer, ok := a.(DeliveryPipelineSerialPipelineStages) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStages", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.TargetId, actual.TargetId, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("TargetId")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Profiles, actual.Profiles, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Profiles")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Strategy, actual.Strategy, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineStagesStrategyNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipelineStagesStrategy, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Strategy")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
// NOTE(review): this hunk belongs to the DCL-generated internals for the
// Cloud Deploy DeliveryPipeline resource (delivery_pipeline_internal.go).
// The code appears machine-generated (identical template repeated per
// nested message), so this pass adds documentation only — regenerate via
// the DCL/tpgtools pipeline rather than hand-editing the logic.
//
// Every compare*NewStyle function below follows the same generated shape:
//   1. normalize the desired (d) and actual (a) arguments — each may be
//      passed as either T or *T — to *T; any other dynamic type yields an
//      error;
//   2. dcl.Diff each field, nesting the field name onto fn via AddNest;
//      nested-message fields additionally supply the matching
//      ObjectFunction and EmptyObject so the diff recurses;
//   3. any detected difference selects the
//      "updateDeliveryPipelineUpdateDeliveryPipelineOperation" update via
//      dcl.TriggersOperation, and all field diffs are accumulated and
//      returned.

// Tail of the enclosing stage-comparison function (its beginning lies
// above this hunk): diff DeployParameters, then return the accumulated
// field diffs.
	}

	if ds, err := dcl.Diff(desired.DeployParameters, actual.DeployParameters, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineStagesDeployParametersNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipelineStagesDeployParameters, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("DeployParameters")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareDeliveryPipelineSerialPipelineStagesStrategyNewStyle diffs a
// stage's deployment strategy: its Standard and Canary sub-messages.
func compareDeliveryPipelineSerialPipelineStagesStrategyNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*DeliveryPipelineSerialPipelineStagesStrategy)
	if !ok {
		desiredNotPointer, ok := d.(DeliveryPipelineSerialPipelineStagesStrategy)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategy or *DeliveryPipelineSerialPipelineStagesStrategy", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*DeliveryPipelineSerialPipelineStagesStrategy)
	if !ok {
		actualNotPointer, ok := a.(DeliveryPipelineSerialPipelineStagesStrategy)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategy", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.Standard, actual.Standard, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineStagesStrategyStandardNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipelineStagesStrategyStandard, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Standard")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Canary, actual.Canary, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineStagesStrategyCanaryNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipelineStagesStrategyCanary, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Canary")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareDeliveryPipelineSerialPipelineStagesStrategyStandardNewStyle diffs
// the standard strategy: Verify plus the Predeploy/Postdeploy hooks.
func compareDeliveryPipelineSerialPipelineStagesStrategyStandardNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*DeliveryPipelineSerialPipelineStagesStrategyStandard)
	if !ok {
		desiredNotPointer, ok := d.(DeliveryPipelineSerialPipelineStagesStrategyStandard)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyStandard or *DeliveryPipelineSerialPipelineStagesStrategyStandard", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*DeliveryPipelineSerialPipelineStagesStrategyStandard)
	if !ok {
		actualNotPointer, ok := a.(DeliveryPipelineSerialPipelineStagesStrategyStandard)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyStandard", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.Verify, actual.Verify, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Verify")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Predeploy, actual.Predeploy, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineStagesStrategyStandardPredeployNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Predeploy")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Postdeploy, actual.Postdeploy, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeployNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Postdeploy")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareDeliveryPipelineSerialPipelineStagesStrategyStandardPredeployNewStyle
// diffs a standard strategy's predeploy hook (its Actions list).
func compareDeliveryPipelineSerialPipelineStagesStrategyStandardPredeployNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy)
	if !ok {
		desiredNotPointer, ok := d.(DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy or *DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy)
	if !ok {
		actualNotPointer, ok := a.(DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.Actions, actual.Actions, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Actions")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeployNewStyle
// diffs a standard strategy's postdeploy hook (its Actions list).
func compareDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeployNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy)
	if !ok {
		desiredNotPointer, ok := d.(DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy or *DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy)
	if !ok {
		actualNotPointer, ok := a.(DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.Actions, actual.Actions, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Actions")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareDeliveryPipelineSerialPipelineStagesStrategyCanaryNewStyle diffs the
// canary strategy: RuntimeConfig, CanaryDeployment, and
// CustomCanaryDeployment sub-messages.
func compareDeliveryPipelineSerialPipelineStagesStrategyCanaryNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*DeliveryPipelineSerialPipelineStagesStrategyCanary)
	if !ok {
		desiredNotPointer, ok := d.(DeliveryPipelineSerialPipelineStagesStrategyCanary)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanary or *DeliveryPipelineSerialPipelineStagesStrategyCanary", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*DeliveryPipelineSerialPipelineStagesStrategyCanary)
	if !ok {
		actualNotPointer, ok := a.(DeliveryPipelineSerialPipelineStagesStrategyCanary)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanary", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.RuntimeConfig, actual.RuntimeConfig, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("RuntimeConfig")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.CanaryDeployment, actual.CanaryDeployment, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("CanaryDeployment")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.CustomCanaryDeployment, actual.CustomCanaryDeployment, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("CustomCanaryDeployment")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigNewStyle
// diffs the canary runtime config: Kubernetes and CloudRun sub-messages.
func compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig)
	if !ok {
		desiredNotPointer, ok := d.(DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig or *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig)
	if !ok {
		actualNotPointer, ok := a.(DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.Kubernetes, actual.Kubernetes, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Kubernetes")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.CloudRun, actual.CloudRun, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("CloudRun")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesNewStyle
// diffs the Kubernetes runtime config: GatewayServiceMesh and
// ServiceNetworking sub-messages.
func compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes)
	if !ok {
		desiredNotPointer, ok := d.(DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes or *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes)
	if !ok {
		actualNotPointer, ok := a.(DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.GatewayServiceMesh, actual.GatewayServiceMesh, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("GatewayServiceMesh")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.ServiceNetworking, actual.ServiceNetworking, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("ServiceNetworking")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshNewStyle
// diffs the Gateway-API service-mesh config: scalar routing fields plus the
// nested RouteDestinations message.
func compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh)
	if !ok {
		desiredNotPointer, ok := d.(DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh or *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh)
	if !ok {
		actualNotPointer, ok := a.(DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.HttpRoute, actual.HttpRoute, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("HttpRoute")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Service, actual.Service, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Service")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Deployment, actual.Deployment, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Deployment")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.RouteUpdateWaitTime, actual.RouteUpdateWaitTime, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("RouteUpdateWaitTime")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.StableCutbackDuration, actual.StableCutbackDuration, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("StableCutbackDuration")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.PodSelectorLabel, actual.PodSelectorLabel, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("PodSelectorLabel")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.RouteDestinations, actual.RouteDestinations, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinationsNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("RouteDestinations")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinationsNewStyle
// diffs route destinations: DestinationIds and PropagateService.
func compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinationsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations)
	if !ok {
		desiredNotPointer, ok := d.(DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations or *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations)
	if !ok {
		actualNotPointer, ok := a.(DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.DestinationIds, actual.DestinationIds, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("DestinationIds")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.PropagateService, actual.PropagateService, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("PropagateService")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingNewStyle
// diffs the service-networking config: Service, Deployment,
// DisablePodOverprovisioning, and PodSelectorLabel.
func compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking)
	if !ok {
		desiredNotPointer, ok := d.(DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking or *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking)
	if !ok {
		actualNotPointer, ok := a.(DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.Service, actual.Service, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Service")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Deployment, actual.Deployment, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Deployment")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.DisablePodOverprovisioning, actual.DisablePodOverprovisioning, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("DisablePodOverprovisioning")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.PodSelectorLabel, actual.PodSelectorLabel, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("PodSelectorLabel")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunNewStyle
// diffs the Cloud Run runtime config: AutomaticTrafficControl plus the
// canary/prior/stable revision-tag lists.
func compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun)
	if !ok {
		desiredNotPointer, ok := d.(DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun or *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun)
	if !ok {
		actualNotPointer, ok := a.(DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.AutomaticTrafficControl, actual.AutomaticTrafficControl, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("AutomaticTrafficControl")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.CanaryRevisionTags, actual.CanaryRevisionTags, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("CanaryRevisionTags")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.PriorRevisionTags, actual.PriorRevisionTags, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("PriorRevisionTags")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.StableRevisionTags, actual.StableRevisionTags, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("StableRevisionTags")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentNewStyle
// diffs an automated canary deployment: Percentages, Verify, and the
// Predeploy/Postdeploy hooks.
func compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment)
	if !ok {
		desiredNotPointer, ok := d.(DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment or *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment)
	if !ok {
		actualNotPointer, ok := a.(DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.Percentages, actual.Percentages, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Percentages")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Verify, actual.Verify, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Verify")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Predeploy, actual.Predeploy, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeployNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Predeploy")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Postdeploy, actual.Postdeploy, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeployNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Postdeploy")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeployNewStyle
// diffs a canary deployment's predeploy hook (its Actions list).
func compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeployNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy)
	if !ok {
		desiredNotPointer, ok := d.(DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy or *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy)
	if !ok {
		actualNotPointer, ok := a.(DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.Actions, actual.Actions, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Actions")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeployNewStyle
// diffs a canary deployment's postdeploy hook (its Actions list).
func compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeployNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy)
	if !ok {
		desiredNotPointer, ok := d.(DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy or *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy)
	if !ok {
		actualNotPointer, ok := a.(DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.Actions, actual.Actions, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Actions")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentNewStyle
// diffs a custom canary deployment (its PhaseConfigs list).
func compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment)
	if !ok {
		desiredNotPointer, ok := d.(DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment or *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment)
	if !ok {
		actualNotPointer, ok := a.(DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.PhaseConfigs, actual.PhaseConfigs, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("PhaseConfigs")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsNewStyle
// diffs a single custom-canary phase: PhaseId, Percentage, Profiles,
// Verify, and the Predeploy/Postdeploy hooks.
func compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs)
	if !ok {
		desiredNotPointer, ok := d.(DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs or *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs)
	if !ok {
		actualNotPointer, ok := a.(DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.PhaseId, actual.PhaseId, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("PhaseId")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Percentage, actual.Percentage, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Percentage")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Profiles, actual.Profiles, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Profiles")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Verify, actual.Verify, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Verify")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Predeploy, actual.Predeploy, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeployNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Predeploy")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Postdeploy, actual.Postdeploy, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeployNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Postdeploy")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeployNewStyle
// diffs a phase's predeploy hook (its Actions list).
func compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeployNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy)
	if !ok {
		desiredNotPointer, ok := d.(DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy or *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy)
	if !ok {
		actualNotPointer, ok := a.(DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.Actions, actual.Actions, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Actions")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeployNewStyle
// diffs a phase's postdeploy hook (its Actions list).
func compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeployNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy)
	if !ok {
		desiredNotPointer, ok := d.(DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy or *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy)
	if !ok {
		actualNotPointer, ok := a.(DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.Actions, actual.Actions, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Actions")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareDeliveryPipelineSerialPipelineStagesDeployParametersNewStyle diffs a
// stage's deploy parameters: Values and MatchTargetLabels. (This function
// continues past the end of this hunk.)
func compareDeliveryPipelineSerialPipelineStagesDeployParametersNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*DeliveryPipelineSerialPipelineStagesDeployParameters)
	if !ok {
		desiredNotPointer, ok := d.(DeliveryPipelineSerialPipelineStagesDeployParameters)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesDeployParameters or *DeliveryPipelineSerialPipelineStagesDeployParameters", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*DeliveryPipelineSerialPipelineStagesDeployParameters)
	if !ok {
		actualNotPointer, ok := a.(DeliveryPipelineSerialPipelineStagesDeployParameters)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesDeployParameters", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.Values, actual.Values, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Values")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.MatchTargetLabels, actual.MatchTargetLabels, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("MatchTargetLabels")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
+ } + return diffs, nil +} + +func compareDeliveryPipelineConditionNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*DeliveryPipelineCondition) + if !ok { + desiredNotPointer, ok := d.(DeliveryPipelineCondition) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineCondition or *DeliveryPipelineCondition", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*DeliveryPipelineCondition) + if !ok { + actualNotPointer, ok := a.(DeliveryPipelineCondition) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineCondition", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.PipelineReadyCondition, actual.PipelineReadyCondition, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineConditionPipelineReadyConditionNewStyle, EmptyObject: EmptyDeliveryPipelineConditionPipelineReadyCondition, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("PipelineReadyCondition")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.TargetsPresentCondition, actual.TargetsPresentCondition, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineConditionTargetsPresentConditionNewStyle, EmptyObject: EmptyDeliveryPipelineConditionTargetsPresentCondition, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("TargetsPresentCondition")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.TargetsTypeCondition, actual.TargetsTypeCondition, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineConditionTargetsTypeConditionNewStyle, EmptyObject: EmptyDeliveryPipelineConditionTargetsTypeCondition, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("TargetsTypeCondition")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareDeliveryPipelineConditionPipelineReadyConditionNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*DeliveryPipelineConditionPipelineReadyCondition) + if !ok { + desiredNotPointer, ok := d.(DeliveryPipelineConditionPipelineReadyCondition) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineConditionPipelineReadyCondition or *DeliveryPipelineConditionPipelineReadyCondition", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*DeliveryPipelineConditionPipelineReadyCondition) + if !ok { + actualNotPointer, ok := a.(DeliveryPipelineConditionPipelineReadyCondition) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineConditionPipelineReadyCondition", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Status, actual.Status, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Status")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareDeliveryPipelineConditionTargetsPresentConditionNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*DeliveryPipelineConditionTargetsPresentCondition) + if !ok { + desiredNotPointer, ok := d.(DeliveryPipelineConditionTargetsPresentCondition) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineConditionTargetsPresentCondition or *DeliveryPipelineConditionTargetsPresentCondition", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*DeliveryPipelineConditionTargetsPresentCondition) + if !ok { + actualNotPointer, ok := a.(DeliveryPipelineConditionTargetsPresentCondition) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineConditionTargetsPresentCondition", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Status, actual.Status, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Status")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.MissingTargets, actual.MissingTargets, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("MissingTargets")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareDeliveryPipelineConditionTargetsTypeConditionNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*DeliveryPipelineConditionTargetsTypeCondition) + if !ok { + desiredNotPointer, ok := d.(DeliveryPipelineConditionTargetsTypeCondition) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineConditionTargetsTypeCondition or *DeliveryPipelineConditionTargetsTypeCondition", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*DeliveryPipelineConditionTargetsTypeCondition) + if !ok { + actualNotPointer, ok := a.(DeliveryPipelineConditionTargetsTypeCondition) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineConditionTargetsTypeCondition", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Status, actual.Status, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Status")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ErrorDetails, actual.ErrorDetails, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("ErrorDetails")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +// urlNormalized returns a copy of the resource struct with values normalized +// for URL substitutions. For instance, it converts long-form self-links to +// short-form so they can be substituted in. 
+func (r *DeliveryPipeline) urlNormalized() *DeliveryPipeline { + normalized := dcl.Copy(*r).(DeliveryPipeline) + normalized.Name = dcl.SelfLinkToName(r.Name) + normalized.Uid = dcl.SelfLinkToName(r.Uid) + normalized.Description = dcl.SelfLinkToName(r.Description) + normalized.Etag = dcl.SelfLinkToName(r.Etag) + normalized.Project = dcl.SelfLinkToName(r.Project) + normalized.Location = dcl.SelfLinkToName(r.Location) + return &normalized +} + +func (r *DeliveryPipeline) updateURL(userBasePath, updateName string) (string, error) { + nr := r.urlNormalized() + if updateName == "UpdateDeliveryPipeline" { + fields := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/deliveryPipelines/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, fields), nil + + } + + return "", fmt.Errorf("unknown update name: %s", updateName) +} + +// marshal encodes the DeliveryPipeline resource into JSON for a Create request, and +// performs transformations from the resource schema to the API schema if +// necessary. +func (r *DeliveryPipeline) marshal(c *Client) ([]byte, error) { + m, err := expandDeliveryPipeline(c, r) + if err != nil { + return nil, fmt.Errorf("error marshalling DeliveryPipeline: %w", err) + } + + return json.Marshal(m) +} + +// unmarshalDeliveryPipeline decodes JSON responses into the DeliveryPipeline resource schema. 
+func unmarshalDeliveryPipeline(b []byte, c *Client, res *DeliveryPipeline) (*DeliveryPipeline, error) { + var m map[string]interface{} + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + return unmarshalMapDeliveryPipeline(m, c, res) +} + +func unmarshalMapDeliveryPipeline(m map[string]interface{}, c *Client, res *DeliveryPipeline) (*DeliveryPipeline, error) { + + flattened := flattenDeliveryPipeline(c, m, res) + if flattened == nil { + return nil, fmt.Errorf("attempted to flatten empty json object") + } + return flattened, nil +} + +// expandDeliveryPipeline expands DeliveryPipeline into a JSON request object. +func expandDeliveryPipeline(c *Client, f *DeliveryPipeline) (map[string]interface{}, error) { + m := make(map[string]interface{}) + res := f + _ = res + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Name into name: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["name"] = v + } + if v := f.Description; dcl.ValueShouldBeSent(v) { + m["description"] = v + } + if v := f.Annotations; dcl.ValueShouldBeSent(v) { + m["annotations"] = v + } + if v := f.Labels; dcl.ValueShouldBeSent(v) { + m["labels"] = v + } + if v, err := expandDeliveryPipelineSerialPipeline(c, f.SerialPipeline, res); err != nil { + return nil, fmt.Errorf("error expanding SerialPipeline into serialPipeline: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["serialPipeline"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Project into project: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["project"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Location into location: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["location"] = v + } + if v := f.Suspended; dcl.ValueShouldBeSent(v) { + m["suspended"] = v + } + + return m, nil +} + +// flattenDeliveryPipeline flattens DeliveryPipeline from a JSON request 
object into the +// DeliveryPipeline type. +func flattenDeliveryPipeline(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipeline { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + if len(m) == 0 { + return nil + } + + resultRes := &DeliveryPipeline{} + resultRes.Name = dcl.FlattenString(m["name"]) + resultRes.Uid = dcl.FlattenString(m["uid"]) + resultRes.Description = dcl.FlattenString(m["description"]) + resultRes.Annotations = dcl.FlattenKeyValuePairs(m["annotations"]) + resultRes.Labels = dcl.FlattenKeyValuePairs(m["labels"]) + resultRes.CreateTime = dcl.FlattenString(m["createTime"]) + resultRes.UpdateTime = dcl.FlattenString(m["updateTime"]) + resultRes.SerialPipeline = flattenDeliveryPipelineSerialPipeline(c, m["serialPipeline"], res) + resultRes.Condition = flattenDeliveryPipelineCondition(c, m["condition"], res) + resultRes.Etag = dcl.FlattenString(m["etag"]) + resultRes.Project = dcl.FlattenString(m["project"]) + resultRes.Location = dcl.FlattenString(m["location"]) + resultRes.Suspended = dcl.FlattenBool(m["suspended"]) + + return resultRes +} + +// expandDeliveryPipelineSerialPipelineMap expands the contents of DeliveryPipelineSerialPipeline into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineMap(c *Client, f map[string]DeliveryPipelineSerialPipeline, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandDeliveryPipelineSerialPipeline(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandDeliveryPipelineSerialPipelineSlice expands the contents of DeliveryPipelineSerialPipeline into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineSlice(c *Client, f []DeliveryPipelineSerialPipeline, res *DeliveryPipeline) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandDeliveryPipelineSerialPipeline(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenDeliveryPipelineSerialPipelineMap flattens the contents of DeliveryPipelineSerialPipeline from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipeline { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]DeliveryPipelineSerialPipeline{} + } + + if len(a) == 0 { + return map[string]DeliveryPipelineSerialPipeline{} + } + + items := make(map[string]DeliveryPipelineSerialPipeline) + for k, item := range a { + items[k] = *flattenDeliveryPipelineSerialPipeline(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenDeliveryPipelineSerialPipelineSlice flattens the contents of DeliveryPipelineSerialPipeline from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineSlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipeline { + a, ok := i.([]interface{}) + if !ok { + return []DeliveryPipelineSerialPipeline{} + } + + if len(a) == 0 { + return []DeliveryPipelineSerialPipeline{} + } + + items := make([]DeliveryPipelineSerialPipeline, 0, len(a)) + for _, item := range a { + items = append(items, *flattenDeliveryPipelineSerialPipeline(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandDeliveryPipelineSerialPipeline expands an instance of DeliveryPipelineSerialPipeline into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipeline(c *Client, f *DeliveryPipelineSerialPipeline, res *DeliveryPipeline) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v, err := expandDeliveryPipelineSerialPipelineStagesSlice(c, f.Stages, res); err != nil { + return nil, fmt.Errorf("error expanding Stages into stages: %w", err) + } else if v != nil { + m["stages"] = v + } + + return m, nil +} + +// flattenDeliveryPipelineSerialPipeline flattens an instance of DeliveryPipelineSerialPipeline from a JSON +// response object. +func flattenDeliveryPipelineSerialPipeline(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipeline { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &DeliveryPipelineSerialPipeline{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyDeliveryPipelineSerialPipeline + } + r.Stages = flattenDeliveryPipelineSerialPipelineStagesSlice(c, m["stages"], res) + + return r +} + +// expandDeliveryPipelineSerialPipelineStagesMap expands the contents of DeliveryPipelineSerialPipelineStages into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesMap(c *Client, f map[string]DeliveryPipelineSerialPipelineStages, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStages(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandDeliveryPipelineSerialPipelineStagesSlice expands the contents of DeliveryPipelineSerialPipelineStages into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStagesSlice(c *Client, f []DeliveryPipelineSerialPipelineStages, res *DeliveryPipeline) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStages(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesMap flattens the contents of DeliveryPipelineSerialPipelineStages from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineStagesMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipelineStages { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]DeliveryPipelineSerialPipelineStages{} + } + + if len(a) == 0 { + return map[string]DeliveryPipelineSerialPipelineStages{} + } + + items := make(map[string]DeliveryPipelineSerialPipelineStages) + for k, item := range a { + items[k] = *flattenDeliveryPipelineSerialPipelineStages(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenDeliveryPipelineSerialPipelineStagesSlice flattens the contents of DeliveryPipelineSerialPipelineStages from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineStagesSlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipelineStages { + a, ok := i.([]interface{}) + if !ok { + return []DeliveryPipelineSerialPipelineStages{} + } + + if len(a) == 0 { + return []DeliveryPipelineSerialPipelineStages{} + } + + items := make([]DeliveryPipelineSerialPipelineStages, 0, len(a)) + for _, item := range a { + items = append(items, *flattenDeliveryPipelineSerialPipelineStages(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandDeliveryPipelineSerialPipelineStages expands an instance of DeliveryPipelineSerialPipelineStages into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStages(c *Client, f *DeliveryPipelineSerialPipelineStages, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.TargetId; !dcl.IsEmptyValueIndirect(v) { + m["targetId"] = v + } + if v := f.Profiles; v != nil { + m["profiles"] = v + } + if v, err := expandDeliveryPipelineSerialPipelineStagesStrategy(c, f.Strategy, res); err != nil { + return nil, fmt.Errorf("error expanding Strategy into strategy: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["strategy"] = v + } + if v, err := expandDeliveryPipelineSerialPipelineStagesDeployParametersSlice(c, f.DeployParameters, res); err != nil { + return nil, fmt.Errorf("error expanding DeployParameters into deployParameters: %w", err) + } else if v != nil { + m["deployParameters"] = v + } + + return m, nil +} + +// flattenDeliveryPipelineSerialPipelineStages flattens an instance of DeliveryPipelineSerialPipelineStages from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineStages(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipelineStages { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &DeliveryPipelineSerialPipelineStages{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyDeliveryPipelineSerialPipelineStages + } + r.TargetId = dcl.FlattenString(m["targetId"]) + r.Profiles = dcl.FlattenStringSlice(m["profiles"]) + r.Strategy = flattenDeliveryPipelineSerialPipelineStagesStrategy(c, m["strategy"], res) + r.DeployParameters = flattenDeliveryPipelineSerialPipelineStagesDeployParametersSlice(c, m["deployParameters"], res) + + return r +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyMap expands the contents of DeliveryPipelineSerialPipelineStagesStrategy into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStagesStrategyMap(c *Client, f map[string]DeliveryPipelineSerialPipelineStagesStrategy, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategy(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandDeliveryPipelineSerialPipelineStagesStrategySlice expands the contents of DeliveryPipelineSerialPipelineStagesStrategy into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesStrategySlice(c *Client, f []DeliveryPipelineSerialPipelineStagesStrategy, res *DeliveryPipeline) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategy(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyMap flattens the contents of DeliveryPipelineSerialPipelineStagesStrategy from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipelineStagesStrategy { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]DeliveryPipelineSerialPipelineStagesStrategy{} + } + + if len(a) == 0 { + return map[string]DeliveryPipelineSerialPipelineStagesStrategy{} + } + + items := make(map[string]DeliveryPipelineSerialPipelineStagesStrategy) + for k, item := range a { + items[k] = *flattenDeliveryPipelineSerialPipelineStagesStrategy(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategySlice flattens the contents of DeliveryPipelineSerialPipelineStagesStrategy from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineStagesStrategySlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipelineStagesStrategy { + a, ok := i.([]interface{}) + if !ok { + return []DeliveryPipelineSerialPipelineStagesStrategy{} + } + + if len(a) == 0 { + return []DeliveryPipelineSerialPipelineStagesStrategy{} + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategy, 0, len(a)) + for _, item := range a { + items = append(items, *flattenDeliveryPipelineSerialPipelineStagesStrategy(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandDeliveryPipelineSerialPipelineStagesStrategy expands an instance of DeliveryPipelineSerialPipelineStagesStrategy into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStagesStrategy(c *Client, f *DeliveryPipelineSerialPipelineStagesStrategy, res *DeliveryPipeline) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v, err := expandDeliveryPipelineSerialPipelineStagesStrategyStandard(c, f.Standard, res); err != nil { + return nil, fmt.Errorf("error expanding Standard into standard: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["standard"] = v + } + if v, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanary(c, f.Canary, res); err != nil { + return nil, fmt.Errorf("error expanding Canary into canary: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["canary"] = v + } + + return m, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategy flattens an instance of DeliveryPipelineSerialPipelineStagesStrategy from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineStagesStrategy(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipelineStagesStrategy { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &DeliveryPipelineSerialPipelineStagesStrategy{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyDeliveryPipelineSerialPipelineStagesStrategy + } + r.Standard = flattenDeliveryPipelineSerialPipelineStagesStrategyStandard(c, m["standard"], res) + r.Canary = flattenDeliveryPipelineSerialPipelineStagesStrategyCanary(c, m["canary"], res) + + return r +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyStandardMap expands the contents of DeliveryPipelineSerialPipelineStagesStrategyStandard into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStagesStrategyStandardMap(c *Client, f map[string]DeliveryPipelineSerialPipelineStagesStrategyStandard, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyStandard(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyStandardSlice expands the contents of DeliveryPipelineSerialPipelineStagesStrategyStandard into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesStrategyStandardSlice(c *Client, f []DeliveryPipelineSerialPipelineStagesStrategyStandard, res *DeliveryPipeline) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyStandard(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyStandardMap flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyStandard from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyStandardMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipelineStagesStrategyStandard { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyStandard{} + } + + if len(a) == 0 { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyStandard{} + } + + items := make(map[string]DeliveryPipelineSerialPipelineStagesStrategyStandard) + for k, item := range a { + items[k] = *flattenDeliveryPipelineSerialPipelineStagesStrategyStandard(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyStandardSlice flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyStandard from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineStagesStrategyStandardSlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipelineStagesStrategyStandard { + a, ok := i.([]interface{}) + if !ok { + return []DeliveryPipelineSerialPipelineStagesStrategyStandard{} + } + + if len(a) == 0 { + return []DeliveryPipelineSerialPipelineStagesStrategyStandard{} + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyStandard, 0, len(a)) + for _, item := range a { + items = append(items, *flattenDeliveryPipelineSerialPipelineStagesStrategyStandard(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyStandard expands an instance of DeliveryPipelineSerialPipelineStagesStrategyStandard into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStagesStrategyStandard(c *Client, f *DeliveryPipelineSerialPipelineStagesStrategyStandard, res *DeliveryPipeline) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Verify; !dcl.IsEmptyValueIndirect(v) { + m["verify"] = v + } + if v, err := expandDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy(c, f.Predeploy, res); err != nil { + return nil, fmt.Errorf("error expanding Predeploy into predeploy: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["predeploy"] = v + } + if v, err := expandDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy(c, f.Postdeploy, res); err != nil { + return nil, fmt.Errorf("error expanding Postdeploy into postdeploy: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["postdeploy"] = v + } + + return m, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyStandard flattens an instance of DeliveryPipelineSerialPipelineStagesStrategyStandard from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineStagesStrategyStandard(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipelineStagesStrategyStandard { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &DeliveryPipelineSerialPipelineStagesStrategyStandard{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyStandard + } + r.Verify = dcl.FlattenBool(m["verify"]) + r.Predeploy = flattenDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy(c, m["predeploy"], res) + r.Postdeploy = flattenDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy(c, m["postdeploy"], res) + + return r +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyStandardPredeployMap expands the contents of DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStagesStrategyStandardPredeployMap(c *Client, f map[string]DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy, res *DeliveryPipeline) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		// Entries that expand to nil (empty values) are dropped from the map.
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploySlice expands the contents of DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy into a JSON
+// request object.
+func expandDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploySlice(c *Client, f []DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy, res *DeliveryPipeline) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		// NOTE(review): unlike the Map variant above, nil expansions are appended here.
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenDeliveryPipelineSerialPipelineStagesStrategyStandardPredeployMap flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy from a JSON
+// response object.
+func flattenDeliveryPipelineSerialPipelineStagesStrategyStandardPredeployMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy{}
+	}
+
+	if len(a) == 0 {
+		return map[string]DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy{}
+	}
+
+	items := make(map[string]DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy)
+	for k, item := range a {
+		// NOTE(review): unchecked assertion — panics if a value is not a JSON object.
+		items[k] = *flattenDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploySlice flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy from a JSON
+// response object.
+func flattenDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploySlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy{}
+	}
+
+	if len(a) == 0 {
+		return []DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy{}
+	}
+
+	items := make([]DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy expands an instance of DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy into a JSON
+// request object.
+func expandDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy(c *Client, f *DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy, res *DeliveryPipeline) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	// NOTE(review): slice field uses a plain nil check (an empty non-nil slice is still sent),
+	// unlike scalar fields elsewhere which use dcl.IsEmptyValueIndirect.
+	if v := f.Actions; v != nil {
+		m["actions"] = v
+	}
+
+	return m, nil
+}
+
+// flattenDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy flattens an instance of DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy from a JSON
+// response object.
+func flattenDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy{}
+
+	// Empty object: return the shared sentinel instead of a fresh struct.
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy
+	}
+	r.Actions = dcl.FlattenStringSlice(m["actions"])
+
+	return r
+}
+
+// expandDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeployMap expands the contents of DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy into a JSON
+// request object.
+func expandDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeployMap(c *Client, f map[string]DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy, res *DeliveryPipeline) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		// Entries that expand to nil (empty values) are dropped from the map.
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploySlice expands the contents of DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy into a JSON
+// request object.
+func expandDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploySlice(c *Client, f []DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy, res *DeliveryPipeline) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeployMap flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy from a JSON
+// response object.
+func flattenDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeployMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy{}
+	}
+
+	if len(a) == 0 {
+		return map[string]DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy{}
+	}
+
+	items := make(map[string]DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy)
+	for k, item := range a {
+		// NOTE(review): unchecked assertion — panics if a value is not a JSON object.
+		items[k] = *flattenDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploySlice flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy from a JSON
+// response object.
+func flattenDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploySlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy{}
+	}
+
+	if len(a) == 0 {
+		return []DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy{}
+	}
+
+	items := make([]DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy expands an instance of DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy into a JSON
+// request object.
+func expandDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy(c *Client, f *DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy, res *DeliveryPipeline) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	// Slice field: plain nil check, so an empty non-nil slice is still sent.
+	if v := f.Actions; v != nil {
+		m["actions"] = v
+	}
+
+	return m, nil
+}
+
+// flattenDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy flattens an instance of DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy from a JSON
+// response object.
+func flattenDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy{}
+
+	// Empty object: return the shared sentinel instead of a fresh struct.
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy
+	}
+	r.Actions = dcl.FlattenStringSlice(m["actions"])
+
+	return r
+}
+
+// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryMap expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanary into a JSON
+// request object.
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryMap(c *Client, f map[string]DeliveryPipelineSerialPipelineStagesStrategyCanary, res *DeliveryPipeline) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanary(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		// Entries that expand to nil (empty values) are dropped from the map.
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandDeliveryPipelineSerialPipelineStagesStrategyCanarySlice expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanary into a JSON
+// request object.
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanarySlice(c *Client, f []DeliveryPipelineSerialPipelineStagesStrategyCanary, res *DeliveryPipeline) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanary(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryMap flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanary from a JSON
+// response object.
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipelineStagesStrategyCanary {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanary{}
+	}
+
+	if len(a) == 0 {
+		return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanary{}
+	}
+
+	items := make(map[string]DeliveryPipelineSerialPipelineStagesStrategyCanary)
+	for k, item := range a {
+		// NOTE(review): unchecked assertion — panics if a value is not a JSON object.
+		items[k] = *flattenDeliveryPipelineSerialPipelineStagesStrategyCanary(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenDeliveryPipelineSerialPipelineStagesStrategyCanarySlice flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanary from a JSON
+// response object.
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanarySlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipelineStagesStrategyCanary {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []DeliveryPipelineSerialPipelineStagesStrategyCanary{}
+	}
+
+	if len(a) == 0 {
+		return []DeliveryPipelineSerialPipelineStagesStrategyCanary{}
+	}
+
+	items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanary, 0, len(a))
+	for _, item := range a {
+		// NOTE(review): unchecked assertion — panics if an element is not a JSON object.
+		items = append(items, *flattenDeliveryPipelineSerialPipelineStagesStrategyCanary(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandDeliveryPipelineSerialPipelineStagesStrategyCanary expands an instance of DeliveryPipelineSerialPipelineStagesStrategyCanary into a JSON
+// request object.
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanary(c *Client, f *DeliveryPipelineSerialPipelineStagesStrategyCanary, res *DeliveryPipeline) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(c, f.RuntimeConfig, res); err != nil {
+		return nil, fmt.Errorf("error expanding RuntimeConfig into runtimeConfig: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["runtimeConfig"] = v
+	}
+	if v, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(c, f.CanaryDeployment, res); err != nil {
+		return nil, fmt.Errorf("error expanding CanaryDeployment into canaryDeployment: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["canaryDeployment"] = v
+	}
+	if v, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(c, f.CustomCanaryDeployment, res); err != nil {
+		return nil, fmt.Errorf("error expanding CustomCanaryDeployment into customCanaryDeployment: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["customCanaryDeployment"] = v
+	}
+
+	return m, nil
+}
+
+// flattenDeliveryPipelineSerialPipelineStagesStrategyCanary flattens an instance of DeliveryPipelineSerialPipelineStagesStrategyCanary from a JSON
+// response object.
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanary(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipelineStagesStrategyCanary {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &DeliveryPipelineSerialPipelineStagesStrategyCanary{}
+
+	// Empty object: return the shared sentinel instead of a fresh struct.
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanary
+	}
+	r.RuntimeConfig = flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(c, m["runtimeConfig"], res)
+	r.CanaryDeployment = flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(c, m["canaryDeployment"], res)
+	r.CustomCanaryDeployment = flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(c, m["customCanaryDeployment"], res)
+
+	return r
+}
+
+// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigMap expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig into a JSON
+// request object.
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigMap(c *Client, f map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig, res *DeliveryPipeline) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		// Entries that expand to nil (empty values) are dropped from the map.
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigSlice expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig into a JSON
+// request object.
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigSlice(c *Client, f []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig, res *DeliveryPipeline) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigMap flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig from a JSON
+// response object.
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig{}
+	}
+
+	if len(a) == 0 {
+		return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig{}
+	}
+
+	items := make(map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig)
+	for k, item := range a {
+		// NOTE(review): unchecked assertion — panics if a value is not a JSON object.
+		items[k] = *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigSlice flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig from a JSON
+// response object.
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigSlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig{}
+	}
+
+	if len(a) == 0 {
+		return []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig{}
+	}
+
+	items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig, 0, len(a))
+	for _, item := range a {
+		// NOTE(review): unchecked assertion — panics if an element is not a JSON object.
+		items = append(items, *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig expands an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig into a JSON
+// request object.
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(c *Client, f *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig, res *DeliveryPipeline) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(c, f.Kubernetes, res); err != nil {
+		return nil, fmt.Errorf("error expanding Kubernetes into kubernetes: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["kubernetes"] = v
+	}
+	if v, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(c, f.CloudRun, res); err != nil {
+		return nil, fmt.Errorf("error expanding CloudRun into cloudRun: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["cloudRun"] = v
+	}
+
+	return m, nil
+}
+
+// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig flattens an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig from a JSON
+// response object.
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig{}
+
+	// Empty object: return the shared sentinel instead of a fresh struct.
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig
+	}
+	r.Kubernetes = flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(c, m["kubernetes"], res)
+	r.CloudRun = flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(c, m["cloudRun"], res)
+
+	return r
+}
+
+// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesMap expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes into a JSON
+// request object.
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesMap(c *Client, f map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes, res *DeliveryPipeline) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		// Entries that expand to nil (empty values) are dropped from the map.
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesSlice expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes into a JSON
+// request object.
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesSlice(c *Client, f []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes, res *DeliveryPipeline) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesMap flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes from a JSON
+// response object.
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes{}
+	}
+
+	if len(a) == 0 {
+		return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes{}
+	}
+
+	items := make(map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes)
+	for k, item := range a {
+		// NOTE(review): unchecked assertion — panics if a value is not a JSON object.
+		items[k] = *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesSlice flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes from a JSON
+// response object.
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesSlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes{}
+	}
+
+	if len(a) == 0 {
+		return []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes{}
+	}
+
+	items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes, 0, len(a))
+	for _, item := range a {
+		// NOTE(review): unchecked assertion — panics if an element is not a JSON object.
+		items = append(items, *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes expands an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes into a JSON
+// request object.
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(c *Client, f *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes, res *DeliveryPipeline) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(c, f.GatewayServiceMesh, res); err != nil {
+		return nil, fmt.Errorf("error expanding GatewayServiceMesh into gatewayServiceMesh: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["gatewayServiceMesh"] = v
+	}
+	if v, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(c, f.ServiceNetworking, res); err != nil {
+		return nil, fmt.Errorf("error expanding ServiceNetworking into serviceNetworking: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["serviceNetworking"] = v
+	}
+
+	return m, nil
+}
+
+// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes flattens an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes from a JSON
+// response object.
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes{}
+
+	// Empty object: return the shared sentinel instead of a fresh struct.
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes
+	}
+	r.GatewayServiceMesh = flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(c, m["gatewayServiceMesh"], res)
+	r.ServiceNetworking = flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(c, m["serviceNetworking"], res)
+
+	return r
+}
+
+// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshMap expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh into a JSON
+// request object.
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshMap(c *Client, f map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh, res *DeliveryPipeline) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		// Entries that expand to nil (empty values) are dropped from the map.
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshSlice expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh into a JSON
+// request object.
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshSlice(c *Client, f []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh, res *DeliveryPipeline) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshMap flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh from a JSON
+// response object.
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		// Not a JSON object: return an empty, non-nil map.
+		return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh{}
+	}
+
+	if len(a) == 0 {
+		return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh{}
+	}
+
+	items := make(map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh)
+	for k, item := range a {
+		// NOTE(review): unchecked assertion — panics if a value is not a JSON object.
+		items[k] = *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshSlice flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh from a JSON
+// response object.
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshSlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh {
+	a, ok := i.([]interface{})
+	if !ok {
+		// Not a JSON array: return an empty, non-nil slice.
+		return []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh{}
+	}
+
+	if len(a) == 0 {
+		return []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh{}
+	}
+
+	items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh, 0, len(a))
+	for _, item := range a {
+		// NOTE(review): unchecked assertion — panics if an element is not a JSON object.
+		items = append(items, *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh expands an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh into a JSON
+// request object.
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(c *Client, f *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh, res *DeliveryPipeline) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	// Scalar fields are omitted when empty; RouteDestinations is expanded recursively.
+	m := make(map[string]interface{})
+	if v := f.HttpRoute; !dcl.IsEmptyValueIndirect(v) {
+		m["httpRoute"] = v
+	}
+	if v := f.Service; !dcl.IsEmptyValueIndirect(v) {
+		m["service"] = v
+	}
+	if v := f.Deployment; !dcl.IsEmptyValueIndirect(v) {
+		m["deployment"] = v
+	}
+	if v := f.RouteUpdateWaitTime; !dcl.IsEmptyValueIndirect(v) {
+		m["routeUpdateWaitTime"] = v
+	}
+	if v := f.StableCutbackDuration; !dcl.IsEmptyValueIndirect(v) {
+		m["stableCutbackDuration"] = v
+	}
+	if v := f.PodSelectorLabel; !dcl.IsEmptyValueIndirect(v) {
+		m["podSelectorLabel"] = v
+	}
+	if v, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations(c, f.RouteDestinations, res); err != nil {
+		return nil, fmt.Errorf("error expanding RouteDestinations into routeDestinations: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["routeDestinations"] = v
+	}
+
+	return m, nil
+}
+
+// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh flattens an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh from a JSON
+// response object.
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh{}
+
+	// Empty object: return the shared sentinel instead of a fresh struct.
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh
+	}
+	r.HttpRoute = dcl.FlattenString(m["httpRoute"])
+	r.Service = dcl.FlattenString(m["service"])
+	r.Deployment = dcl.FlattenString(m["deployment"])
+	r.RouteUpdateWaitTime = dcl.FlattenString(m["routeUpdateWaitTime"])
+	r.StableCutbackDuration = dcl.FlattenString(m["stableCutbackDuration"])
+	r.PodSelectorLabel = dcl.FlattenString(m["podSelectorLabel"])
+	r.RouteDestinations = flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations(c, m["routeDestinations"], res)
+
+	return r
+}
+
+// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinationsMap expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations into a JSON
+// request object.
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinationsMap(c *Client, f map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations, res *DeliveryPipeline) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	out := make(map[string]interface{}, len(f))
+	for key := range f {
+		value := f[key]
+		expanded, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations(c, &value, res)
+		if err != nil {
+			return nil, err
+		}
+		// Entries that expand to nothing are dropped from the map.
+		if expanded != nil {
+			out[key] = expanded
+		}
+	}
+	return out, nil
+}
+
+// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinationsSlice serializes each element of a slice of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations into its JSON
+// request form.
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinationsSlice(c *Client, f []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations, res *DeliveryPipeline) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	out := make([]map[string]interface{}, 0, len(f))
+	for idx := range f {
+		expanded, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations(c, &f[idx], res)
+		if err != nil {
+			return nil, err
+		}
+		out = append(out, expanded)
+	}
+	return out, nil
+}
+
+// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinationsMap converts a JSON object of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations values back into a typed
+// map.
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinationsMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations {
+	raw, ok := i.(map[string]interface{})
+	if !ok || len(raw) == 0 {
+		return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations{}
+	}
+
+	out := make(map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations, len(raw))
+	for key, value := range raw {
+		out[key] = *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations(c, value.(map[string]interface{}), res)
+	}
+	return out
+}
+
+// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinationsSlice converts a JSON array back into a slice of
+// DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations.
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinationsSlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations {
+	raw, ok := i.([]interface{})
+	if !ok || len(raw) == 0 {
+		return []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations{}
+	}
+
+	out := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations, 0, len(raw))
+	for _, value := range raw {
+		out = append(out, *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations(c, value.(map[string]interface{}), res))
+	}
+	return out
+}
+
+// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations serializes a single DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations into its JSON
+// request form.
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations(c *Client, f *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations, res *DeliveryPipeline) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	body := map[string]interface{}{}
+	if v := f.DestinationIds; v != nil {
+		body["destinationIds"] = v
+	}
+	if v := f.PropagateService; !dcl.IsEmptyValueIndirect(v) {
+		body["propagateService"] = v
+	}
+
+	return body, nil
+}
+
+// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations reconstructs a single DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations from its JSON
+// response representation.
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations
+	}
+
+	return &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations{
+		DestinationIds:   dcl.FlattenStringSlice(m["destinationIds"]),
+		PropagateService: dcl.FlattenBool(m["propagateService"]),
+	}
+}
+
+// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingMap expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking into a JSON
+// request object.
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingMap(c *Client, f map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking, res *DeliveryPipeline) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	out := make(map[string]interface{}, len(f))
+	for key := range f {
+		value := f[key]
+		expanded, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(c, &value, res)
+		if err != nil {
+			return nil, err
+		}
+		if expanded != nil {
+			out[key] = expanded
+		}
+	}
+	return out, nil
+}
+
+// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingSlice serializes each element of a slice of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking into its JSON
+// request form.
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingSlice(c *Client, f []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking, res *DeliveryPipeline) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	out := make([]map[string]interface{}, 0, len(f))
+	for idx := range f {
+		expanded, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(c, &f[idx], res)
+		if err != nil {
+			return nil, err
+		}
+		out = append(out, expanded)
+	}
+	return out, nil
+}
+
+// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingMap converts a JSON object of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking values back into a typed
+// map.
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking {
+	raw, ok := i.(map[string]interface{})
+	if !ok || len(raw) == 0 {
+		return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking{}
+	}
+
+	out := make(map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking, len(raw))
+	for key, value := range raw {
+		out[key] = *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(c, value.(map[string]interface{}), res)
+	}
+	return out
+}
+
+// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingSlice converts a JSON array back into a slice of
+// DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking.
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingSlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking {
+	raw, ok := i.([]interface{})
+	if !ok || len(raw) == 0 {
+		return []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking{}
+	}
+
+	out := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking, 0, len(raw))
+	for _, value := range raw {
+		out = append(out, *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(c, value.(map[string]interface{}), res))
+	}
+	return out
+}
+
+// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking serializes a single DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking into its JSON
+// request form.
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(c *Client, f *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking, res *DeliveryPipeline) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	body := map[string]interface{}{}
+	if v := f.Service; !dcl.IsEmptyValueIndirect(v) {
+		body["service"] = v
+	}
+	if v := f.Deployment; !dcl.IsEmptyValueIndirect(v) {
+		body["deployment"] = v
+	}
+	if v := f.DisablePodOverprovisioning; !dcl.IsEmptyValueIndirect(v) {
+		body["disablePodOverprovisioning"] = v
+	}
+	if v := f.PodSelectorLabel; !dcl.IsEmptyValueIndirect(v) {
+		body["podSelectorLabel"] = v
+	}
+
+	return body, nil
+}
+
+// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking reconstructs a single DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking from its JSON
+// response representation.
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking
+	}
+
+	return &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking{
+		Service:                    dcl.FlattenString(m["service"]),
+		Deployment:                 dcl.FlattenString(m["deployment"]),
+		DisablePodOverprovisioning: dcl.FlattenBool(m["disablePodOverprovisioning"]),
+		PodSelectorLabel:           dcl.FlattenString(m["podSelectorLabel"]),
+	}
+}
+
+// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunMap expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun into a JSON
+// request object.
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunMap(c *Client, f map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun, res *DeliveryPipeline) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	out := make(map[string]interface{}, len(f))
+	for key := range f {
+		value := f[key]
+		expanded, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(c, &value, res)
+		if err != nil {
+			return nil, err
+		}
+		if expanded != nil {
+			out[key] = expanded
+		}
+	}
+	return out, nil
+}
+
+// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunSlice serializes each element of a slice of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun into its JSON
+// request form.
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunSlice(c *Client, f []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun, res *DeliveryPipeline) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	out := make([]map[string]interface{}, 0, len(f))
+	for idx := range f {
+		expanded, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(c, &f[idx], res)
+		if err != nil {
+			return nil, err
+		}
+		out = append(out, expanded)
+	}
+	return out, nil
+}
+
+// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunMap flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun from a JSON
+// response object.
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun {
+	raw, ok := i.(map[string]interface{})
+	if !ok || len(raw) == 0 {
+		return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun{}
+	}
+
+	out := make(map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun, len(raw))
+	for key, value := range raw {
+		out[key] = *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(c, value.(map[string]interface{}), res)
+	}
+	return out
+}
+
+// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunSlice converts a JSON array back into a slice of
+// DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun.
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunSlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun {
+	raw, ok := i.([]interface{})
+	if !ok || len(raw) == 0 {
+		return []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun{}
+	}
+
+	out := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun, 0, len(raw))
+	for _, value := range raw {
+		out = append(out, *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(c, value.(map[string]interface{}), res))
+	}
+	return out
+}
+
+// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun serializes a single DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun into its JSON
+// request form.
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(c *Client, f *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun, res *DeliveryPipeline) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	body := map[string]interface{}{}
+	if v := f.AutomaticTrafficControl; !dcl.IsEmptyValueIndirect(v) {
+		body["automaticTrafficControl"] = v
+	}
+	// Slice-valued fields are included whenever non-nil, even when empty.
+	if v := f.CanaryRevisionTags; v != nil {
+		body["canaryRevisionTags"] = v
+	}
+	if v := f.PriorRevisionTags; v != nil {
+		body["priorRevisionTags"] = v
+	}
+	if v := f.StableRevisionTags; v != nil {
+		body["stableRevisionTags"] = v
+	}
+
+	return body, nil
+}
+
+// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun reconstructs a single DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun from its JSON
+// response representation.
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun
+	}
+
+	return &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun{
+		AutomaticTrafficControl: dcl.FlattenBool(m["automaticTrafficControl"]),
+		CanaryRevisionTags:      dcl.FlattenStringSlice(m["canaryRevisionTags"]),
+		PriorRevisionTags:       dcl.FlattenStringSlice(m["priorRevisionTags"]),
+		StableRevisionTags:      dcl.FlattenStringSlice(m["stableRevisionTags"]),
+	}
+}
+
+// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentMap expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment into a JSON
+// request object.
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentMap(c *Client, f map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment, res *DeliveryPipeline) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	out := make(map[string]interface{}, len(f))
+	for key := range f {
+		value := f[key]
+		expanded, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(c, &value, res)
+		if err != nil {
+			return nil, err
+		}
+		if expanded != nil {
+			out[key] = expanded
+		}
+	}
+	return out, nil
+}
+
+// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentSlice serializes each element of a slice of DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment into its JSON
+// request form.
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentSlice(c *Client, f []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment, res *DeliveryPipeline) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	out := make([]map[string]interface{}, 0, len(f))
+	for idx := range f {
+		expanded, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(c, &f[idx], res)
+		if err != nil {
+			return nil, err
+		}
+		out = append(out, expanded)
+	}
+	return out, nil
+}
+
+// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentMap converts a JSON object of DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment values back into a typed
+// map.
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment {
+	raw, ok := i.(map[string]interface{})
+	if !ok || len(raw) == 0 {
+		return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment{}
+	}
+
+	out := make(map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment, len(raw))
+	for key, value := range raw {
+		out[key] = *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(c, value.(map[string]interface{}), res)
+	}
+	return out
+}
+
+// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentSlice converts a JSON array back into a slice of
+// DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment.
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentSlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment {
+	raw, ok := i.([]interface{})
+	if !ok || len(raw) == 0 {
+		return []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment{}
+	}
+
+	out := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment, 0, len(raw))
+	for _, value := range raw {
+		out = append(out, *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(c, value.(map[string]interface{}), res))
+	}
+	return out
+}
+
+// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment serializes a single DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment into its JSON
+// request form.
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(c *Client, f *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment, res *DeliveryPipeline) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	body := map[string]interface{}{}
+	if v := f.Percentages; v != nil {
+		body["percentages"] = v
+	}
+	if v := f.Verify; !dcl.IsEmptyValueIndirect(v) {
+		body["verify"] = v
+	}
+	pre, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy(c, f.Predeploy, res)
+	if err != nil {
+		return nil, fmt.Errorf("error expanding Predeploy into predeploy: %w", err)
+	}
+	if !dcl.IsEmptyValueIndirect(pre) {
+		body["predeploy"] = pre
+	}
+	post, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy(c, f.Postdeploy, res)
+	if err != nil {
+		return nil, fmt.Errorf("error expanding Postdeploy into postdeploy: %w", err)
+	}
+	if !dcl.IsEmptyValueIndirect(post) {
+		body["postdeploy"] = post
+	}
+
+	return body, nil
+}
+
+// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment reconstructs a single DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment from its JSON
+// response representation.
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment
+	}
+
+	return &DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment{
+		Percentages: dcl.FlattenIntSlice(m["percentages"]),
+		Verify:      dcl.FlattenBool(m["verify"]),
+		Predeploy:   flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy(c, m["predeploy"], res),
+		Postdeploy:  flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy(c, m["postdeploy"], res),
+	}
+}
+
+// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeployMap expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy into a JSON
+// request object.
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeployMap(c *Client, f map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy, res *DeliveryPipeline) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	out := make(map[string]interface{}, len(f))
+	for key := range f {
+		value := f[key]
+		expanded, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy(c, &value, res)
+		if err != nil {
+			return nil, err
+		}
+		if expanded != nil {
+			out[key] = expanded
+		}
+	}
+	return out, nil
+}
+
+// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploySlice expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy into a JSON
+// request object.
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploySlice(c *Client, f []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy, res *DeliveryPipeline) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	out := make([]map[string]interface{}, 0, len(f))
+	for idx := range f {
+		expanded, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy(c, &f[idx], res)
+		if err != nil {
+			return nil, err
+		}
+		out = append(out, expanded)
+	}
+	return out, nil
+}
+
+// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeployMap converts a JSON object of DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy values back into a typed
+// map.
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeployMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy {
+	raw, ok := i.(map[string]interface{})
+	if !ok || len(raw) == 0 {
+		return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy{}
+	}
+
+	out := make(map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy, len(raw))
+	for key, value := range raw {
+		out[key] = *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy(c, value.(map[string]interface{}), res)
+	}
+	return out
+}
+
+// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploySlice converts a JSON array back into a slice of
+// DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy.
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploySlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy {
+	raw, ok := i.([]interface{})
+	if !ok || len(raw) == 0 {
+		return []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy{}
+	}
+
+	out := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy, 0, len(raw))
+	for _, value := range raw {
+		out = append(out, *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy(c, value.(map[string]interface{}), res))
+	}
+	return out
+}
+
+// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy serializes a single DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy into its JSON
+// request form.
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy(c *Client, f *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy, res *DeliveryPipeline) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	body := map[string]interface{}{}
+	if v := f.Actions; v != nil {
+		body["actions"] = v
+	}
+
+	return body, nil
+}
+
+// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy reconstructs a single DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy from its JSON
+// response representation.
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy
+	}
+
+	return &DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy{
+		Actions: dcl.FlattenStringSlice(m["actions"]),
+	}
+}
+
+// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeployMap expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy into a JSON
+// request object.
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeployMap(c *Client, f map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy, res *DeliveryPipeline) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	out := make(map[string]interface{}, len(f))
+	for key := range f {
+		value := f[key]
+		expanded, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy(c, &value, res)
+		if err != nil {
+			return nil, err
+		}
+		if expanded != nil {
+			out[key] = expanded
+		}
+	}
+	return out, nil
+}
+
+// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploySlice expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy into a JSON
+// request object.
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploySlice(c *Client, f []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy, res *DeliveryPipeline) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	out := make([]map[string]interface{}, 0, len(f))
+	for idx := range f {
+		expanded, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy(c, &f[idx], res)
+		if err != nil {
+			return nil, err
+		}
+		out = append(out, expanded)
+	}
+	return out, nil
+}
+
+// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeployMap converts a JSON object of DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy values back into a typed
+// map.
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeployMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy {
+	raw, ok := i.(map[string]interface{})
+	if !ok || len(raw) == 0 {
+		return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy{}
+	}
+
+	out := make(map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy, len(raw))
+	for key, value := range raw {
+		out[key] = *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy(c, value.(map[string]interface{}), res)
+	}
+	return out
+}
+
+// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploySlice converts a JSON array back into a slice of
+// DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy.
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploySlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy {
+	raw, ok := i.([]interface{})
+	if !ok || len(raw) == 0 {
+		return []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy{}
+	}
+
+	out := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy, 0, len(raw))
+	for _, value := range raw {
+		out = append(out, *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy(c, value.(map[string]interface{}), res))
+	}
+	return out
+}
+
+// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy serializes a single DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy into its JSON
+// request form.
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy(c *Client, f *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy, res *DeliveryPipeline) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	body := map[string]interface{}{}
+	if v := f.Actions; v != nil {
+		body["actions"] = v
+	}
+
+	return body, nil
+}
+
+// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy reconstructs a single DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy from its JSON
+// response representation.
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy
+	}
+
+	return &DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy{
+		Actions: dcl.FlattenStringSlice(m["actions"]),
+	}
+}
+
+// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentMap expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment into a JSON
+// request object.
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentMap(c *Client, f map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment, res *DeliveryPipeline) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	out := make(map[string]interface{}, len(f))
+	for key := range f {
+		value := f[key]
+		expanded, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(c, &value, res)
+		if err != nil {
+			return nil, err
+		}
+		if expanded != nil {
+			out[key] = expanded
+		}
+	}
+	return out, nil
+}
+
+// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentSlice expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment into a JSON
+// request object.
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentSlice(c *Client, f []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment, res *DeliveryPipeline) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentMap flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment{} + } + + if len(a) == 0 { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment{} + } + + items := make(map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment) + for k, item := range a { + items[k] = *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentSlice flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentSlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment { + a, ok := i.([]interface{}) + if !ok { + return []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment{} + } + + if len(a) == 0 { + return []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment{} + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment, 0, len(a)) + for _, item := range a { + items = append(items, *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment expands an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(c *Client, f *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment, res *DeliveryPipeline) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsSlice(c, f.PhaseConfigs, res); err != nil { + return nil, fmt.Errorf("error expanding PhaseConfigs into phaseConfigs: %w", err) + } else if v != nil { + m["phaseConfigs"] = v + } + + return m, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment flattens an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment + } + r.PhaseConfigs = flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsSlice(c, m["phaseConfigs"], res) + + return r +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsMap expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsMap(c *Client, f map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsSlice expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsSlice(c *Client, f []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs, res *DeliveryPipeline) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsMap flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs{} + } + + if len(a) == 0 { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs{} + } + + items := make(map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs) + for k, item := range a { + items[k] = *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsSlice flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsSlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs { + a, ok := i.([]interface{}) + if !ok { + return []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs{} + } + + if len(a) == 0 { + return []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs{} + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs, 0, len(a)) + for _, item := range a { + items = append(items, *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs expands an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs(c *Client, f *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.PhaseId; !dcl.IsEmptyValueIndirect(v) { + m["phaseId"] = v + } + if v := f.Percentage; !dcl.IsEmptyValueIndirect(v) { + m["percentage"] = v + } + if v := f.Profiles; v != nil { + m["profiles"] = v + } + if v := f.Verify; !dcl.IsEmptyValueIndirect(v) { + m["verify"] = v + } + if v, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy(c, f.Predeploy, res); err != nil { + return nil, fmt.Errorf("error expanding Predeploy into predeploy: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["predeploy"] = v + } + if v, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy(c, f.Postdeploy, res); err != nil { + return nil, fmt.Errorf("error expanding Postdeploy into postdeploy: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["postdeploy"] = v + } + + return m, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs flattens an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs + } + r.PhaseId = dcl.FlattenString(m["phaseId"]) + r.Percentage = dcl.FlattenInteger(m["percentage"]) + r.Profiles = dcl.FlattenStringSlice(m["profiles"]) + r.Verify = dcl.FlattenBool(m["verify"]) + r.Predeploy = flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy(c, m["predeploy"], res) + r.Postdeploy = flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy(c, m["postdeploy"], res) + + return r +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeployMap expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeployMap(c *Client, f map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploySlice expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploySlice(c *Client, f []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy, res *DeliveryPipeline) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeployMap flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeployMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy{} + } + + if len(a) == 0 { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy{} + } + + items := make(map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy) + for k, item := range a { + items[k] = *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploySlice flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploySlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy { + a, ok := i.([]interface{}) + if !ok { + return []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy{} + } + + if len(a) == 0 { + return []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy{} + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy, 0, len(a)) + for _, item := range a { + items = append(items, *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy expands an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy(c *Client, f *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy, res *DeliveryPipeline) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Actions; v != nil { + m["actions"] = v + } + + return m, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy flattens an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy + } + r.Actions = dcl.FlattenStringSlice(m["actions"]) + + return r +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeployMap expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeployMap(c *Client, f map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploySlice expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploySlice(c *Client, f []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy, res *DeliveryPipeline) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeployMap flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeployMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy{} + } + + if len(a) == 0 { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy{} + } + + items := make(map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy) + for k, item := range a { + items[k] = *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploySlice flattens the contents of 
DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploySlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy { + a, ok := i.([]interface{}) + if !ok { + return []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy{} + } + + if len(a) == 0 { + return []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy{} + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy, 0, len(a)) + for _, item := range a { + items = append(items, *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy expands an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy(c *Client, f *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy, res *DeliveryPipeline) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Actions; v != nil { + m["actions"] = v + } + + return m, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy flattens an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy + } + r.Actions = dcl.FlattenStringSlice(m["actions"]) + + return r +} + +// expandDeliveryPipelineSerialPipelineStagesDeployParametersMap expands the contents of DeliveryPipelineSerialPipelineStagesDeployParameters into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesDeployParametersMap(c *Client, f map[string]DeliveryPipelineSerialPipelineStagesDeployParameters, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesDeployParameters(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandDeliveryPipelineSerialPipelineStagesDeployParametersSlice expands the contents of DeliveryPipelineSerialPipelineStagesDeployParameters into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStagesDeployParametersSlice(c *Client, f []DeliveryPipelineSerialPipelineStagesDeployParameters, res *DeliveryPipeline) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesDeployParameters(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesDeployParametersMap flattens the contents of DeliveryPipelineSerialPipelineStagesDeployParameters from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineStagesDeployParametersMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipelineStagesDeployParameters { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]DeliveryPipelineSerialPipelineStagesDeployParameters{} + } + + if len(a) == 0 { + return map[string]DeliveryPipelineSerialPipelineStagesDeployParameters{} + } + + items := make(map[string]DeliveryPipelineSerialPipelineStagesDeployParameters) + for k, item := range a { + items[k] = *flattenDeliveryPipelineSerialPipelineStagesDeployParameters(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenDeliveryPipelineSerialPipelineStagesDeployParametersSlice flattens the contents of DeliveryPipelineSerialPipelineStagesDeployParameters from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesDeployParametersSlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipelineStagesDeployParameters { + a, ok := i.([]interface{}) + if !ok { + return []DeliveryPipelineSerialPipelineStagesDeployParameters{} + } + + if len(a) == 0 { + return []DeliveryPipelineSerialPipelineStagesDeployParameters{} + } + + items := make([]DeliveryPipelineSerialPipelineStagesDeployParameters, 0, len(a)) + for _, item := range a { + items = append(items, *flattenDeliveryPipelineSerialPipelineStagesDeployParameters(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandDeliveryPipelineSerialPipelineStagesDeployParameters expands an instance of DeliveryPipelineSerialPipelineStagesDeployParameters into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesDeployParameters(c *Client, f *DeliveryPipelineSerialPipelineStagesDeployParameters, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Values; !dcl.IsEmptyValueIndirect(v) { + m["values"] = v + } + if v := f.MatchTargetLabels; !dcl.IsEmptyValueIndirect(v) { + m["matchTargetLabels"] = v + } + + return m, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesDeployParameters flattens an instance of DeliveryPipelineSerialPipelineStagesDeployParameters from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesDeployParameters(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipelineStagesDeployParameters { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &DeliveryPipelineSerialPipelineStagesDeployParameters{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyDeliveryPipelineSerialPipelineStagesDeployParameters + } + r.Values = dcl.FlattenKeyValuePairs(m["values"]) + r.MatchTargetLabels = dcl.FlattenKeyValuePairs(m["matchTargetLabels"]) + + return r +} + +// expandDeliveryPipelineConditionMap expands the contents of DeliveryPipelineCondition into a JSON +// request object. +func expandDeliveryPipelineConditionMap(c *Client, f map[string]DeliveryPipelineCondition, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandDeliveryPipelineCondition(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandDeliveryPipelineConditionSlice expands the contents of DeliveryPipelineCondition into a JSON +// request object. +func expandDeliveryPipelineConditionSlice(c *Client, f []DeliveryPipelineCondition, res *DeliveryPipeline) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandDeliveryPipelineCondition(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenDeliveryPipelineConditionMap flattens the contents of DeliveryPipelineCondition from a JSON +// response object. 
+func flattenDeliveryPipelineConditionMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineCondition { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]DeliveryPipelineCondition{} + } + + if len(a) == 0 { + return map[string]DeliveryPipelineCondition{} + } + + items := make(map[string]DeliveryPipelineCondition) + for k, item := range a { + items[k] = *flattenDeliveryPipelineCondition(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenDeliveryPipelineConditionSlice flattens the contents of DeliveryPipelineCondition from a JSON +// response object. +func flattenDeliveryPipelineConditionSlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineCondition { + a, ok := i.([]interface{}) + if !ok { + return []DeliveryPipelineCondition{} + } + + if len(a) == 0 { + return []DeliveryPipelineCondition{} + } + + items := make([]DeliveryPipelineCondition, 0, len(a)) + for _, item := range a { + items = append(items, *flattenDeliveryPipelineCondition(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandDeliveryPipelineCondition expands an instance of DeliveryPipelineCondition into a JSON +// request object. 
+func expandDeliveryPipelineCondition(c *Client, f *DeliveryPipelineCondition, res *DeliveryPipeline) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v, err := expandDeliveryPipelineConditionPipelineReadyCondition(c, f.PipelineReadyCondition, res); err != nil { + return nil, fmt.Errorf("error expanding PipelineReadyCondition into pipelineReadyCondition: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["pipelineReadyCondition"] = v + } + if v, err := expandDeliveryPipelineConditionTargetsPresentCondition(c, f.TargetsPresentCondition, res); err != nil { + return nil, fmt.Errorf("error expanding TargetsPresentCondition into targetsPresentCondition: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["targetsPresentCondition"] = v + } + if v, err := expandDeliveryPipelineConditionTargetsTypeCondition(c, f.TargetsTypeCondition, res); err != nil { + return nil, fmt.Errorf("error expanding TargetsTypeCondition into targetsTypeCondition: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["targetsTypeCondition"] = v + } + + return m, nil +} + +// flattenDeliveryPipelineCondition flattens an instance of DeliveryPipelineCondition from a JSON +// response object. 
+func flattenDeliveryPipelineCondition(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineCondition { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &DeliveryPipelineCondition{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyDeliveryPipelineCondition + } + r.PipelineReadyCondition = flattenDeliveryPipelineConditionPipelineReadyCondition(c, m["pipelineReadyCondition"], res) + r.TargetsPresentCondition = flattenDeliveryPipelineConditionTargetsPresentCondition(c, m["targetsPresentCondition"], res) + r.TargetsTypeCondition = flattenDeliveryPipelineConditionTargetsTypeCondition(c, m["targetsTypeCondition"], res) + + return r +} + +// expandDeliveryPipelineConditionPipelineReadyConditionMap expands the contents of DeliveryPipelineConditionPipelineReadyCondition into a JSON +// request object. +func expandDeliveryPipelineConditionPipelineReadyConditionMap(c *Client, f map[string]DeliveryPipelineConditionPipelineReadyCondition, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandDeliveryPipelineConditionPipelineReadyCondition(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandDeliveryPipelineConditionPipelineReadyConditionSlice expands the contents of DeliveryPipelineConditionPipelineReadyCondition into a JSON +// request object. 
+func expandDeliveryPipelineConditionPipelineReadyConditionSlice(c *Client, f []DeliveryPipelineConditionPipelineReadyCondition, res *DeliveryPipeline) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandDeliveryPipelineConditionPipelineReadyCondition(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenDeliveryPipelineConditionPipelineReadyConditionMap flattens the contents of DeliveryPipelineConditionPipelineReadyCondition from a JSON
+// response object.
+func flattenDeliveryPipelineConditionPipelineReadyConditionMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineConditionPipelineReadyCondition {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]DeliveryPipelineConditionPipelineReadyCondition{}
+	}
+
+	if len(a) == 0 {
+		return map[string]DeliveryPipelineConditionPipelineReadyCondition{}
+	}
+
+	items := make(map[string]DeliveryPipelineConditionPipelineReadyCondition)
+	for k, item := range a {
+		items[k] = *flattenDeliveryPipelineConditionPipelineReadyCondition(c, item.(map[string]interface{}), res) // NOTE(review): unchecked assertion — assumes each value is a JSON object
+	}
+
+	return items
+}
+
+// flattenDeliveryPipelineConditionPipelineReadyConditionSlice flattens the contents of DeliveryPipelineConditionPipelineReadyCondition from a JSON
+// response object.
+func flattenDeliveryPipelineConditionPipelineReadyConditionSlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineConditionPipelineReadyCondition {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []DeliveryPipelineConditionPipelineReadyCondition{}
+	}
+
+	if len(a) == 0 {
+		return []DeliveryPipelineConditionPipelineReadyCondition{}
+	}
+
+	items := make([]DeliveryPipelineConditionPipelineReadyCondition, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenDeliveryPipelineConditionPipelineReadyCondition(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandDeliveryPipelineConditionPipelineReadyCondition expands an instance of DeliveryPipelineConditionPipelineReadyCondition into a JSON
+// request object. Empty inputs expand to nil so callers can omit the field.
+func expandDeliveryPipelineConditionPipelineReadyCondition(c *Client, f *DeliveryPipelineConditionPipelineReadyCondition, res *DeliveryPipeline) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.Status; !dcl.IsEmptyValueIndirect(v) {
+		m["status"] = v
+	}
+	if v := f.UpdateTime; !dcl.IsEmptyValueIndirect(v) {
+		m["updateTime"] = v
+	}
+
+	return m, nil
+}
+
+// flattenDeliveryPipelineConditionPipelineReadyCondition flattens an instance of DeliveryPipelineConditionPipelineReadyCondition from a JSON
+// response object.
+func flattenDeliveryPipelineConditionPipelineReadyCondition(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineConditionPipelineReadyCondition {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &DeliveryPipelineConditionPipelineReadyCondition{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyDeliveryPipelineConditionPipelineReadyCondition // canonical empty-object sentinel (distinct from nil)
+	}
+	r.Status = dcl.FlattenBool(m["status"])
+	r.UpdateTime = dcl.FlattenString(m["updateTime"])
+
+	return r
+}
+
+// expandDeliveryPipelineConditionTargetsPresentConditionMap expands the contents of DeliveryPipelineConditionTargetsPresentCondition into a JSON
+// request object.
+func expandDeliveryPipelineConditionTargetsPresentConditionMap(c *Client, f map[string]DeliveryPipelineConditionTargetsPresentCondition, res *DeliveryPipeline) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandDeliveryPipelineConditionTargetsPresentCondition(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandDeliveryPipelineConditionTargetsPresentConditionSlice expands the contents of DeliveryPipelineConditionTargetsPresentCondition into a JSON
+// request object.
+func expandDeliveryPipelineConditionTargetsPresentConditionSlice(c *Client, f []DeliveryPipelineConditionTargetsPresentCondition, res *DeliveryPipeline) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandDeliveryPipelineConditionTargetsPresentCondition(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenDeliveryPipelineConditionTargetsPresentConditionMap flattens the contents of DeliveryPipelineConditionTargetsPresentCondition from a JSON
+// response object.
+func flattenDeliveryPipelineConditionTargetsPresentConditionMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineConditionTargetsPresentCondition {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]DeliveryPipelineConditionTargetsPresentCondition{}
+	}
+
+	if len(a) == 0 {
+		return map[string]DeliveryPipelineConditionTargetsPresentCondition{}
+	}
+
+	items := make(map[string]DeliveryPipelineConditionTargetsPresentCondition)
+	for k, item := range a {
+		items[k] = *flattenDeliveryPipelineConditionTargetsPresentCondition(c, item.(map[string]interface{}), res) // NOTE(review): unchecked assertion — assumes each value is a JSON object
+	}
+
+	return items
+}
+
+// flattenDeliveryPipelineConditionTargetsPresentConditionSlice flattens the contents of DeliveryPipelineConditionTargetsPresentCondition from a JSON
+// response object.
+func flattenDeliveryPipelineConditionTargetsPresentConditionSlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineConditionTargetsPresentCondition {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []DeliveryPipelineConditionTargetsPresentCondition{}
+	}
+
+	if len(a) == 0 {
+		return []DeliveryPipelineConditionTargetsPresentCondition{}
+	}
+
+	items := make([]DeliveryPipelineConditionTargetsPresentCondition, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenDeliveryPipelineConditionTargetsPresentCondition(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandDeliveryPipelineConditionTargetsPresentCondition expands an instance of DeliveryPipelineConditionTargetsPresentCondition into a JSON
+// request object.
+func expandDeliveryPipelineConditionTargetsPresentCondition(c *Client, f *DeliveryPipelineConditionTargetsPresentCondition, res *DeliveryPipeline) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.Status; !dcl.IsEmptyValueIndirect(v) {
+		m["status"] = v
+	}
+	if v := f.MissingTargets; v != nil { // plain nil check: an empty (non-nil) list is still sent
+		m["missingTargets"] = v
+	}
+	if v := f.UpdateTime; !dcl.IsEmptyValueIndirect(v) {
+		m["updateTime"] = v
+	}
+
+	return m, nil
+}
+
+// flattenDeliveryPipelineConditionTargetsPresentCondition flattens an instance of DeliveryPipelineConditionTargetsPresentCondition from a JSON
+// response object.
+func flattenDeliveryPipelineConditionTargetsPresentCondition(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineConditionTargetsPresentCondition {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &DeliveryPipelineConditionTargetsPresentCondition{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyDeliveryPipelineConditionTargetsPresentCondition
+	}
+	r.Status = dcl.FlattenBool(m["status"])
+	r.MissingTargets = dcl.FlattenStringSlice(m["missingTargets"])
+	r.UpdateTime = dcl.FlattenString(m["updateTime"])
+
+	return r
+}
+
+// expandDeliveryPipelineConditionTargetsTypeConditionMap expands the contents of DeliveryPipelineConditionTargetsTypeCondition into a JSON
+// request object.
+func expandDeliveryPipelineConditionTargetsTypeConditionMap(c *Client, f map[string]DeliveryPipelineConditionTargetsTypeCondition, res *DeliveryPipeline) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandDeliveryPipelineConditionTargetsTypeCondition(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil { // skip entries that expanded to empty
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandDeliveryPipelineConditionTargetsTypeConditionSlice expands the contents of DeliveryPipelineConditionTargetsTypeCondition into a JSON
+// request object.
+func expandDeliveryPipelineConditionTargetsTypeConditionSlice(c *Client, f []DeliveryPipelineConditionTargetsTypeCondition, res *DeliveryPipeline) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandDeliveryPipelineConditionTargetsTypeCondition(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenDeliveryPipelineConditionTargetsTypeConditionMap flattens the contents of DeliveryPipelineConditionTargetsTypeCondition from a JSON
+// response object.
+func flattenDeliveryPipelineConditionTargetsTypeConditionMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineConditionTargetsTypeCondition {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]DeliveryPipelineConditionTargetsTypeCondition{}
+	}
+
+	if len(a) == 0 {
+		return map[string]DeliveryPipelineConditionTargetsTypeCondition{}
+	}
+
+	items := make(map[string]DeliveryPipelineConditionTargetsTypeCondition)
+	for k, item := range a {
+		items[k] = *flattenDeliveryPipelineConditionTargetsTypeCondition(c, item.(map[string]interface{}), res) // NOTE(review): unchecked assertion — assumes each value is a JSON object
+	}
+
+	return items
+}
+
+// flattenDeliveryPipelineConditionTargetsTypeConditionSlice flattens the contents of DeliveryPipelineConditionTargetsTypeCondition from a JSON
+// response object.
+func flattenDeliveryPipelineConditionTargetsTypeConditionSlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineConditionTargetsTypeCondition {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []DeliveryPipelineConditionTargetsTypeCondition{}
+	}
+
+	if len(a) == 0 {
+		return []DeliveryPipelineConditionTargetsTypeCondition{}
+	}
+
+	items := make([]DeliveryPipelineConditionTargetsTypeCondition, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenDeliveryPipelineConditionTargetsTypeCondition(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandDeliveryPipelineConditionTargetsTypeCondition expands an instance of DeliveryPipelineConditionTargetsTypeCondition into a JSON
+// request object.
+func expandDeliveryPipelineConditionTargetsTypeCondition(c *Client, f *DeliveryPipelineConditionTargetsTypeCondition, res *DeliveryPipeline) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.Status; !dcl.IsEmptyValueIndirect(v) {
+		m["status"] = v
+	}
+	if v := f.ErrorDetails; !dcl.IsEmptyValueIndirect(v) {
+		m["errorDetails"] = v
+	}
+
+	return m, nil
+}
+
+// flattenDeliveryPipelineConditionTargetsTypeCondition flattens an instance of DeliveryPipelineConditionTargetsTypeCondition from a JSON
+// response object.
+func flattenDeliveryPipelineConditionTargetsTypeCondition(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineConditionTargetsTypeCondition {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &DeliveryPipelineConditionTargetsTypeCondition{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyDeliveryPipelineConditionTargetsTypeCondition // canonical empty-object sentinel (distinct from nil)
+	}
+	r.Status = dcl.FlattenBool(m["status"])
+	r.ErrorDetails = dcl.FlattenString(m["errorDetails"])
+
+	return r
+}
+
+// This function returns a matcher that checks whether a serialized resource matches this resource
+// in its parameters (as defined by the fields in a Get, which definitionally define resource
+// identity). This is useful in extracting the element from a List call.
+func (r *DeliveryPipeline) matcher(c *Client) func([]byte) bool {
+	return func(b []byte) bool {
+		cr, err := unmarshalDeliveryPipeline(b, c, r)
+		if err != nil {
+			c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.")
+			return false
+		}
+		nr := r.urlNormalized() // compare normalized identities (project/location/name)
+		ncr := cr.urlNormalized()
+		c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr)
+
+		if nr.Project == nil && ncr.Project == nil {
+			c.Config.Logger.Info("Both Project fields null - considering equal.")
+		} else if nr.Project == nil || ncr.Project == nil {
+			c.Config.Logger.Info("Only one Project field is null - considering unequal.")
+			return false
+		} else if *nr.Project != *ncr.Project {
+			return false
+		}
+		if nr.Location == nil && ncr.Location == nil {
+			c.Config.Logger.Info("Both Location fields null - considering equal.")
+		} else if nr.Location == nil || ncr.Location == nil {
+			c.Config.Logger.Info("Only one Location field is null - considering unequal.")
+			return false
+		} else if *nr.Location != *ncr.Location {
+			return false
+		}
+		if nr.Name == nil && ncr.Name == nil {
+			c.Config.Logger.Info("Both Name fields null - considering equal.")
+		} else if nr.Name == nil || ncr.Name == nil {
+			c.Config.Logger.Info("Only one Name field is null - considering unequal.")
+			return false
+		} else if *nr.Name != *ncr.Name {
+			return false
+		}
+		return true
+	}
+}
+
+type deliveryPipelineDiff struct {
+	// The diff should include one or the other of RequiresRecreate or UpdateOp.
+	RequiresRecreate bool
+	UpdateOp         deliveryPipelineApiOperation
+	FieldName        string // used for error logging
+}
+
+func convertFieldDiffsToDeliveryPipelineDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]deliveryPipelineDiff, error) {
+	opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff)
+	// Map each operation name to the field diffs associated with it.
+	for _, fd := range fds {
+		for _, ro := range fd.ResultingOperation {
+			if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok {
+				fieldDiffs = append(fieldDiffs, fd)
+				opNamesToFieldDiffs[ro] = fieldDiffs
+			} else {
+				config.Logger.Infof("%s required due to diff: %v", ro, fd)
+				opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd}
+			}
+		}
+	}
+	var diffs []deliveryPipelineDiff
+	// For each operation name, create a deliveryPipelineDiff which contains the operation.
+	for opName, fieldDiffs := range opNamesToFieldDiffs {
+		// Use the first field diff's field name for logging required recreate error.
+		diff := deliveryPipelineDiff{FieldName: fieldDiffs[0].FieldName}
+		if opName == "Recreate" { // special-cased: maps to RequiresRecreate rather than an update operation
+			diff.RequiresRecreate = true
+		} else {
+			apiOp, err := convertOpNameToDeliveryPipelineApiOperation(opName, fieldDiffs, opts...)
+			if err != nil {
+				return diffs, err
+			}
+			diff.UpdateOp = apiOp
+		}
+		diffs = append(diffs, diff)
+	}
+	return diffs, nil
+}
+
+func convertOpNameToDeliveryPipelineApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (deliveryPipelineApiOperation, error) {
+	switch opName {
+
+	case "updateDeliveryPipelineUpdateDeliveryPipelineOperation":
+		return &updateDeliveryPipelineUpdateDeliveryPipelineOperation{FieldDiffs: fieldDiffs}, nil
+
+	default:
+		return nil, fmt.Errorf("no such operation with name: %v", opName)
+	}
+}
+
+func extractDeliveryPipelineFields(r *DeliveryPipeline) error {
+	vSerialPipeline := r.SerialPipeline
+	if vSerialPipeline == nil {
+		// note: explicitly not the empty object.
+		vSerialPipeline = &DeliveryPipelineSerialPipeline{}
+	}
+	if err := extractDeliveryPipelineSerialPipelineFields(r, vSerialPipeline); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vSerialPipeline) {
+		r.SerialPipeline = vSerialPipeline
+	}
+	vCondition := r.Condition
+	if vCondition == nil {
+		// note: explicitly not the empty object.
+		vCondition = &DeliveryPipelineCondition{}
+	}
+	if err := extractDeliveryPipelineConditionFields(r, vCondition); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vCondition) { // only write back if extraction produced a non-empty value
+		r.Condition = vCondition
+	}
+	return nil
+}
+func extractDeliveryPipelineSerialPipelineFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipeline) error {
+	return nil
+}
+func extractDeliveryPipelineSerialPipelineStagesFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStages) error {
+	vStrategy := o.Strategy
+	if vStrategy == nil {
+		// note: explicitly not the empty object.
+		vStrategy = &DeliveryPipelineSerialPipelineStagesStrategy{}
+	}
+	if err := extractDeliveryPipelineSerialPipelineStagesStrategyFields(r, vStrategy); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vStrategy) {
+		o.Strategy = vStrategy
+	}
+	return nil
+}
+func extractDeliveryPipelineSerialPipelineStagesStrategyFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategy) error {
+	vStandard := o.Standard
+	if vStandard == nil {
+		// note: explicitly not the empty object.
+		vStandard = &DeliveryPipelineSerialPipelineStagesStrategyStandard{}
+	}
+	if err := extractDeliveryPipelineSerialPipelineStagesStrategyStandardFields(r, vStandard); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vStandard) {
+		o.Standard = vStandard
+	}
+	vCanary := o.Canary
+	if vCanary == nil {
+		// note: explicitly not the empty object.
+		vCanary = &DeliveryPipelineSerialPipelineStagesStrategyCanary{}
+	}
+	if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryFields(r, vCanary); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vCanary) {
+		o.Canary = vCanary
+	}
+	return nil
+}
+func extractDeliveryPipelineSerialPipelineStagesStrategyStandardFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyStandard) error {
+	vPredeploy := o.Predeploy
+	if vPredeploy == nil {
+		// note: explicitly not the empty object.
+		vPredeploy = &DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy{}
+	}
+	if err := extractDeliveryPipelineSerialPipelineStagesStrategyStandardPredeployFields(r, vPredeploy); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vPredeploy) { // only write back if extraction produced a non-empty value
+		o.Predeploy = vPredeploy
+	}
+	vPostdeploy := o.Postdeploy
+	if vPostdeploy == nil {
+		// note: explicitly not the empty object.
+		vPostdeploy = &DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy{}
+	}
+	if err := extractDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeployFields(r, vPostdeploy); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vPostdeploy) {
+		o.Postdeploy = vPostdeploy
+	}
+	return nil
+}
+func extractDeliveryPipelineSerialPipelineStagesStrategyStandardPredeployFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy) error {
+	return nil
+}
+func extractDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeployFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy) error {
+	return nil
+}
+func extractDeliveryPipelineSerialPipelineStagesStrategyCanaryFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanary) error {
+	vRuntimeConfig := o.RuntimeConfig
+	if vRuntimeConfig == nil {
+		// note: explicitly not the empty object.
+		vRuntimeConfig = &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig{}
+	}
+	if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigFields(r, vRuntimeConfig); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vRuntimeConfig) {
+		o.RuntimeConfig = vRuntimeConfig
+	}
+	vCanaryDeployment := o.CanaryDeployment
+	if vCanaryDeployment == nil {
+		// note: explicitly not the empty object.
+		vCanaryDeployment = &DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment{}
+	}
+	if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentFields(r, vCanaryDeployment); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vCanaryDeployment) { // only write back if extraction produced a non-empty value
+		o.CanaryDeployment = vCanaryDeployment
+	}
+	vCustomCanaryDeployment := o.CustomCanaryDeployment
+	if vCustomCanaryDeployment == nil {
+		// note: explicitly not the empty object.
+		vCustomCanaryDeployment = &DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment{}
+	}
+	if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentFields(r, vCustomCanaryDeployment); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vCustomCanaryDeployment) {
+		o.CustomCanaryDeployment = vCustomCanaryDeployment
+	}
+	return nil
+}
+func extractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig) error {
+	vKubernetes := o.Kubernetes
+	if vKubernetes == nil {
+		// note: explicitly not the empty object.
+		vKubernetes = &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes{}
+	}
+	if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesFields(r, vKubernetes); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vKubernetes) {
+		o.Kubernetes = vKubernetes
+	}
+	vCloudRun := o.CloudRun
+	if vCloudRun == nil {
+		// note: explicitly not the empty object.
+		vCloudRun = &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun{}
+	}
+	if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunFields(r, vCloudRun); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vCloudRun) { // only write back if extraction produced a non-empty value
+		o.CloudRun = vCloudRun
+	}
+	return nil
+}
+func extractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes) error {
+	vGatewayServiceMesh := o.GatewayServiceMesh
+	if vGatewayServiceMesh == nil {
+		// note: explicitly not the empty object.
+		vGatewayServiceMesh = &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh{}
+	}
+	if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshFields(r, vGatewayServiceMesh); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vGatewayServiceMesh) {
+		o.GatewayServiceMesh = vGatewayServiceMesh
+	}
+	vServiceNetworking := o.ServiceNetworking
+	if vServiceNetworking == nil {
+		// note: explicitly not the empty object.
+		vServiceNetworking = &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking{}
+	}
+	if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingFields(r, vServiceNetworking); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vServiceNetworking) {
+		o.ServiceNetworking = vServiceNetworking
+	}
+	return nil
+}
+func extractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh) error {
+	vRouteDestinations := o.RouteDestinations
+	if vRouteDestinations == nil {
+		// note: explicitly not the empty object.
+		vRouteDestinations = &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations{}
+	}
+	if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinationsFields(r, vRouteDestinations); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vRouteDestinations) { // only write back if extraction produced a non-empty value
+		o.RouteDestinations = vRouteDestinations
+	}
+	return nil
+}
+func extractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinationsFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations) error {
+	return nil
+}
+func extractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking) error {
+	return nil
+}
+func extractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun) error {
+	return nil
+}
+func extractDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment) error {
+	vPredeploy := o.Predeploy
+	if vPredeploy == nil {
+		// note: explicitly not the empty object.
+		vPredeploy = &DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy{}
+	}
+	if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeployFields(r, vPredeploy); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vPredeploy) {
+		o.Predeploy = vPredeploy
+	}
+	vPostdeploy := o.Postdeploy
+	if vPostdeploy == nil {
+		// note: explicitly not the empty object.
+		vPostdeploy = &DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy{}
+	}
+	if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeployFields(r, vPostdeploy); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vPostdeploy) { // only write back if extraction produced a non-empty value
+		o.Postdeploy = vPostdeploy
+	}
+	return nil
+}
+func extractDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeployFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy) error {
+	return nil
+}
+func extractDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeployFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy) error {
+	return nil
+}
+func extractDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment) error {
+	return nil
+}
+func extractDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs) error {
+	vPredeploy := o.Predeploy
+	if vPredeploy == nil {
+		// note: explicitly not the empty object.
+		vPredeploy = &DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy{}
+	}
+	if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeployFields(r, vPredeploy); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vPredeploy) {
+		o.Predeploy = vPredeploy
+	}
+	vPostdeploy := o.Postdeploy
+	if vPostdeploy == nil {
+		// note: explicitly not the empty object.
+		vPostdeploy = &DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy{}
+	}
+	if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeployFields(r, vPostdeploy); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vPostdeploy) { // only write back if extraction produced a non-empty value
+		o.Postdeploy = vPostdeploy
+	}
+	return nil
+}
+func extractDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeployFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy) error {
+	return nil
+}
+func extractDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeployFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy) error {
+	return nil
+}
+func extractDeliveryPipelineSerialPipelineStagesDeployParametersFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesDeployParameters) error {
+	return nil
+}
+func extractDeliveryPipelineConditionFields(r *DeliveryPipeline, o *DeliveryPipelineCondition) error {
+	vPipelineReadyCondition := o.PipelineReadyCondition
+	if vPipelineReadyCondition == nil {
+		// note: explicitly not the empty object.
+		vPipelineReadyCondition = &DeliveryPipelineConditionPipelineReadyCondition{}
+	}
+	if err := extractDeliveryPipelineConditionPipelineReadyConditionFields(r, vPipelineReadyCondition); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vPipelineReadyCondition) {
+		o.PipelineReadyCondition = vPipelineReadyCondition
+	}
+	vTargetsPresentCondition := o.TargetsPresentCondition
+	if vTargetsPresentCondition == nil {
+		// note: explicitly not the empty object.
+		vTargetsPresentCondition = &DeliveryPipelineConditionTargetsPresentCondition{}
+	}
+	if err := extractDeliveryPipelineConditionTargetsPresentConditionFields(r, vTargetsPresentCondition); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vTargetsPresentCondition) { // only write back if extraction produced a non-empty value
+		o.TargetsPresentCondition = vTargetsPresentCondition
+	}
+	vTargetsTypeCondition := o.TargetsTypeCondition
+	if vTargetsTypeCondition == nil {
+		// note: explicitly not the empty object.
+		vTargetsTypeCondition = &DeliveryPipelineConditionTargetsTypeCondition{}
+	}
+	if err := extractDeliveryPipelineConditionTargetsTypeConditionFields(r, vTargetsTypeCondition); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vTargetsTypeCondition) {
+		o.TargetsTypeCondition = vTargetsTypeCondition
+	}
+	return nil
+}
+func extractDeliveryPipelineConditionPipelineReadyConditionFields(r *DeliveryPipeline, o *DeliveryPipelineConditionPipelineReadyCondition) error {
+	return nil
+}
+func extractDeliveryPipelineConditionTargetsPresentConditionFields(r *DeliveryPipeline, o *DeliveryPipelineConditionTargetsPresentCondition) error {
+	return nil
+}
+func extractDeliveryPipelineConditionTargetsTypeConditionFields(r *DeliveryPipeline, o *DeliveryPipelineConditionTargetsTypeCondition) error {
+	return nil
+}
+
+func postReadExtractDeliveryPipelineFields(r *DeliveryPipeline) error {
+	vSerialPipeline := r.SerialPipeline
+	if vSerialPipeline == nil {
+		// note: explicitly not the empty object.
+		vSerialPipeline = &DeliveryPipelineSerialPipeline{}
+	}
+	if err := postReadExtractDeliveryPipelineSerialPipelineFields(r, vSerialPipeline); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vSerialPipeline) {
+		r.SerialPipeline = vSerialPipeline
+	}
+	vCondition := r.Condition
+	if vCondition == nil {
+		// note: explicitly not the empty object.
+		vCondition = &DeliveryPipelineCondition{}
+	}
+	if err := postReadExtractDeliveryPipelineConditionFields(r, vCondition); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vCondition) {
+		r.Condition = vCondition
+	}
+	return nil
+}
+func postReadExtractDeliveryPipelineSerialPipelineFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipeline) error {
+	return nil
+}
+func postReadExtractDeliveryPipelineSerialPipelineStagesFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStages) error {
+	vStrategy := o.Strategy
+	if vStrategy == nil {
+		// note: explicitly not the empty object.
+		vStrategy = &DeliveryPipelineSerialPipelineStagesStrategy{}
+	}
+	if err := extractDeliveryPipelineSerialPipelineStagesStrategyFields(r, vStrategy); err != nil { // NOTE(review): delegates to the non-postRead extractor; confirm this is intended generator output
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vStrategy) {
+		o.Strategy = vStrategy
+	}
+	return nil
+}
+func postReadExtractDeliveryPipelineSerialPipelineStagesStrategyFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategy) error {
+	vStandard := o.Standard
+	if vStandard == nil {
+		// note: explicitly not the empty object.
+		vStandard = &DeliveryPipelineSerialPipelineStagesStrategyStandard{}
+	}
+	if err := extractDeliveryPipelineSerialPipelineStagesStrategyStandardFields(r, vStandard); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vStandard) {
+		o.Standard = vStandard
+	}
+	vCanary := o.Canary
+	if vCanary == nil {
+		// note: explicitly not the empty object.
+		vCanary = &DeliveryPipelineSerialPipelineStagesStrategyCanary{}
+	}
+	if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryFields(r, vCanary); err != nil { // NOTE(review): delegates to the non-postRead extractor; confirm this is intended generator output
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vCanary) {
+		o.Canary = vCanary
+	}
+	return nil
+}
+func postReadExtractDeliveryPipelineSerialPipelineStagesStrategyStandardFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyStandard) error {
+	vPredeploy := o.Predeploy
+	if vPredeploy == nil {
+		// note: explicitly not the empty object.
+		vPredeploy = &DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy{}
+	}
+	if err := extractDeliveryPipelineSerialPipelineStagesStrategyStandardPredeployFields(r, vPredeploy); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vPredeploy) {
+		o.Predeploy = vPredeploy
+	}
+	vPostdeploy := o.Postdeploy
+	if vPostdeploy == nil {
+		// note: explicitly not the empty object.
+		vPostdeploy = &DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy{}
+	}
+	if err := extractDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeployFields(r, vPostdeploy); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vPostdeploy) {
+		o.Postdeploy = vPostdeploy
+	}
+	return nil
+}
+func postReadExtractDeliveryPipelineSerialPipelineStagesStrategyStandardPredeployFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy) error {
+	return nil
+}
+func postReadExtractDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeployFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy) error {
+	return nil
+}
+func postReadExtractDeliveryPipelineSerialPipelineStagesStrategyCanaryFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanary) error {
+	vRuntimeConfig := o.RuntimeConfig
+	if vRuntimeConfig == nil {
+		// note: explicitly not the empty object.
+ vRuntimeConfig = &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigFields(r, vRuntimeConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vRuntimeConfig) { + o.RuntimeConfig = vRuntimeConfig + } + vCanaryDeployment := o.CanaryDeployment + if vCanaryDeployment == nil { + // note: explicitly not the empty object. + vCanaryDeployment = &DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentFields(r, vCanaryDeployment); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vCanaryDeployment) { + o.CanaryDeployment = vCanaryDeployment + } + vCustomCanaryDeployment := o.CustomCanaryDeployment + if vCustomCanaryDeployment == nil { + // note: explicitly not the empty object. + vCustomCanaryDeployment = &DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentFields(r, vCustomCanaryDeployment); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vCustomCanaryDeployment) { + o.CustomCanaryDeployment = vCustomCanaryDeployment + } + return nil +} +func postReadExtractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig) error { + vKubernetes := o.Kubernetes + if vKubernetes == nil { + // note: explicitly not the empty object. 
+ vKubernetes = &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesFields(r, vKubernetes); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vKubernetes) { + o.Kubernetes = vKubernetes + } + vCloudRun := o.CloudRun + if vCloudRun == nil { + // note: explicitly not the empty object. + vCloudRun = &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunFields(r, vCloudRun); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vCloudRun) { + o.CloudRun = vCloudRun + } + return nil +} +func postReadExtractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes) error { + vGatewayServiceMesh := o.GatewayServiceMesh + if vGatewayServiceMesh == nil { + // note: explicitly not the empty object. + vGatewayServiceMesh = &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshFields(r, vGatewayServiceMesh); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vGatewayServiceMesh) { + o.GatewayServiceMesh = vGatewayServiceMesh + } + vServiceNetworking := o.ServiceNetworking + if vServiceNetworking == nil { + // note: explicitly not the empty object. 
+ vServiceNetworking = &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingFields(r, vServiceNetworking); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vServiceNetworking) { + o.ServiceNetworking = vServiceNetworking + } + return nil +} +func postReadExtractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh) error { + vRouteDestinations := o.RouteDestinations + if vRouteDestinations == nil { + // note: explicitly not the empty object. + vRouteDestinations = &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinationsFields(r, vRouteDestinations); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vRouteDestinations) { + o.RouteDestinations = vRouteDestinations + } + return nil +} +func postReadExtractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinationsFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations) error { + return nil +} +func postReadExtractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking) error { + return nil +} +func postReadExtractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun) error { + return nil +} +func 
postReadExtractDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment) error { + vPredeploy := o.Predeploy + if vPredeploy == nil { + // note: explicitly not the empty object. + vPredeploy = &DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeployFields(r, vPredeploy); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vPredeploy) { + o.Predeploy = vPredeploy + } + vPostdeploy := o.Postdeploy + if vPostdeploy == nil { + // note: explicitly not the empty object. + vPostdeploy = &DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeployFields(r, vPostdeploy); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vPostdeploy) { + o.Postdeploy = vPostdeploy + } + return nil +} +func postReadExtractDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeployFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy) error { + return nil +} +func postReadExtractDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeployFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy) error { + return nil +} +func postReadExtractDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment) error { + return nil +} +func postReadExtractDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs) error { + vPredeploy := o.Predeploy + if 
vPredeploy == nil { + // note: explicitly not the empty object. + vPredeploy = &DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeployFields(r, vPredeploy); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vPredeploy) { + o.Predeploy = vPredeploy + } + vPostdeploy := o.Postdeploy + if vPostdeploy == nil { + // note: explicitly not the empty object. + vPostdeploy = &DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeployFields(r, vPostdeploy); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vPostdeploy) { + o.Postdeploy = vPostdeploy + } + return nil +} +func postReadExtractDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeployFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy) error { + return nil +} +func postReadExtractDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeployFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy) error { + return nil +} +func postReadExtractDeliveryPipelineSerialPipelineStagesDeployParametersFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesDeployParameters) error { + return nil +} +func postReadExtractDeliveryPipelineConditionFields(r *DeliveryPipeline, o *DeliveryPipelineCondition) error { + vPipelineReadyCondition := o.PipelineReadyCondition + if vPipelineReadyCondition == nil { + // note: explicitly not the empty object. 
+ vPipelineReadyCondition = &DeliveryPipelineConditionPipelineReadyCondition{} + } + if err := extractDeliveryPipelineConditionPipelineReadyConditionFields(r, vPipelineReadyCondition); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vPipelineReadyCondition) { + o.PipelineReadyCondition = vPipelineReadyCondition + } + vTargetsPresentCondition := o.TargetsPresentCondition + if vTargetsPresentCondition == nil { + // note: explicitly not the empty object. + vTargetsPresentCondition = &DeliveryPipelineConditionTargetsPresentCondition{} + } + if err := extractDeliveryPipelineConditionTargetsPresentConditionFields(r, vTargetsPresentCondition); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vTargetsPresentCondition) { + o.TargetsPresentCondition = vTargetsPresentCondition + } + vTargetsTypeCondition := o.TargetsTypeCondition + if vTargetsTypeCondition == nil { + // note: explicitly not the empty object. + vTargetsTypeCondition = &DeliveryPipelineConditionTargetsTypeCondition{} + } + if err := extractDeliveryPipelineConditionTargetsTypeConditionFields(r, vTargetsTypeCondition); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vTargetsTypeCondition) { + o.TargetsTypeCondition = vTargetsTypeCondition + } + return nil +} +func postReadExtractDeliveryPipelineConditionPipelineReadyConditionFields(r *DeliveryPipeline, o *DeliveryPipelineConditionPipelineReadyCondition) error { + return nil +} +func postReadExtractDeliveryPipelineConditionTargetsPresentConditionFields(r *DeliveryPipeline, o *DeliveryPipelineConditionTargetsPresentCondition) error { + return nil +} +func postReadExtractDeliveryPipelineConditionTargetsTypeConditionFields(r *DeliveryPipeline, o *DeliveryPipelineConditionTargetsTypeCondition) error { + return nil +} diff --git a/mmv1/third_party/terraform/services/clouddeploy/delivery_pipeline_schema.go.tmpl b/mmv1/third_party/terraform/services/clouddeploy/delivery_pipeline_schema.go.tmpl new file mode 100644 index 
000000000000..052fa6a83496 --- /dev/null +++ b/mmv1/third_party/terraform/services/clouddeploy/delivery_pipeline_schema.go.tmpl @@ -0,0 +1,753 @@ +package clouddeploy + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +func DCLDeliveryPipelineSchema() *dcl.Schema { + return &dcl.Schema{ + Info: &dcl.Info{ + Title: "Clouddeploy/DeliveryPipeline", + Description: "The Cloud Deploy `DeliveryPipeline` resource", + StructName: "DeliveryPipeline", + Reference: &dcl.Link{ + Text: "REST API", + URL: "https://cloud.google.com/deploy/docs/api/reference/rest/v1/projects.locations.deliveryPipelines", + }, + }, + Paths: &dcl.Paths{ + Get: &dcl.Path{ + Description: "The function used to get information about a DeliveryPipeline", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "deliveryPipeline", + Required: true, + Description: "A full instance of a DeliveryPipeline", + }, + }, + }, + Apply: &dcl.Path{ + Description: "The function used to apply information about a DeliveryPipeline", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "deliveryPipeline", + Required: true, + Description: "A full instance of a DeliveryPipeline", + }, + }, + }, + Delete: &dcl.Path{ + Description: "The function used to delete a DeliveryPipeline", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "deliveryPipeline", + Required: true, + Description: "A full instance of a DeliveryPipeline", + }, + }, + }, + DeleteAll: &dcl.Path{ + Description: "The function used to delete all DeliveryPipeline", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "project", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + dcl.PathParameters{ + Name: "location", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + }, + }, + List: &dcl.Path{ + Description: "The function used to list information about many DeliveryPipeline", + Parameters: 
[]dcl.PathParameters{ + dcl.PathParameters{ + Name: "project", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + dcl.PathParameters{ + Name: "location", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + }, + }, + }, + Components: &dcl.Components{ + Schemas: map[string]*dcl.Component{ + "DeliveryPipeline": &dcl.Component{ + Title: "DeliveryPipeline", + ID: "projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/deliveryPipelines/{{ "{{" }}name{{ "}}" }}", + ParentContainer: "project", + HasCreate: true, + SchemaProperty: dcl.Property{ + Type: "object", + Required: []string{ + "name", + "project", + "location", + }, + Properties: map[string]*dcl.Property{ + "annotations": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "Annotations", + Description: "User annotations. These attributes can only be set and used by the user, and not by Google Cloud Deploy. See https://google.aip.dev/128#annotations for more details such as format and size limitations.", + }, + "condition": &dcl.Property{ + Type: "object", + GoName: "Condition", + GoType: "DeliveryPipelineCondition", + ReadOnly: true, + Description: "Output only. Information around the state of the Delivery Pipeline.", + Properties: map[string]*dcl.Property{ + "pipelineReadyCondition": &dcl.Property{ + Type: "object", + GoName: "PipelineReadyCondition", + GoType: "DeliveryPipelineConditionPipelineReadyCondition", + Description: "Details around the Pipeline's overall status.", + Properties: map[string]*dcl.Property{ + "status": &dcl.Property{ + Type: "boolean", + GoName: "Status", + Description: "True if the Pipeline is in a valid state. Otherwise at least one condition in `PipelineCondition` is in an invalid state. 
Iterate over those conditions and see which condition(s) has status = false to find out what is wrong with the Pipeline.", + }, + "updateTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "UpdateTime", + Description: "Last time the condition was updated.", + }, + }, + }, + "targetsPresentCondition": &dcl.Property{ + Type: "object", + GoName: "TargetsPresentCondition", + GoType: "DeliveryPipelineConditionTargetsPresentCondition", + Description: "Details around targets enumerated in the pipeline.", + Properties: map[string]*dcl.Property{ + "missingTargets": &dcl.Property{ + Type: "array", + GoName: "MissingTargets", + Description: "The list of Target names that are missing. For example, projects/{project_id}/locations/{location_name}/targets/{target_name}.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Clouddeploy/Target", + Field: "selfLink", + }, + }, + }, + }, + "status": &dcl.Property{ + Type: "boolean", + GoName: "Status", + Description: "True if there aren't any missing Targets.", + }, + "updateTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "UpdateTime", + Description: "Last time the condition was updated.", + }, + }, + }, + "targetsTypeCondition": &dcl.Property{ + Type: "object", + GoName: "TargetsTypeCondition", + GoType: "DeliveryPipelineConditionTargetsTypeCondition", + Description: "Details on whether the targets enumerated in the pipeline are of the same type.", + Properties: map[string]*dcl.Property{ + "errorDetails": &dcl.Property{ + Type: "string", + GoName: "ErrorDetails", + Description: "Human readable error message.", + }, + "status": &dcl.Property{ + Type: "boolean", + GoName: "Status", + Description: "True if the targets are all a comparable type. For example this is true if all targets are GKE clusters. 
This is false if some targets are Cloud Run targets and others are GKE clusters.", + }, + }, + }, + }, + }, + "createTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "CreateTime", + ReadOnly: true, + Description: "Output only. Time at which the pipeline was created.", + Immutable: true, + }, + "description": &dcl.Property{ + Type: "string", + GoName: "Description", + Description: "Description of the `DeliveryPipeline`. Max length is 255 characters.", + }, + "etag": &dcl.Property{ + Type: "string", + GoName: "Etag", + ReadOnly: true, + Description: "This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.", + Immutable: true, + }, + "labels": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "Labels", + Description: "Labels are attributes that can be set and used by both the user and by Google Cloud Deploy. Labels must meet the following constraints: * Keys and values can contain only lowercase letters, numeric characters, underscores, and dashes. * All characters must use UTF-8 encoding, and international characters are allowed. * Keys must start with a lowercase letter or international character. * Each resource is limited to a maximum of 64 labels. Both keys and values are additionally constrained to be <= 128 bytes.", + }, + "location": &dcl.Property{ + Type: "string", + GoName: "Location", + Description: "The location for the resource", + Immutable: true, + Parameter: true, + }, + "name": &dcl.Property{ + Type: "string", + GoName: "Name", + Description: "Name of the `DeliveryPipeline`. 
Format is `[a-z]([a-z0-9-]{0,61}[a-z0-9])?`.", + Immutable: true, + Parameter: true, + HasLongForm: true, + }, + "project": &dcl.Property{ + Type: "string", + GoName: "Project", + Description: "The project for the resource", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Cloudresourcemanager/Project", + Field: "name", + Parent: true, + }, + }, + Parameter: true, + }, + "serialPipeline": &dcl.Property{ + Type: "object", + GoName: "SerialPipeline", + GoType: "DeliveryPipelineSerialPipeline", + Description: "SerialPipeline defines a sequential set of stages for a `DeliveryPipeline`.", + Properties: map[string]*dcl.Property{ + "stages": &dcl.Property{ + Type: "array", + GoName: "Stages", + Description: "Each stage specifies configuration for a `Target`. The ordering of this list defines the promotion flow.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "object", + GoType: "DeliveryPipelineSerialPipelineStages", + Properties: map[string]*dcl.Property{ + "deployParameters": &dcl.Property{ + Type: "array", + GoName: "DeployParameters", + Description: "Optional. The deploy parameters to use for the target in this stage.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "object", + GoType: "DeliveryPipelineSerialPipelineStagesDeployParameters", + Required: []string{ + "values", + }, + Properties: map[string]*dcl.Property{ + "matchTargetLabels": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "MatchTargetLabels", + Description: "Optional. Deploy parameters are applied to targets with match labels. If unspecified, deploy parameters are applied to all targets (including child targets of a multi-target).", + }, + "values": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "Values", + Description: "Required. 
Values are deploy parameters in key-value pairs.", + }, + }, + }, + }, + "profiles": &dcl.Property{ + Type: "array", + GoName: "Profiles", + Description: "Skaffold profiles to use when rendering the manifest for this stage's `Target`.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + "strategy": &dcl.Property{ + Type: "object", + GoName: "Strategy", + GoType: "DeliveryPipelineSerialPipelineStagesStrategy", + Description: "Optional. The strategy to use for a `Rollout` to this stage.", + Properties: map[string]*dcl.Property{ + "canary": &dcl.Property{ + Type: "object", + GoName: "Canary", + GoType: "DeliveryPipelineSerialPipelineStagesStrategyCanary", + Description: "Canary deployment strategy provides progressive percentage based deployments to a Target.", + Properties: map[string]*dcl.Property{ + "canaryDeployment": &dcl.Property{ + Type: "object", + GoName: "CanaryDeployment", + GoType: "DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment", + Description: "Configures the progressive based deployment for a Target.", + Conflicts: []string{ + "customCanaryDeployment", + }, + Required: []string{ + "percentages", + }, + Properties: map[string]*dcl.Property{ + "percentages": &dcl.Property{ + Type: "array", + GoName: "Percentages", + Description: "Required. The percentage based deployments that will occur as a part of a `Rollout`. List is expected in ascending order and each integer n is 0 <= n < 100.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "integer", + Format: "int64", + GoType: "int64", + }, + }, + "postdeploy": &dcl.Property{ + Type: "object", + GoName: "Postdeploy", + GoType: "DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy", + Description: "Optional. Configuration for the postdeploy job of the last phase. 
If this is not configured, postdeploy job will not be present.", + Properties: map[string]*dcl.Property{ + "actions": &dcl.Property{ + Type: "array", + GoName: "Actions", + Description: "Optional. A sequence of skaffold custom actions to invoke during execution of the postdeploy job.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + }, + }, + "predeploy": &dcl.Property{ + Type: "object", + GoName: "Predeploy", + GoType: "DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy", + Description: "Optional. Configuration for the predeploy job of the first phase. If this is not configured, predeploy job will not be present.", + Properties: map[string]*dcl.Property{ + "actions": &dcl.Property{ + Type: "array", + GoName: "Actions", + Description: "Optional. A sequence of skaffold custom actions to invoke during execution of the predeploy job.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + }, + }, + "verify": &dcl.Property{ + Type: "boolean", + GoName: "Verify", + Description: "Whether to run verify tests after each percentage deployment.", + }, + }, + }, + "customCanaryDeployment": &dcl.Property{ + Type: "object", + GoName: "CustomCanaryDeployment", + GoType: "DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment", + Description: "Configures the progressive based deployment for a Target, but allows customizing at the phase level where a phase represents each of the percentage deployments.", + Conflicts: []string{ + "canaryDeployment", + }, + Required: []string{ + "phaseConfigs", + }, + Properties: map[string]*dcl.Property{ + "phaseConfigs": &dcl.Property{ + Type: "array", + GoName: "PhaseConfigs", + Description: "Required. 
Configuration for each phase in the canary deployment in the order executed.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "object", + GoType: "DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs", + Required: []string{ + "phaseId", + "percentage", + }, + Properties: map[string]*dcl.Property{ + "percentage": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "Percentage", + Description: "Required. Percentage deployment for the phase.", + }, + "phaseId": &dcl.Property{ + Type: "string", + GoName: "PhaseId", + Description: "Required. The ID to assign to the `Rollout` phase. This value must consist of lower-case letters, numbers, and hyphens, start with a letter and end with a letter or a number, and have a max length of 63 characters. In other words, it must match the following regex: `^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$`.", + }, + "postdeploy": &dcl.Property{ + Type: "object", + GoName: "Postdeploy", + GoType: "DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy", + Description: "Optional. Configuration for the postdeploy job of this phase. If this is not configured, postdeploy job will not be present for this phase.", + Properties: map[string]*dcl.Property{ + "actions": &dcl.Property{ + Type: "array", + GoName: "Actions", + Description: "Optional. A sequence of skaffold custom actions to invoke during execution of the postdeploy job.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + }, + }, + "predeploy": &dcl.Property{ + Type: "object", + GoName: "Predeploy", + GoType: "DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy", + Description: "Optional. Configuration for the predeploy job of this phase. 
If this is not configured, predeploy job will not be present for this phase.", + Properties: map[string]*dcl.Property{ + "actions": &dcl.Property{ + Type: "array", + GoName: "Actions", + Description: "Optional. A sequence of skaffold custom actions to invoke during execution of the predeploy job.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + }, + }, + "profiles": &dcl.Property{ + Type: "array", + GoName: "Profiles", + Description: "Skaffold profiles to use when rendering the manifest for this phase. These are in addition to the profiles list specified in the `DeliveryPipeline` stage.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + "verify": &dcl.Property{ + Type: "boolean", + GoName: "Verify", + Description: "Whether to run verify tests after the deployment.", + }, + }, + }, + }, + }, + }, + "runtimeConfig": &dcl.Property{ + Type: "object", + GoName: "RuntimeConfig", + GoType: "DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig", + Description: "Optional. Runtime specific configurations for the deployment strategy. The runtime configuration is used to determine how Cloud Deploy will split traffic to enable a progressive deployment.", + Properties: map[string]*dcl.Property{ + "cloudRun": &dcl.Property{ + Type: "object", + GoName: "CloudRun", + GoType: "DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun", + Description: "Cloud Run runtime configuration.", + Conflicts: []string{ + "kubernetes", + }, + Properties: map[string]*dcl.Property{ + "automaticTrafficControl": &dcl.Property{ + Type: "boolean", + GoName: "AutomaticTrafficControl", + Description: "Whether Cloud Deploy should update the traffic stanza in a Cloud Run Service on the user's behalf to facilitate traffic splitting. 
This is required to be true for CanaryDeployments, but optional for CustomCanaryDeployments.", + }, + "canaryRevisionTags": &dcl.Property{ + Type: "array", + GoName: "CanaryRevisionTags", + Description: "Optional. A list of tags that are added to the canary revision while the canary phase is in progress.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + "priorRevisionTags": &dcl.Property{ + Type: "array", + GoName: "PriorRevisionTags", + Description: "Optional. A list of tags that are added to the prior revision while the canary phase is in progress.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + "stableRevisionTags": &dcl.Property{ + Type: "array", + GoName: "StableRevisionTags", + Description: "Optional. A list of tags that are added to the final stable revision when the stable phase is applied.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + }, + }, + "kubernetes": &dcl.Property{ + Type: "object", + GoName: "Kubernetes", + GoType: "DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes", + Description: "Kubernetes runtime configuration.", + Conflicts: []string{ + "cloudRun", + }, + Properties: map[string]*dcl.Property{ + "gatewayServiceMesh": &dcl.Property{ + Type: "object", + GoName: "GatewayServiceMesh", + GoType: "DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh", + Description: "Kubernetes Gateway API service mesh configuration.", + Conflicts: []string{ + "serviceNetworking", + }, + Required: []string{ + "httpRoute", + "service", + "deployment", + }, + Properties: map[string]*dcl.Property{ + "deployment": &dcl.Property{ + Type: "string", + GoName: "Deployment", + Description: "Required. 
Name of the Kubernetes Deployment whose traffic is managed by the specified HTTPRoute and Service.", + }, + "httpRoute": &dcl.Property{ + Type: "string", + GoName: "HttpRoute", + Description: "Required. Name of the Gateway API HTTPRoute.", + }, + "podSelectorLabel": &dcl.Property{ + Type: "string", + GoName: "PodSelectorLabel", + Description: "Optional. The label to use when selecting Pods for the Deployment and Service resources. This label must already be present in both resources.", + }, + "routeDestinations": &dcl.Property{ + Type: "object", + GoName: "RouteDestinations", + GoType: "DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations", + Description: "Optional. Route destinations allow configuring the Gateway API HTTPRoute to be deployed to additional clusters. This option is available for multi-cluster service mesh set ups that require the route to exist in the clusters that call the service. If unspecified, the HTTPRoute will only be deployed to the Target cluster.", + Required: []string{ + "destinationIds", + }, + Properties: map[string]*dcl.Property{ + "destinationIds": &dcl.Property{ + Type: "array", + GoName: "DestinationIds", + Description: "Required. The clusters where the Gateway API HTTPRoute resource will be deployed to. Valid entries include the associated entities IDs configured in the Target resource and \"@self\" to include the Target cluster.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + "propagateService": &dcl.Property{ + Type: "boolean", + GoName: "PropagateService", + Description: "Optional. Whether to propagate the Kubernetes Service to the route destination clusters. The Service will always be deployed to the Target cluster even if the HTTPRoute is not. This option may be used to facilitate successful DNS lookup in the route destination clusters. 
Can only be set to true if destinations are specified.", + }, + }, + }, + "routeUpdateWaitTime": &dcl.Property{ + Type: "string", + GoName: "RouteUpdateWaitTime", + Description: "Optional. The time to wait for route updates to propagate. The maximum configurable time is 3 hours, in seconds format. If unspecified, there is no wait time.", + }, + "service": &dcl.Property{ + Type: "string", + GoName: "Service", + Description: "Required. Name of the Kubernetes Service.", + }, + "stableCutbackDuration": &dcl.Property{ + Type: "string", + GoName: "StableCutbackDuration", + Description: "Optional. The amount of time to migrate traffic back from the canary Service to the original Service during the stable phase deployment. If specified, must be between 15s and 3600s. If unspecified, there is no cutback time.", + }, + }, + }, + "serviceNetworking": &dcl.Property{ + Type: "object", + GoName: "ServiceNetworking", + GoType: "DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking", + Description: "Kubernetes Service networking configuration.", + Conflicts: []string{ + "gatewayServiceMesh", + }, + Required: []string{ + "service", + "deployment", + }, + Properties: map[string]*dcl.Property{ + "deployment": &dcl.Property{ + Type: "string", + GoName: "Deployment", + Description: "Required. Name of the Kubernetes Deployment whose traffic is managed by the specified Service.", + }, + "disablePodOverprovisioning": &dcl.Property{ + Type: "boolean", + GoName: "DisablePodOverprovisioning", + Description: "Optional. Whether to disable Pod overprovisioning. If Pod overprovisioning is disabled then Cloud Deploy will limit the number of total Pods used for the deployment strategy to the number of Pods the Deployment has on the cluster.", + }, + "podSelectorLabel": &dcl.Property{ + Type: "string", + GoName: "PodSelectorLabel", + Description: "Optional. The label to use when selecting Pods for the Deployment resource. 
This label must already be present in the Deployment.", + }, + "service": &dcl.Property{ + Type: "string", + GoName: "Service", + Description: "Required. Name of the Kubernetes Service.", + }, + }, + }, + }, + }, + }, + }, + }, + }, + "standard": &dcl.Property{ + Type: "object", + GoName: "Standard", + GoType: "DeliveryPipelineSerialPipelineStagesStrategyStandard", + Description: "Standard deployment strategy executes a single deploy and allows verifying the deployment.", + Properties: map[string]*dcl.Property{ + "postdeploy": &dcl.Property{ + Type: "object", + GoName: "Postdeploy", + GoType: "DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy", + Description: "Optional. Configuration for the postdeploy job. If this is not configured, postdeploy job will not be present.", + Properties: map[string]*dcl.Property{ + "actions": &dcl.Property{ + Type: "array", + GoName: "Actions", + Description: "Optional. A sequence of skaffold custom actions to invoke during execution of the postdeploy job.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + }, + }, + "predeploy": &dcl.Property{ + Type: "object", + GoName: "Predeploy", + GoType: "DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy", + Description: "Optional. Configuration for the predeploy job. If this is not configured, predeploy job will not be present.", + Properties: map[string]*dcl.Property{ + "actions": &dcl.Property{ + Type: "array", + GoName: "Actions", + Description: "Optional. 
A sequence of skaffold custom actions to invoke during execution of the predeploy job.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + }, + }, + "verify": &dcl.Property{ + Type: "boolean", + GoName: "Verify", + Description: "Whether to verify a deployment.", + }, + }, + }, + }, + }, + "targetId": &dcl.Property{ + Type: "string", + GoName: "TargetId", + Description: "The target_id to which this stage points. This field refers exclusively to the last segment of a target name. For example, this field would just be `my-target` (rather than `projects/project/locations/location/targets/my-target`). The location of the `Target` is inferred to be the same as the location of the `DeliveryPipeline` that contains this `Stage`.", + }, + }, + }, + }, + }, + }, + "suspended": &dcl.Property{ + Type: "boolean", + GoName: "Suspended", + Description: "When suspended, no new releases or rollouts can be created, but in-progress ones will complete.", + }, + "uid": &dcl.Property{ + Type: "string", + GoName: "Uid", + ReadOnly: true, + Description: "Output only. Unique identifier of the `DeliveryPipeline`.", + Immutable: true, + }, + "updateTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "UpdateTime", + ReadOnly: true, + Description: "Output only. 
Most recent time at which the pipeline was updated.", + Immutable: true, + }, + }, + }, + }, + }, + }, + } +} diff --git a/mmv1/third_party/terraform/services/clouddeploy/provider_dcl_client_creation.go b/mmv1/third_party/terraform/services/clouddeploy/provider_dcl_client_creation.go new file mode 100644 index 000000000000..f42c684b2f3c --- /dev/null +++ b/mmv1/third_party/terraform/services/clouddeploy/provider_dcl_client_creation.go @@ -0,0 +1,30 @@ +package clouddeploy + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "time" +) + +func NewDCLClouddeployClient(config *transport_tpg.Config, userAgent, billingProject string, timeout time.Duration) *Client { + configOptions := []dcl.ConfigOption{ + dcl.WithHTTPClient(config.Client), + dcl.WithUserAgent(userAgent), + dcl.WithLogger(dcl.DCLLogger{}), + dcl.WithBasePath(config.ClouddeployBasePath), + } + + if timeout != 0 { + configOptions = append(configOptions, dcl.WithTimeout(timeout)) + } + + if config.UserProjectOverride { + configOptions = append(configOptions, dcl.WithUserProjectOverride()) + if billingProject != "" { + configOptions = append(configOptions, dcl.WithBillingProject(billingProject)) + } + } + + dclConfig := dcl.NewConfig(configOptions...) 
+ return NewClient(dclConfig) +} diff --git a/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_delivery_pipeline.go b/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_delivery_pipeline.go new file mode 100644 index 000000000000..68c132f985b1 --- /dev/null +++ b/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_delivery_pipeline.go @@ -0,0 +1,1789 @@ +package clouddeploy + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceClouddeployDeliveryPipeline() *schema.Resource { + return &schema.Resource{ + Create: resourceClouddeployDeliveryPipelineCreate, + Read: resourceClouddeployDeliveryPipelineRead, + Update: resourceClouddeployDeliveryPipelineUpdate, + Delete: resourceClouddeployDeliveryPipelineDelete, + + Importer: &schema.ResourceImporter{ + State: resourceClouddeployDeliveryPipelineImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + tpgresource.SetLabelsDiff, + tpgresource.SetAnnotationsDiff, + ), + + Schema: map[string]*schema.Schema{ + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The location for the resource", + }, + + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Name of the `DeliveryPipeline`. 
Format is `[a-z]([a-z0-9-]{0,61}[a-z0-9])?`.", + }, + + "description": { + Type: schema.TypeString, + Optional: true, + Description: "Description of the `DeliveryPipeline`. Max length is 255 characters.", + }, + + "effective_annotations": { + Type: schema.TypeMap, + Computed: true, + Description: "All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services.", + }, + + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + Description: "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The project for the resource", + }, + + "serial_pipeline": { + Type: schema.TypeList, + Optional: true, + Description: "SerialPipeline defines a sequential set of stages for a `DeliveryPipeline`.", + MaxItems: 1, + Elem: ClouddeployDeliveryPipelineSerialPipelineSchema(), + }, + + "suspended": { + Type: schema.TypeBool, + Optional: true, + Description: "When suspended, no new releases or rollouts can be created, but in-progress ones will complete.", + }, + + "annotations": { + Type: schema.TypeMap, + Optional: true, + Description: "User annotations. These attributes can only be set and used by the user, and not by Google Cloud Deploy. See https://google.aip.dev/128#annotations for more details such as format and size limitations.\n\n**Note**: This field is non-authoritative, and will only manage the annotations present in your configuration.\nPlease refer to the field `effective_annotations` for all of the annotations present on the resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "condition": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. 
Information around the state of the Delivery Pipeline.", + Elem: ClouddeployDeliveryPipelineConditionSchema(), + }, + + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. Time at which the pipeline was created.", + }, + + "etag": { + Type: schema.TypeString, + Computed: true, + Description: "This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.", + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: "Labels are attributes that can be set and used by both the user and by Google Cloud Deploy. Labels must meet the following constraints: * Keys and values can contain only lowercase letters, numeric characters, underscores, and dashes. * All characters must use UTF-8 encoding, and international characters are allowed. * Keys must start with a lowercase letter or international character. * Each resource is limited to a maximum of 64 labels. Both keys and values are additionally constrained to be <= 128 bytes.\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field `effective_labels` for all of the labels present on the resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "terraform_labels": { + Type: schema.TypeMap, + Computed: true, + Description: "The combination of labels configured directly on the resource and default labels configured on the provider.", + }, + + "uid": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. Unique identifier of the `DeliveryPipeline`.", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. 
Most recent time at which the pipeline was updated.", + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "stages": { + Type: schema.TypeList, + Optional: true, + Description: "Each stage specifies configuration for a `Target`. The ordering of this list defines the promotion flow.", + Elem: ClouddeployDeliveryPipelineSerialPipelineStagesSchema(), + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineStagesSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "deploy_parameters": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. The deploy parameters to use for the target in this stage.", + Elem: ClouddeployDeliveryPipelineSerialPipelineStagesDeployParametersSchema(), + }, + + "profiles": { + Type: schema.TypeList, + Optional: true, + Description: "Skaffold profiles to use when rendering the manifest for this stage's `Target`.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "strategy": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. The strategy to use for a `Rollout` to this stage.", + MaxItems: 1, + Elem: ClouddeployDeliveryPipelineSerialPipelineStagesStrategySchema(), + }, + + "target_id": { + Type: schema.TypeString, + Optional: true, + Description: "The target_id to which this stage points. This field refers exclusively to the last segment of a target name. For example, this field would just be `my-target` (rather than `projects/project/locations/location/targets/my-target`). 
The location of the `Target` is inferred to be the same as the location of the `DeliveryPipeline` that contains this `Stage`.", + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineStagesDeployParametersSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "values": { + Type: schema.TypeMap, + Required: true, + Description: "Required. Values are deploy parameters in key-value pairs.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "match_target_labels": { + Type: schema.TypeMap, + Optional: true, + Description: "Optional. Deploy parameters are applied to targets with match labels. If unspecified, deploy parameters are applied to all targets (including child targets of a multi-target).", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineStagesStrategySchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "canary": { + Type: schema.TypeList, + Optional: true, + Description: "Canary deployment strategy provides progressive percentage based deployments to a Target.", + MaxItems: 1, + Elem: ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanarySchema(), + }, + + "standard": { + Type: schema.TypeList, + Optional: true, + Description: "Standard deployment strategy executes a single deploy and allows verifying the deployment.", + MaxItems: 1, + Elem: ClouddeployDeliveryPipelineSerialPipelineStagesStrategyStandardSchema(), + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanarySchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "canary_deployment": { + Type: schema.TypeList, + Optional: true, + Description: "Configures the progressive based deployment for a Target.", + MaxItems: 1, + Elem: ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentSchema(), + }, + + "custom_canary_deployment": { + Type: 
schema.TypeList, + Optional: true, + Description: "Configures the progressive based deployment for a Target, but allows customizing at the phase level where a phase represents each of the percentage deployments.", + MaxItems: 1, + Elem: ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentSchema(), + }, + + "runtime_config": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. Runtime specific configurations for the deployment strategy. The runtime configuration is used to determine how Cloud Deploy will split traffic to enable a progressive deployment.", + MaxItems: 1, + Elem: ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigSchema(), + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "percentages": { + Type: schema.TypeList, + Required: true, + Description: "Required. The percentage based deployments that will occur as a part of a `Rollout`. List is expected in ascending order and each integer n is 0 <= n < 100.", + Elem: &schema.Schema{Type: schema.TypeInt}, + }, + + "postdeploy": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. Configuration for the postdeploy job of the last phase. If this is not configured, postdeploy job will not be present.", + MaxItems: 1, + Elem: ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploySchema(), + }, + + "predeploy": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. Configuration for the predeploy job of the first phase. 
If this is not configured, predeploy job will not be present.", + MaxItems: 1, + Elem: ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploySchema(), + }, + + "verify": { + Type: schema.TypeBool, + Optional: true, + Description: "Whether to run verify tests after each percentage deployment.", + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploySchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "actions": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. A sequence of skaffold custom actions to invoke during execution of the postdeploy job.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploySchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "actions": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. A sequence of skaffold custom actions to invoke during execution of the predeploy job.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "phase_configs": { + Type: schema.TypeList, + Required: true, + Description: "Required. Configuration for each phase in the canary deployment in the order executed.", + Elem: ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsSchema(), + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "percentage": { + Type: schema.TypeInt, + Required: true, + Description: "Required. 
Percentage deployment for the phase.", + }, + + "phase_id": { + Type: schema.TypeString, + Required: true, + Description: "Required. The ID to assign to the `Rollout` phase. This value must consist of lower-case letters, numbers, and hyphens, start with a letter and end with a letter or a number, and have a max length of 63 characters. In other words, it must match the following regex: `^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$`.", + }, + + "postdeploy": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. Configuration for the postdeploy job of this phase. If this is not configured, postdeploy job will not be present for this phase.", + MaxItems: 1, + Elem: ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploySchema(), + }, + + "predeploy": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. Configuration for the predeploy job of this phase. If this is not configured, predeploy job will not be present for this phase.", + MaxItems: 1, + Elem: ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploySchema(), + }, + + "profiles": { + Type: schema.TypeList, + Optional: true, + Description: "Skaffold profiles to use when rendering the manifest for this phase. These are in addition to the profiles list specified in the `DeliveryPipeline` stage.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "verify": { + Type: schema.TypeBool, + Optional: true, + Description: "Whether to run verify tests after the deployment.", + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploySchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "actions": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. 
A sequence of skaffold custom actions to invoke during execution of the postdeploy job.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploySchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "actions": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. A sequence of skaffold custom actions to invoke during execution of the predeploy job.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cloud_run": { + Type: schema.TypeList, + Optional: true, + Description: "Cloud Run runtime configuration.", + MaxItems: 1, + Elem: ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunSchema(), + }, + + "kubernetes": { + Type: schema.TypeList, + Optional: true, + Description: "Kubernetes runtime configuration.", + MaxItems: 1, + Elem: ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesSchema(), + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "automatic_traffic_control": { + Type: schema.TypeBool, + Optional: true, + Description: "Whether Cloud Deploy should update the traffic stanza in a Cloud Run Service on the user's behalf to facilitate traffic splitting. This is required to be true for CanaryDeployments, but optional for CustomCanaryDeployments.", + }, + + "canary_revision_tags": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. 
A list of tags that are added to the canary revision while the canary phase is in progress.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "prior_revision_tags": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. A list of tags that are added to the prior revision while the canary phase is in progress.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "stable_revision_tags": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. A list of tags that are added to the final stable revision when the stable phase is applied.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "gateway_service_mesh": { + Type: schema.TypeList, + Optional: true, + Description: "Kubernetes Gateway API service mesh configuration.", + MaxItems: 1, + Elem: ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshSchema(), + }, + + "service_networking": { + Type: schema.TypeList, + Optional: true, + Description: "Kubernetes Service networking configuration.", + MaxItems: 1, + Elem: ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingSchema(), + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "deployment": { + Type: schema.TypeString, + Required: true, + Description: "Required. Name of the Kubernetes Deployment whose traffic is managed by the specified HTTPRoute and Service.", + }, + + "http_route": { + Type: schema.TypeString, + Required: true, + Description: "Required. 
Name of the Gateway API HTTPRoute.", + }, + + "service": { + Type: schema.TypeString, + Required: true, + Description: "Required. Name of the Kubernetes Service.", + }, + + "pod_selector_label": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. The label to use when selecting Pods for the Deployment and Service resources. This label must already be present in both resources.", + }, + + "route_destinations": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. Route destinations allow configuring the Gateway API HTTPRoute to be deployed to additional clusters. This option is available for multi-cluster service mesh set ups that require the route to exist in the clusters that call the service. If unspecified, the HTTPRoute will only be deployed to the Target cluster.", + MaxItems: 1, + Elem: ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinationsSchema(), + }, + + "route_update_wait_time": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. The time to wait for route updates to propagate. The maximum configurable time is 3 hours, in seconds format. If unspecified, there is no wait time.", + }, + + "stable_cutback_duration": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. The amount of time to migrate traffic back from the canary Service to the original Service during the stable phase deployment. If specified, must be between 15s and 3600s. If unspecified, there is no cutback time.", + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinationsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "destination_ids": { + Type: schema.TypeList, + Required: true, + Description: "Required. The clusters where the Gateway API HTTPRoute resource will be deployed to. 
Valid entries include the associated entities IDs configured in the Target resource and \"@self\" to include the Target cluster.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "propagate_service": { + Type: schema.TypeBool, + Optional: true, + Description: "Optional. Whether to propagate the Kubernetes Service to the route destination clusters. The Service will always be deployed to the Target cluster even if the HTTPRoute is not. This option may be used to facilitate successful DNS lookup in the route destination clusters. Can only be set to true if destinations are specified.", + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "deployment": { + Type: schema.TypeString, + Required: true, + Description: "Required. Name of the Kubernetes Deployment whose traffic is managed by the specified Service.", + }, + + "service": { + Type: schema.TypeString, + Required: true, + Description: "Required. Name of the Kubernetes Service.", + }, + + "disable_pod_overprovisioning": { + Type: schema.TypeBool, + Optional: true, + Description: "Optional. Whether to disable Pod overprovisioning. If Pod overprovisioning is disabled then Cloud Deploy will limit the number of total Pods used for the deployment strategy to the number of Pods the Deployment has on the cluster.", + }, + + "pod_selector_label": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. The label to use when selecting Pods for the Deployment resource. This label must already be present in the Deployment.", + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineStagesStrategyStandardSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "postdeploy": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. Configuration for the postdeploy job. 
If this is not configured, postdeploy job will not be present.", + MaxItems: 1, + Elem: ClouddeployDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploySchema(), + }, + + "predeploy": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. Configuration for the predeploy job. If this is not configured, predeploy job will not be present.", + MaxItems: 1, + Elem: ClouddeployDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploySchema(), + }, + + "verify": { + Type: schema.TypeBool, + Optional: true, + Description: "Whether to verify a deployment.", + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploySchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "actions": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. A sequence of skaffold custom actions to invoke during execution of the postdeploy job.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploySchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "actions": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. 
A sequence of skaffold custom actions to invoke during execution of the predeploy job.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func ClouddeployDeliveryPipelineConditionSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "pipeline_ready_condition": { + Type: schema.TypeList, + Computed: true, + Description: "Details around the Pipeline's overall status.", + Elem: ClouddeployDeliveryPipelineConditionPipelineReadyConditionSchema(), + }, + + "targets_present_condition": { + Type: schema.TypeList, + Computed: true, + Description: "Details around targets enumerated in the pipeline.", + Elem: ClouddeployDeliveryPipelineConditionTargetsPresentConditionSchema(), + }, + + "targets_type_condition": { + Type: schema.TypeList, + Computed: true, + Description: "Details on the whether the targets enumerated in the pipeline are of the same type.", + Elem: ClouddeployDeliveryPipelineConditionTargetsTypeConditionSchema(), + }, + }, + } +} + +func ClouddeployDeliveryPipelineConditionPipelineReadyConditionSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "status": { + Type: schema.TypeBool, + Computed: true, + Description: "True if the Pipeline is in a valid state. Otherwise at least one condition in `PipelineCondition` is in an invalid state. Iterate over those conditions and see which condition(s) has status = false to find out what is wrong with the Pipeline.", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Last time the condition was updated.", + }, + }, + } +} + +func ClouddeployDeliveryPipelineConditionTargetsPresentConditionSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "missing_targets": { + Type: schema.TypeList, + Computed: true, + Description: "The list of Target names that are missing. 
For example, projects/{project_id}/locations/{location_name}/targets/{target_name}.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "status": { + Type: schema.TypeBool, + Computed: true, + Description: "True if there aren't any missing Targets.", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Last time the condition was updated.", + }, + }, + } +} + +func ClouddeployDeliveryPipelineConditionTargetsTypeConditionSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "error_details": { + Type: schema.TypeString, + Computed: true, + Description: "Human readable error message.", + }, + + "status": { + Type: schema.TypeBool, + Computed: true, + Description: "True if the targets are all a comparable type. For example this is true if all targets are GKE clusters. This is false if some targets are Cloud Run targets and others are GKE clusters.", + }, + }, + } +} + +func resourceClouddeployDeliveryPipelineCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &DeliveryPipeline{ + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Description: dcl.String(d.Get("description").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + Project: dcl.String(project), + SerialPipeline: expandClouddeployDeliveryPipelineSerialPipeline(d.Get("serial_pipeline")), + Suspended: dcl.Bool(d.Get("suspended").(bool)), + } + + id, err := obj.ID() + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == 
nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLClouddeployClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyDeliveryPipeline(context.Background(), obj, directive...) + + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating DeliveryPipeline: %s", err) + } + + log.Printf("[DEBUG] Finished creating DeliveryPipeline %q: %#v", d.Id(), res) + + return resourceClouddeployDeliveryPipelineRead(d, meta) +} + +func resourceClouddeployDeliveryPipelineRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &DeliveryPipeline{ + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Description: dcl.String(d.Get("description").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + Project: dcl.String(project), + SerialPipeline: expandClouddeployDeliveryPipelineSerialPipeline(d.Get("serial_pipeline")), + Suspended: dcl.Bool(d.Get("suspended").(bool)), + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + 
billingProject = bp + } + client := NewDCLClouddeployClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetDeliveryPipeline(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("ClouddeployDeliveryPipeline %q", d.Id()) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("location", res.Location); err != nil { + return fmt.Errorf("error setting location in state: %s", err) + } + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + if err = d.Set("description", res.Description); err != nil { + return fmt.Errorf("error setting description in state: %s", err) + } + if err = d.Set("effective_annotations", res.Annotations); err != nil { + return fmt.Errorf("error setting effective_annotations in state: %s", err) + } + if err = d.Set("effective_labels", res.Labels); err != nil { + return fmt.Errorf("error setting effective_labels in state: %s", err) + } + if err = d.Set("project", res.Project); err != nil { + return fmt.Errorf("error setting project in state: %s", err) + } + if err = d.Set("serial_pipeline", flattenClouddeployDeliveryPipelineSerialPipeline(res.SerialPipeline)); err != nil { + return fmt.Errorf("error setting serial_pipeline in state: %s", err) + } + if err = d.Set("suspended", res.Suspended); err != nil { + return fmt.Errorf("error setting suspended in state: %s", err) + } + if err = d.Set("annotations", flattenClouddeployDeliveryPipelineAnnotations(res.Annotations, d)); err != nil { + return fmt.Errorf("error setting annotations in state: %s", err) + } + if err = d.Set("condition", flattenClouddeployDeliveryPipelineCondition(res.Condition)); err != nil { + return 
fmt.Errorf("error setting condition in state: %s", err) + } + if err = d.Set("create_time", res.CreateTime); err != nil { + return fmt.Errorf("error setting create_time in state: %s", err) + } + if err = d.Set("etag", res.Etag); err != nil { + return fmt.Errorf("error setting etag in state: %s", err) + } + if err = d.Set("labels", flattenClouddeployDeliveryPipelineLabels(res.Labels, d)); err != nil { + return fmt.Errorf("error setting labels in state: %s", err) + } + if err = d.Set("terraform_labels", flattenClouddeployDeliveryPipelineTerraformLabels(res.Labels, d)); err != nil { + return fmt.Errorf("error setting terraform_labels in state: %s", err) + } + if err = d.Set("uid", res.Uid); err != nil { + return fmt.Errorf("error setting uid in state: %s", err) + } + if err = d.Set("update_time", res.UpdateTime); err != nil { + return fmt.Errorf("error setting update_time in state: %s", err) + } + + return nil +} +func resourceClouddeployDeliveryPipelineUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &DeliveryPipeline{ + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Description: dcl.String(d.Get("description").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + Project: dcl.String(project), + SerialPipeline: expandClouddeployDeliveryPipelineSerialPipeline(d.Get("serial_pipeline")), + Suspended: dcl.Bool(d.Get("suspended").(bool)), + } + directive := tpgdclresource.UpdateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } 
+ client := NewDCLClouddeployClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyDeliveryPipeline(context.Background(), obj, directive...) + + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually update + d.SetId("") + return fmt.Errorf("Error updating DeliveryPipeline: %s", err) + } + + log.Printf("[DEBUG] Finished updating DeliveryPipeline %q: %#v", d.Id(), res) + + return resourceClouddeployDeliveryPipelineRead(d, meta) +} + +func resourceClouddeployDeliveryPipelineDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &DeliveryPipeline{ + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Description: dcl.String(d.Get("description").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + Project: dcl.String(project), + SerialPipeline: expandClouddeployDeliveryPipelineSerialPipeline(d.Get("serial_pipeline")), + Suspended: dcl.Bool(d.Get("suspended").(bool)), + } + + log.Printf("[DEBUG] Deleting DeliveryPipeline %q", d.Id()) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLClouddeployClient(config, userAgent, billingProject, 
d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + if err := client.DeleteDeliveryPipeline(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting DeliveryPipeline: %s", err) + } + + log.Printf("[DEBUG] Finished deleting DeliveryPipeline %q", d.Id()) + return nil +} + +func resourceClouddeployDeliveryPipelineImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + if err := tpgresource.ParseImportId([]string{ + "projects/(?P<project>[^/]+)/locations/(?P<location>[^/]+)/deliveryPipelines/(?P<name>[^/]+)", + "(?P<project>[^/]+)/(?P<location>[^/]+)/(?P<name>[^/]+)", + "(?P<location>[^/]+)/(?P<name>[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/deliveryPipelines/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func expandClouddeployDeliveryPipelineSerialPipeline(o interface{}) *DeliveryPipelineSerialPipeline { + if o == nil { + return EmptyDeliveryPipelineSerialPipeline + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyDeliveryPipelineSerialPipeline + } + obj := objArr[0].(map[string]interface{}) + return &DeliveryPipelineSerialPipeline{ + Stages: expandClouddeployDeliveryPipelineSerialPipelineStagesArray(obj["stages"]), + } +} + +func flattenClouddeployDeliveryPipelineSerialPipeline(obj *DeliveryPipelineSerialPipeline) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "stages": flattenClouddeployDeliveryPipelineSerialPipelineStagesArray(obj.Stages), + } + + return 
[]interface{}{transformed} + +} +func expandClouddeployDeliveryPipelineSerialPipelineStagesArray(o interface{}) []DeliveryPipelineSerialPipelineStages { + if o == nil { + return make([]DeliveryPipelineSerialPipelineStages, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return make([]DeliveryPipelineSerialPipelineStages, 0) + } + + items := make([]DeliveryPipelineSerialPipelineStages, 0, len(objs)) + for _, item := range objs { + i := expandClouddeployDeliveryPipelineSerialPipelineStages(item) + items = append(items, *i) + } + + return items +} + +func expandClouddeployDeliveryPipelineSerialPipelineStages(o interface{}) *DeliveryPipelineSerialPipelineStages { + if o == nil { + return EmptyDeliveryPipelineSerialPipelineStages + } + + obj := o.(map[string]interface{}) + return &DeliveryPipelineSerialPipelineStages{ + DeployParameters: expandClouddeployDeliveryPipelineSerialPipelineStagesDeployParametersArray(obj["deploy_parameters"]), + Profiles: tpgdclresource.ExpandStringArray(obj["profiles"]), + Strategy: expandClouddeployDeliveryPipelineSerialPipelineStagesStrategy(obj["strategy"]), + TargetId: dcl.String(obj["target_id"].(string)), + } +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStagesArray(objs []DeliveryPipelineSerialPipelineStages) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenClouddeployDeliveryPipelineSerialPipelineStages(&item) + items = append(items, i) + } + + return items +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStages(obj *DeliveryPipelineSerialPipelineStages) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "deploy_parameters": flattenClouddeployDeliveryPipelineSerialPipelineStagesDeployParametersArray(obj.DeployParameters), + "profiles": obj.Profiles, + "strategy": flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategy(obj.Strategy), + 
"target_id": obj.TargetId, + } + + return transformed + +} +func expandClouddeployDeliveryPipelineSerialPipelineStagesDeployParametersArray(o interface{}) []DeliveryPipelineSerialPipelineStagesDeployParameters { + if o == nil { + return make([]DeliveryPipelineSerialPipelineStagesDeployParameters, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return make([]DeliveryPipelineSerialPipelineStagesDeployParameters, 0) + } + + items := make([]DeliveryPipelineSerialPipelineStagesDeployParameters, 0, len(objs)) + for _, item := range objs { + i := expandClouddeployDeliveryPipelineSerialPipelineStagesDeployParameters(item) + items = append(items, *i) + } + + return items +} + +func expandClouddeployDeliveryPipelineSerialPipelineStagesDeployParameters(o interface{}) *DeliveryPipelineSerialPipelineStagesDeployParameters { + if o == nil { + return EmptyDeliveryPipelineSerialPipelineStagesDeployParameters + } + + obj := o.(map[string]interface{}) + return &DeliveryPipelineSerialPipelineStagesDeployParameters{ + Values: tpgresource.CheckStringMap(obj["values"]), + MatchTargetLabels: tpgresource.CheckStringMap(obj["match_target_labels"]), + } +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStagesDeployParametersArray(objs []DeliveryPipelineSerialPipelineStagesDeployParameters) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenClouddeployDeliveryPipelineSerialPipelineStagesDeployParameters(&item) + items = append(items, i) + } + + return items +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStagesDeployParameters(obj *DeliveryPipelineSerialPipelineStagesDeployParameters) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "values": obj.Values, + "match_target_labels": obj.MatchTargetLabels, + } + + return transformed + +} + +func expandClouddeployDeliveryPipelineSerialPipelineStagesStrategy(o 
interface{}) *DeliveryPipelineSerialPipelineStagesStrategy { + if o == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategy + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategy + } + obj := objArr[0].(map[string]interface{}) + return &DeliveryPipelineSerialPipelineStagesStrategy{ + Canary: expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanary(obj["canary"]), + Standard: expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyStandard(obj["standard"]), + } +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategy(obj *DeliveryPipelineSerialPipelineStagesStrategy) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "canary": flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanary(obj.Canary), + "standard": flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyStandard(obj.Standard), + } + + return []interface{}{transformed} + +} + +func expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanary(o interface{}) *DeliveryPipelineSerialPipelineStagesStrategyCanary { + if o == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanary + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanary + } + obj := objArr[0].(map[string]interface{}) + return &DeliveryPipelineSerialPipelineStagesStrategyCanary{ + CanaryDeployment: expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(obj["canary_deployment"]), + CustomCanaryDeployment: expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(obj["custom_canary_deployment"]), + RuntimeConfig: expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(obj["runtime_config"]), + } +} + +func 
flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanary(obj *DeliveryPipelineSerialPipelineStagesStrategyCanary) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "canary_deployment": flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(obj.CanaryDeployment), + "custom_canary_deployment": flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(obj.CustomCanaryDeployment), + "runtime_config": flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(obj.RuntimeConfig), + } + + return []interface{}{transformed} + +} + +func expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(o interface{}) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment { + if o == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment + } + obj := objArr[0].(map[string]interface{}) + return &DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment{ + Percentages: tpgdclresource.ExpandIntegerArray(obj["percentages"]), + Postdeploy: expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy(obj["postdeploy"]), + Predeploy: expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy(obj["predeploy"]), + Verify: dcl.Bool(obj["verify"].(bool)), + } +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(obj *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "percentages": obj.Percentages, + "postdeploy": 
flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy(obj.Postdeploy), + "predeploy": flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy(obj.Predeploy), + "verify": obj.Verify, + } + + return []interface{}{transformed} + +} + +func expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy(o interface{}) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy { + if o == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy + } + obj := objArr[0].(map[string]interface{}) + return &DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy{ + Actions: tpgdclresource.ExpandStringArray(obj["actions"]), + } +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy(obj *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "actions": obj.Actions, + } + + return []interface{}{transformed} + +} + +func expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy(o interface{}) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy { + if o == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy + } + obj := objArr[0].(map[string]interface{}) + return &DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy{ + Actions: 
tpgdclresource.ExpandStringArray(obj["actions"]), + } +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy(obj *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "actions": obj.Actions, + } + + return []interface{}{transformed} + +} + +func expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(o interface{}) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment { + if o == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment + } + obj := objArr[0].(map[string]interface{}) + return &DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment{ + PhaseConfigs: expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsArray(obj["phase_configs"]), + } +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(obj *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "phase_configs": flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsArray(obj.PhaseConfigs), + } + + return []interface{}{transformed} + +} +func expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsArray(o interface{}) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs { + if o == nil { + return make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs, 0) + } + + objs := 
o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs, 0) + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs, 0, len(objs)) + for _, item := range objs { + i := expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs(item) + items = append(items, *i) + } + + return items +} + +func expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs(o interface{}) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs { + if o == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs + } + + obj := o.(map[string]interface{}) + return &DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs{ + Percentage: dcl.Int64(int64(obj["percentage"].(int))), + PhaseId: dcl.String(obj["phase_id"].(string)), + Postdeploy: expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy(obj["postdeploy"]), + Predeploy: expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy(obj["predeploy"]), + Profiles: tpgdclresource.ExpandStringArray(obj["profiles"]), + Verify: dcl.Bool(obj["verify"].(bool)), + } +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsArray(objs []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs(&item) + items = append(items, i) + } + + return items +} + +func 
flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs(obj *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "percentage": obj.Percentage, + "phase_id": obj.PhaseId, + "postdeploy": flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy(obj.Postdeploy), + "predeploy": flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy(obj.Predeploy), + "profiles": obj.Profiles, + "verify": obj.Verify, + } + + return transformed + +} + +func expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy(o interface{}) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy { + if o == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy + } + obj := objArr[0].(map[string]interface{}) + return &DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy{ + Actions: tpgdclresource.ExpandStringArray(obj["actions"]), + } +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy(obj *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "actions": obj.Actions, + } + + return []interface{}{transformed} + +} + +func 
expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy(o interface{}) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy { + if o == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy + } + obj := objArr[0].(map[string]interface{}) + return &DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy{ + Actions: tpgdclresource.ExpandStringArray(obj["actions"]), + } +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy(obj *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "actions": obj.Actions, + } + + return []interface{}{transformed} + +} + +func expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(o interface{}) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig { + if o == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig + } + obj := objArr[0].(map[string]interface{}) + return &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig{ + CloudRun: expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(obj["cloud_run"]), + Kubernetes: expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(obj["kubernetes"]), + } +} + +func 
flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(obj *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "cloud_run": flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(obj.CloudRun), + "kubernetes": flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(obj.Kubernetes), + } + + return []interface{}{transformed} + +} + +func expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(o interface{}) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun { + if o == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun + } + obj := objArr[0].(map[string]interface{}) + return &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun{ + AutomaticTrafficControl: dcl.Bool(obj["automatic_traffic_control"].(bool)), + CanaryRevisionTags: tpgdclresource.ExpandStringArray(obj["canary_revision_tags"]), + PriorRevisionTags: tpgdclresource.ExpandStringArray(obj["prior_revision_tags"]), + StableRevisionTags: tpgdclresource.ExpandStringArray(obj["stable_revision_tags"]), + } +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(obj *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "automatic_traffic_control": obj.AutomaticTrafficControl, + "canary_revision_tags": obj.CanaryRevisionTags, + "prior_revision_tags": obj.PriorRevisionTags, + "stable_revision_tags": obj.StableRevisionTags, + } + + return 
[]interface{}{transformed} + +} + +func expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(o interface{}) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes { + if o == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes + } + obj := objArr[0].(map[string]interface{}) + return &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes{ + GatewayServiceMesh: expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(obj["gateway_service_mesh"]), + ServiceNetworking: expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(obj["service_networking"]), + } +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(obj *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "gateway_service_mesh": flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(obj.GatewayServiceMesh), + "service_networking": flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(obj.ServiceNetworking), + } + + return []interface{}{transformed} + +} + +func expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(o interface{}) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh { + if o == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh + } + objArr := o.([]interface{}) + if 
len(objArr) == 0 || objArr[0] == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh + } + obj := objArr[0].(map[string]interface{}) + return &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh{ + Deployment: dcl.String(obj["deployment"].(string)), + HttpRoute: dcl.String(obj["http_route"].(string)), + Service: dcl.String(obj["service"].(string)), + PodSelectorLabel: dcl.String(obj["pod_selector_label"].(string)), + RouteDestinations: expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations(obj["route_destinations"]), + RouteUpdateWaitTime: dcl.String(obj["route_update_wait_time"].(string)), + StableCutbackDuration: dcl.String(obj["stable_cutback_duration"].(string)), + } +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(obj *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "deployment": obj.Deployment, + "http_route": obj.HttpRoute, + "service": obj.Service, + "pod_selector_label": obj.PodSelectorLabel, + "route_destinations": flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations(obj.RouteDestinations), + "route_update_wait_time": obj.RouteUpdateWaitTime, + "stable_cutback_duration": obj.StableCutbackDuration, + } + + return []interface{}{transformed} + +} + +func expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations(o interface{}) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations { + if o == nil { + return 
EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations + } + obj := objArr[0].(map[string]interface{}) + return &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations{ + DestinationIds: tpgdclresource.ExpandStringArray(obj["destination_ids"]), + PropagateService: dcl.Bool(obj["propagate_service"].(bool)), + } +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations(obj *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "destination_ids": obj.DestinationIds, + "propagate_service": obj.PropagateService, + } + + return []interface{}{transformed} + +} + +func expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(o interface{}) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking { + if o == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking + } + obj := objArr[0].(map[string]interface{}) + return &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking{ + Deployment: dcl.String(obj["deployment"].(string)), + Service: dcl.String(obj["service"].(string)), + DisablePodOverprovisioning: dcl.Bool(obj["disable_pod_overprovisioning"].(bool)), + 
PodSelectorLabel: dcl.String(obj["pod_selector_label"].(string)), + } +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(obj *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "deployment": obj.Deployment, + "service": obj.Service, + "disable_pod_overprovisioning": obj.DisablePodOverprovisioning, + "pod_selector_label": obj.PodSelectorLabel, + } + + return []interface{}{transformed} + +} + +func expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyStandard(o interface{}) *DeliveryPipelineSerialPipelineStagesStrategyStandard { + if o == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyStandard + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyStandard + } + obj := objArr[0].(map[string]interface{}) + return &DeliveryPipelineSerialPipelineStagesStrategyStandard{ + Postdeploy: expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy(obj["postdeploy"]), + Predeploy: expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy(obj["predeploy"]), + Verify: dcl.Bool(obj["verify"].(bool)), + } +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyStandard(obj *DeliveryPipelineSerialPipelineStagesStrategyStandard) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "postdeploy": flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy(obj.Postdeploy), + "predeploy": flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy(obj.Predeploy), + "verify": obj.Verify, + } + + return []interface{}{transformed} + +} + +func 
expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy(o interface{}) *DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy { + if o == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy + } + obj := objArr[0].(map[string]interface{}) + return &DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy{ + Actions: tpgdclresource.ExpandStringArray(obj["actions"]), + } +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy(obj *DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "actions": obj.Actions, + } + + return []interface{}{transformed} + +} + +func expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy(o interface{}) *DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy { + if o == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy + } + obj := objArr[0].(map[string]interface{}) + return &DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy{ + Actions: tpgdclresource.ExpandStringArray(obj["actions"]), + } +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy(obj *DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "actions": obj.Actions, + } + + return []interface{}{transformed} + +} + +func flattenClouddeployDeliveryPipelineCondition(obj *DeliveryPipelineCondition) interface{} { + if obj == nil || obj.Empty() 
{ + return nil + } + transformed := map[string]interface{}{ + "pipeline_ready_condition": flattenClouddeployDeliveryPipelineConditionPipelineReadyCondition(obj.PipelineReadyCondition), + "targets_present_condition": flattenClouddeployDeliveryPipelineConditionTargetsPresentCondition(obj.TargetsPresentCondition), + "targets_type_condition": flattenClouddeployDeliveryPipelineConditionTargetsTypeCondition(obj.TargetsTypeCondition), + } + + return []interface{}{transformed} + +} + +func flattenClouddeployDeliveryPipelineConditionPipelineReadyCondition(obj *DeliveryPipelineConditionPipelineReadyCondition) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "status": obj.Status, + "update_time": obj.UpdateTime, + } + + return []interface{}{transformed} + +} + +func flattenClouddeployDeliveryPipelineConditionTargetsPresentCondition(obj *DeliveryPipelineConditionTargetsPresentCondition) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "missing_targets": obj.MissingTargets, + "status": obj.Status, + "update_time": obj.UpdateTime, + } + + return []interface{}{transformed} + +} + +func flattenClouddeployDeliveryPipelineConditionTargetsTypeCondition(obj *DeliveryPipelineConditionTargetsTypeCondition) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "error_details": obj.ErrorDetails, + "status": obj.Status, + } + + return []interface{}{transformed} + +} + +func flattenClouddeployDeliveryPipelineLabels(v map[string]string, d *schema.ResourceData) interface{} { + if v == nil { + return nil + } + + transformed := make(map[string]interface{}) + if l, ok := d.Get("labels").(map[string]interface{}); ok { + for k := range l { + transformed[k] = v[k] + } + } + + return transformed +} + +func flattenClouddeployDeliveryPipelineTerraformLabels(v map[string]string, d *schema.ResourceData) interface{} { + if v == nil { + 
return nil + } + + transformed := make(map[string]interface{}) + if l, ok := d.Get("terraform_labels").(map[string]interface{}); ok { + for k := range l { + transformed[k] = v[k] + } + } + + return transformed +} + +func flattenClouddeployDeliveryPipelineAnnotations(v map[string]string, d *schema.ResourceData) interface{} { + if v == nil { + return nil + } + + transformed := make(map[string]interface{}) + if l, ok := d.Get("annotations").(map[string]interface{}); ok { + for k := range l { + transformed[k] = v[k] + } + } + + return transformed +} diff --git a/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_delivery_pipeline_generated_test.go.tmpl b/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_delivery_pipeline_generated_test.go.tmpl new file mode 100644 index 000000000000..ba1ec23e9dd0 --- /dev/null +++ b/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_delivery_pipeline_generated_test.go.tmpl @@ -0,0 +1,755 @@ +package clouddeploy_test + +import ( + "context" + "fmt" + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" +{{- if eq $.TargetVersionName "ga" }} + clouddeploy "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy" +{{- else }} + clouddeploy "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/beta" +{{- end }} + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +{{- if eq $.TargetVersionName "ga" }} +func TestAccClouddeployDeliveryPipeline_DeliveryPipeline(t *testing.T) { +{{- else }} +func TestAccClouddeployDeliveryPipeline_CanaryDeliveryPipeline(t *testing.T) { +{{- 
end }} + t.Parallel() + + context := map[string]interface{}{ + "project_name": envvar.GetTestProjectFromEnv(), + "region": envvar.GetTestRegionFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ +{{- if ne $.TargetVersionName "ga" }} + PreCheck: func() { acctest.AccTestPreCheck(t) }, + + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckClouddeployDeliveryPipelineDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccClouddeployDeliveryPipeline_CanaryDeliveryPipeline(context), + }, + { + ResourceName: "google_clouddeploy_delivery_pipeline.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels", "annotations"}, + }, + { + Config: testAccClouddeployDeliveryPipeline_CanaryDeliveryPipelineUpdate0(context), + }, + { + ResourceName: "google_clouddeploy_delivery_pipeline.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels", "annotations"}, + }, + }, + }) +} +func TestAccClouddeployDeliveryPipeline_CanaryServiceNetworkingDeliveryPipeline(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": envvar.GetTestProjectFromEnv(), + "region": envvar.GetTestRegionFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckClouddeployDeliveryPipelineDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccClouddeployDeliveryPipeline_CanaryServiceNetworkingDeliveryPipeline(context), + }, + { + ResourceName: "google_clouddeploy_delivery_pipeline.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels", "annotations"}, + }, + { + Config: 
testAccClouddeployDeliveryPipeline_CanaryServiceNetworkingDeliveryPipelineUpdate0(context), + }, + { + ResourceName: "google_clouddeploy_delivery_pipeline.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels", "annotations"}, + }, + }, + }) +} +func TestAccClouddeployDeliveryPipeline_CanaryrunDeliveryPipeline(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": envvar.GetTestProjectFromEnv(), + "region": envvar.GetTestRegionFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckClouddeployDeliveryPipelineDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccClouddeployDeliveryPipeline_CanaryrunDeliveryPipeline(context), + }, + { + ResourceName: "google_clouddeploy_delivery_pipeline.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels", "annotations"}, + }, + { + Config: testAccClouddeployDeliveryPipeline_CanaryrunDeliveryPipelineUpdate0(context), + }, + { + ResourceName: "google_clouddeploy_delivery_pipeline.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels", "annotations"}, + }, + }, + }) +} +func TestAccClouddeployDeliveryPipeline_DeliveryPipeline(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": envvar.GetTestProjectFromEnv(), + "region": envvar.GetTestRegionFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ +{{- end }} + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckClouddeployDeliveryPipelineDestroyProducer(t), + Steps: 
[]resource.TestStep{ + { + Config: testAccClouddeployDeliveryPipeline_DeliveryPipeline(context), + }, + { + ResourceName: "google_clouddeploy_delivery_pipeline.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels", "annotations"}, + }, + { + Config: testAccClouddeployDeliveryPipeline_DeliveryPipelineUpdate0(context), + }, + { + ResourceName: "google_clouddeploy_delivery_pipeline.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels", "annotations"}, + }, + }, + }) +} +{{- if eq $.TargetVersionName "ga" }} + +func testAccClouddeployDeliveryPipeline_DeliveryPipeline(context map[string]interface{}) string { +{{- else }} +func TestAccClouddeployDeliveryPipeline_VerifyDeliveryPipeline(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": envvar.GetTestProjectFromEnv(), + "region": envvar.GetTestRegionFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckClouddeployDeliveryPipelineDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccClouddeployDeliveryPipeline_VerifyDeliveryPipeline(context), + }, + { + ResourceName: "google_clouddeploy_delivery_pipeline.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels", "annotations"}, + }, + { + Config: testAccClouddeployDeliveryPipeline_VerifyDeliveryPipelineUpdate0(context), + }, + { + ResourceName: "google_clouddeploy_delivery_pipeline.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels", "annotations"}, + }, + }, + }) +} + +func testAccClouddeployDeliveryPipeline_CanaryDeliveryPipeline(context 
map[string]interface{}) string { +{{- end }} + return acctest.Nprintf(` +resource "google_clouddeploy_delivery_pipeline" "primary" { + location = "%{region}" + name = "tf-test-pipeline%{random_suffix}" + description = "basic description" + project = "%{project_name}" + + serial_pipeline { + stages { + deploy_parameters { + values = { + deployParameterKey = "deployParameterValue" + } + + match_target_labels = {} + } + + profiles = ["example-profile-one", "example-profile-two"] + target_id = "example-target-one" + } + + stages { + profiles = [] + target_id = "example-target-two" + } + } + + annotations = { + my_first_annotation = "example-annotation-1" + + my_second_annotation = "example-annotation-2" + } + + labels = { + my_first_label = "example-label-1" + + my_second_label = "example-label-2" + } +{{- if ne $.TargetVersionName "ga" }} + provider = google-beta +} + +`, context) +} + +func testAccClouddeployDeliveryPipeline_CanaryDeliveryPipelineUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_clouddeploy_delivery_pipeline" "primary" { + location = "%{region}" + name = "tf-test-pipeline%{random_suffix}" + description = "updated description" + project = "%{project_name}" + + serial_pipeline { + stages { + profiles = ["new-example-profile"] + + strategy { + canary { + custom_canary_deployment { + phase_configs { + percentage = 50 + phase_id = "first" + verify = true + } + + phase_configs { + percentage = 100 + phase_id = "stable" + verify = false + } + } + + runtime_config { + kubernetes { + gateway_service_mesh { + deployment = "example-deployment" + http_route = "example-http-route" + service = "example-service" + pod_selector_label = "example.com/app-name" + } + } + } + } + } + + target_id = "example-target-two" + } + + stages { + profiles = ["example-profile-four", "example-profile-five"] + + strategy { + canary { + canary_deployment { + percentages = [0, 5, 20] + verify = true + } + + runtime_config { + kubernetes { + 
gateway_service_mesh { + deployment = "example-deployment" + http_route = "example-http-route" + service = "example-service" + pod_selector_label = "example.com/app-name" + + route_destinations { + destination_ids = ["example-destination-id"] + propagate_service = true + } + } + } + } + } + } + + target_id = "example-target-three" + } + } + + annotations = { + my_second_annotation = "updated-example-annotation-2" + + my_third_annotation = "example-annotation-3" + } + + labels = { + my_second_label = "updated-example-label-2" + + my_third_label = "example-label-3" + } + provider = google-beta +} + +`, context) +} + +func testAccClouddeployDeliveryPipeline_CanaryServiceNetworkingDeliveryPipeline(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_clouddeploy_delivery_pipeline" "primary" { + location = "%{region}" + name = "tf-test-pipeline%{random_suffix}" + description = "basic description" + project = "%{project_name}" + + serial_pipeline { + stages { + deploy_parameters { + values = { + deployParameterKey = "deployParameterValue" + } + + match_target_labels = {} + } + + profiles = ["example-profile-one", "example-profile-two"] + target_id = "example-target-one" + } + + stages { + profiles = [] + target_id = "example-target-two" + } + } + + annotations = { + my_first_annotation = "example-annotation-1" + + my_second_annotation = "example-annotation-2" + } + + labels = { + my_first_label = "example-label-1" + + my_second_label = "example-label-2" + } + provider = google-beta +} + +`, context) +} + +func testAccClouddeployDeliveryPipeline_CanaryServiceNetworkingDeliveryPipelineUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_clouddeploy_delivery_pipeline" "primary" { + location = "%{region}" + name = "tf-test-pipeline%{random_suffix}" + description = "updated description" + project = "%{project_name}" + + serial_pipeline { + stages { + profiles = ["new-example-profile"] + + strategy { + 
canary { + canary_deployment { + percentages = [25] + verify = true + } + + runtime_config { + kubernetes { + service_networking { + deployment = "example-deployment" + service = "example-service" + pod_selector_label = "example.com/app-name" + } + } + } + } + } + + target_id = "example-target-two" + } + } + + annotations = { + my_second_annotation = "updated-example-annotation-2" + + my_third_annotation = "example-annotation-3" + } + + labels = { + my_second_label = "updated-example-label-2" + + my_third_label = "example-label-3" + } + provider = google-beta +} + +`, context) +} + +func testAccClouddeployDeliveryPipeline_CanaryrunDeliveryPipeline(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_clouddeploy_delivery_pipeline" "primary" { + location = "%{region}" + name = "tf-test-pipeline%{random_suffix}" + description = "basic description" + project = "%{project_name}" + + serial_pipeline { + stages { + deploy_parameters { + values = { + deployParameterKey = "deployParameterValue" + } + + match_target_labels = {} + } + + profiles = ["example-profile-one", "example-profile-two"] + target_id = "example-target-one" + } + + stages { + profiles = [] + target_id = "example-target-two" + } + } + + annotations = { + my_first_annotation = "example-annotation-1" + + my_second_annotation = "example-annotation-2" + } + + labels = { + my_first_label = "example-label-1" + + my_second_label = "example-label-2" + } + provider = google-beta +} + +`, context) +} + +func testAccClouddeployDeliveryPipeline_CanaryrunDeliveryPipelineUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_clouddeploy_delivery_pipeline" "primary" { + location = "%{region}" + name = "tf-test-pipeline%{random_suffix}" + description = "updated description" + project = "%{project_name}" + + serial_pipeline { + stages { + profiles = ["new-example-profile"] + + strategy { + canary { + canary_deployment { + percentages = [25] + verify = 
true + } + + runtime_config { + cloud_run { + automatic_traffic_control = true + } + } + } + } + + target_id = "example-target-two" + } + } + + annotations = { + my_second_annotation = "updated-example-annotation-2" + + my_third_annotation = "example-annotation-3" + } + + labels = { + my_second_label = "updated-example-label-2" + + my_third_label = "example-label-3" + } + provider = google-beta +} + +`, context) +} + +func testAccClouddeployDeliveryPipeline_DeliveryPipeline(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_clouddeploy_delivery_pipeline" "primary" { + location = "%{region}" + name = "tf-test-pipeline%{random_suffix}" + description = "basic description" + project = "%{project_name}" + + serial_pipeline { + stages { + deploy_parameters { + values = { + deployParameterKey = "deployParameterValue" + } + + match_target_labels = {} + } + + profiles = ["example-profile-one", "example-profile-two"] + target_id = "example-target-one" + } + + stages { + profiles = [] + target_id = "example-target-two" + } + } + + annotations = { + my_first_annotation = "example-annotation-1" + + my_second_annotation = "example-annotation-2" + } + + labels = { + my_first_label = "example-label-1" + + my_second_label = "example-label-2" + } +{{- end }} +} + + +`, context) +} + +func testAccClouddeployDeliveryPipeline_DeliveryPipelineUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_clouddeploy_delivery_pipeline" "primary" { + location = "%{region}" + name = "tf-test-pipeline%{random_suffix}" + description = "updated description" + project = "%{project_name}" + + serial_pipeline { + stages { + profiles = ["new-example-profile"] + target_id = "example-target-two" + } + + stages { + profiles = ["example-profile-four", "example-profile-five"] + target_id = "example-target-three" + } + } + + suspended = true + + annotations = { + my_second_annotation = "updated-example-annotation-2" + + 
my_third_annotation = "example-annotation-3" + } + + labels = { + my_second_label = "updated-example-label-2" + + my_third_label = "example-label-3" + } +} + +{{- if ne $.TargetVersionName "ga" }} + +`, context) +} + +func testAccClouddeployDeliveryPipeline_VerifyDeliveryPipeline(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_clouddeploy_delivery_pipeline" "primary" { + location = "%{region}" + name = "tf-test-pipeline%{random_suffix}" + description = "basic description" + project = "%{project_name}" + + serial_pipeline { + stages { + deploy_parameters { + values = { + deployParameterKey = "deployParameterValue" + } + + match_target_labels = {} + } + + profiles = ["example-profile-one", "example-profile-two"] + target_id = "example-target-one" + } + + stages { + profiles = [] + target_id = "example-target-two" + } + } + + annotations = { + my_first_annotation = "example-annotation-1" + + my_second_annotation = "example-annotation-2" + } + + labels = { + my_first_label = "example-label-1" + + my_second_label = "example-label-2" + } + provider = google-beta +} + +`, context) +} + +func testAccClouddeployDeliveryPipeline_VerifyDeliveryPipelineUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_clouddeploy_delivery_pipeline" "primary" { + location = "%{region}" + name = "tf-test-pipeline%{random_suffix}" + description = "updated description" + project = "%{project_name}" + + serial_pipeline { + stages { + profiles = ["new-example-profile"] + + strategy { + standard { + verify = true + } + } + + target_id = "example-target-two" + } + + stages { + profiles = ["example-profile-four", "example-profile-five"] + target_id = "example-target-three" + } + } + + annotations = { + my_second_annotation = "updated-example-annotation-2" + + my_third_annotation = "example-annotation-3" + } + + labels = { + my_second_label = "updated-example-label-2" + + my_third_label = "example-label-3" + } + provider = 
google-beta
+}
+{{- end }}
+
+`, context)
+}
+
+func testAccCheckClouddeployDeliveryPipelineDestroyProducer(t *testing.T) func(s *terraform.State) error {
+	return func(s *terraform.State) error {
+		for name, rs := range s.RootModule().Resources {
+			if rs.Type != "google_clouddeploy_delivery_pipeline" {
+				continue
+			}
+			if strings.HasPrefix(name, "data.") {
+				continue
+			}
+
+			config := acctest.GoogleProviderConfig(t)
+
+			billingProject := ""
+			if config.BillingProject != "" {
+				billingProject = config.BillingProject
+			}
+
+			obj := &clouddeploy.DeliveryPipeline{
+				Location:    dcl.String(rs.Primary.Attributes["location"]),
+				Name:        dcl.String(rs.Primary.Attributes["name"]),
+				Description: dcl.String(rs.Primary.Attributes["description"]),
+				Project:     dcl.StringOrNil(rs.Primary.Attributes["project"]),
+				Suspended:   dcl.Bool(rs.Primary.Attributes["suspended"] == "true"),
+				CreateTime:  dcl.StringOrNil(rs.Primary.Attributes["create_time"]),
+				Etag:        dcl.StringOrNil(rs.Primary.Attributes["etag"]),
+				Uid:         dcl.StringOrNil(rs.Primary.Attributes["uid"]),
+				UpdateTime:  dcl.StringOrNil(rs.Primary.Attributes["update_time"]),
+			}
+
+			client := transport_tpg.NewDCLClouddeployClient(config, config.UserAgent, billingProject, 0)
+			_, err := client.GetDeliveryPipeline(context.Background(), obj)
+			if err == nil {
+				return fmt.Errorf("google_clouddeploy_delivery_pipeline still exists %v", obj)
+			}
+		}
+		return nil
+	}
+}
diff --git a/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_delivery_pipeline_sweeper.go b/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_delivery_pipeline_sweeper.go
new file mode 100644
index 000000000000..8cfa22a46176
--- /dev/null
+++ b/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_delivery_pipeline_sweeper.go
@@ -0,0 +1,53 @@
+package clouddeploy
+
+import (
+	"context"
+	"log"
+	"testing"
+
+	"github.com/hashicorp/terraform-provider-google/google/envvar"
+
"github.com/hashicorp/terraform-provider-google/google/sweeper" +) + +func init() { + sweeper.AddTestSweepersLegacy("ClouddeployDeliveryPipeline", testSweepClouddeployDeliveryPipeline) +} + +func testSweepClouddeployDeliveryPipeline(region string) error { + log.Print("[INFO][SWEEPER_LOG] Starting sweeper for ClouddeployDeliveryPipeline") + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to be used for Delete arguments. + d := map[string]string{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + } + + client := NewDCLClouddeployClient(config, config.UserAgent, "", 0) + err = client.DeleteAllDeliveryPipeline(context.Background(), d["project"], d["location"], isDeletableClouddeployDeliveryPipeline) + if err != nil { + return err + } + return nil +} + +func isDeletableClouddeployDeliveryPipeline(r *DeliveryPipeline) bool { + return sweeper.IsSweepableTestResource(*r.Name) +} diff --git a/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_target.go b/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_target.go new file mode 100644 index 000000000000..5192f27461eb --- /dev/null +++ b/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_target.go @@ -0,0 +1,1154 @@ +package clouddeploy + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + + 
"github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceClouddeployTarget() *schema.Resource { + return &schema.Resource{ + Create: resourceClouddeployTargetCreate, + Read: resourceClouddeployTargetRead, + Update: resourceClouddeployTargetUpdate, + Delete: resourceClouddeployTargetDelete, + + Importer: &schema.ResourceImporter{ + State: resourceClouddeployTargetImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + tpgresource.SetLabelsDiff, + tpgresource.SetAnnotationsDiff, + ), + + Schema: map[string]*schema.Schema{ + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The location for the resource", + }, + + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Name of the `Target`. Format is `[a-z]([a-z0-9-]{0,61}[a-z0-9])?`.", + }, + + "anthos_cluster": { + Type: schema.TypeList, + Optional: true, + Description: "Information specifying an Anthos Cluster.", + MaxItems: 1, + Elem: ClouddeployTargetAnthosClusterSchema(), + ConflictsWith: []string{"gke", "run", "multi_target", "custom_target"}, + }, + + "associated_entities": { + Type: schema.TypeSet, + Optional: true, + Description: "Optional. Map of entity IDs to their associated entities. Associated entities allows specifying places other than the deployment target for specific features. For example, the Gateway API canary can be configured to deploy the HTTPRoute to a different cluster(s) than the deployment cluster using associated entities. 
An entity ID must consist of lower-case letters, numbers, and hyphens, start with a letter and end with a letter or a number, and have a max length of 63 characters. In other words, it must match the following regex: `^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$`.", + Elem: ClouddeployTargetAssociatedEntitiesSchema(), + Set: schema.HashResource(ClouddeployTargetAssociatedEntitiesSchema()), + }, + + "custom_target": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. Information specifying a Custom Target.", + MaxItems: 1, + Elem: ClouddeployTargetCustomTargetSchema(), + ConflictsWith: []string{"gke", "anthos_cluster", "run", "multi_target"}, + }, + + "deploy_parameters": { + Type: schema.TypeMap, + Optional: true, + Description: "Optional. The deploy parameters to use for this target.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "description": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. Description of the `Target`. Max length is 255 characters.", + }, + + "effective_annotations": { + Type: schema.TypeMap, + Computed: true, + Description: "All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services.", + }, + + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + Description: "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + }, + + "execution_configs": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: "Configurations for all execution that relates to this `Target`. Each `ExecutionEnvironmentUsage` value may only be used in a single configuration; using the same value multiple times is an error. When one or more configurations are specified, they must include the `RENDER` and `DEPLOY` `ExecutionEnvironmentUsage` values. 
When no configurations are specified, execution will use the default specified in `DefaultPool`.", + Elem: ClouddeployTargetExecutionConfigsSchema(), + }, + + "gke": { + Type: schema.TypeList, + Optional: true, + Description: "Information specifying a GKE Cluster.", + MaxItems: 1, + Elem: ClouddeployTargetGkeSchema(), + ConflictsWith: []string{"anthos_cluster", "run", "multi_target", "custom_target"}, + }, + + "multi_target": { + Type: schema.TypeList, + Optional: true, + Description: "Information specifying a multiTarget.", + MaxItems: 1, + Elem: ClouddeployTargetMultiTargetSchema(), + ConflictsWith: []string{"gke", "anthos_cluster", "run", "custom_target"}, + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The project for the resource", + }, + + "require_approval": { + Type: schema.TypeBool, + Optional: true, + Description: "Optional. Whether or not the `Target` requires approval.", + }, + + "run": { + Type: schema.TypeList, + Optional: true, + Description: "Information specifying a Cloud Run deployment target.", + MaxItems: 1, + Elem: ClouddeployTargetRunSchema(), + ConflictsWith: []string{"gke", "anthos_cluster", "multi_target", "custom_target"}, + }, + + "annotations": { + Type: schema.TypeMap, + Optional: true, + Description: "Optional. User annotations. These attributes can only be set and used by the user, and not by Google Cloud Deploy. See https://google.aip.dev/128#annotations for more details such as format and size limitations.\n\n**Note**: This field is non-authoritative, and will only manage the annotations present in your configuration.\nPlease refer to the field `effective_annotations` for all of the annotations present on the resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. 
Time at which the `Target` was created.", + }, + + "etag": { + Type: schema.TypeString, + Computed: true, + Description: "Optional. This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.", + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: "Optional. Labels are attributes that can be set and used by both the user and by Google Cloud Deploy. Labels must meet the following constraints: * Keys and values can contain only lowercase letters, numeric characters, underscores, and dashes. * All characters must use UTF-8 encoding, and international characters are allowed. * Keys must start with a lowercase letter or international character. * Each resource is limited to a maximum of 64 labels. Both keys and values are additionally constrained to be <= 128 bytes.\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field `effective_labels` for all of the labels present on the resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "target_id": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. Resource id of the `Target`.", + }, + + "terraform_labels": { + Type: schema.TypeMap, + Computed: true, + Description: "The combination of labels configured directly on the resource and default labels configured on the provider.", + }, + + "uid": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. Unique identifier of the `Target`.", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. 
Most recent time at which the `Target` was updated.", + }, + }, + } +} + +func ClouddeployTargetAnthosClusterSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "membership": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Membership of the GKE Hub-registered cluster to which to apply the Skaffold configuration. Format is `projects/{project}/locations/{location}/memberships/{membership_name}`.", + }, + }, + } +} + +func ClouddeployTargetAssociatedEntitiesSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "entity_id": { + Type: schema.TypeString, + Required: true, + Description: "The name for the key in the map for which this object is mapped to in the API", + }, + + "anthos_clusters": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. Information specifying Anthos clusters as associated entities.", + Elem: ClouddeployTargetAssociatedEntitiesAnthosClustersSchema(), + }, + + "gke_clusters": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. Information specifying GKE clusters as associated entities.", + Elem: ClouddeployTargetAssociatedEntitiesGkeClustersSchema(), + }, + }, + } +} + +func ClouddeployTargetAssociatedEntitiesAnthosClustersSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "membership": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Optional. Membership of the GKE Hub-registered cluster to which to apply the Skaffold configuration. 
Format is `projects/{project}/locations/{location}/memberships/{membership_name}`.", + }, + }, + } +} + +func ClouddeployTargetAssociatedEntitiesGkeClustersSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cluster": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Optional. Information specifying a GKE Cluster. Format is `projects/{project_id}/locations/{location_id}/clusters/{cluster_id}`.", + }, + + "internal_ip": { + Type: schema.TypeBool, + Optional: true, + Description: "Optional. If true, `cluster` is accessed using the private IP address of the control plane endpoint. Otherwise, the default IP address of the control plane endpoint is used. The default IP address is the private IP address for clusters with private control-plane endpoints and the public IP address otherwise. Only specify this option when `cluster` is a [private GKE cluster](https://cloud.google.com/kubernetes-engine/docs/concepts/private-cluster-concept).", + }, + + "proxy_url": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. If set, used to configure a [proxy](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/#proxy) to the Kubernetes server.", + }, + }, + } +} + +func ClouddeployTargetCustomTargetSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "custom_target_type": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Required. The name of the CustomTargetType. Format must be `projects/{project}/locations/{location}/customTargetTypes/{custom_target_type}`.", + }, + }, + } +} + +func ClouddeployTargetExecutionConfigsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "usages": { + Type: schema.TypeList, + Required: true, + Description: "Required. 
Usages when this configuration should be applied.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "artifact_storage": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: "Optional. Cloud Storage location in which to store execution outputs. This can either be a bucket (\"gs://my-bucket\") or a path within a bucket (\"gs://my-bucket/my-dir\"). If unspecified, a default bucket located in the same region will be used.", + }, + + "execution_timeout": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: "Optional. Execution timeout for a Cloud Build Execution. This must be between 10m and 24h in seconds format. If unspecified, a default timeout of 1h is used.", + }, + + "service_account": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: "Optional. Google service account to use for execution. If unspecified, the project execution service account (-compute@developer.gserviceaccount.com) is used.", + }, + + "verbose": { + Type: schema.TypeBool, + Optional: true, + Description: "Optional. If true, additional logging will be enabled when running builds in this execution environment.", + }, + + "worker_pool": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Optional. The resource name of the `WorkerPool`, with the format `projects/{project}/locations/{location}/workerPools/{worker_pool}`. If this optional field is unspecified, the default Cloud Build pool will be used.", + }, + }, + } +} + +func ClouddeployTargetGkeSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cluster": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Information specifying a GKE Cluster. 
Format is `projects/{project_id}/locations/{location_id}/clusters/{cluster_id}.", + }, + + "dns_endpoint": { + Type: schema.TypeBool, + Optional: true, + Description: "Optional. If set, the cluster will be accessed using the DNS endpoint. Note that both `dns_endpoint` and `internal_ip` cannot be set to true.", + }, + + "internal_ip": { + Type: schema.TypeBool, + Optional: true, + Description: "Optional. If true, `cluster` is accessed using the private IP address of the control plane endpoint. Otherwise, the default IP address of the control plane endpoint is used. The default IP address is the private IP address for clusters with private control-plane endpoints and the public IP address otherwise. Only specify this option when `cluster` is a [private GKE cluster](https://cloud.google.com/kubernetes-engine/docs/concepts/private-cluster-concept).", + }, + + "proxy_url": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. If set, used to configure a [proxy](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/#proxy) to the Kubernetes server.", + }, + }, + } +} + +func ClouddeployTargetMultiTargetSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "target_ids": { + Type: schema.TypeList, + Required: true, + Description: "Required. The target_ids of this multiTarget.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func ClouddeployTargetRunSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "location": { + Type: schema.TypeString, + Required: true, + Description: "Required. The location where the Cloud Run Service should be located. 
Format is `projects/{project}/locations/{location}`.", + }, + }, + } +} + +func resourceClouddeployTargetCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Target{ + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + AnthosCluster: expandClouddeployTargetAnthosCluster(d.Get("anthos_cluster")), + AssociatedEntities: expandClouddeployTargetAssociatedEntitiesMap(d.Get("associated_entities")), + CustomTarget: expandClouddeployTargetCustomTarget(d.Get("custom_target")), + DeployParameters: tpgresource.CheckStringMap(d.Get("deploy_parameters")), + Description: dcl.String(d.Get("description").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + ExecutionConfigs: expandClouddeployTargetExecutionConfigsArray(d.Get("execution_configs")), + Gke: expandClouddeployTargetGke(d.Get("gke")), + MultiTarget: expandClouddeployTargetMultiTarget(d.Get("multi_target")), + Project: dcl.String(project), + RequireApproval: dcl.Bool(d.Get("require_approval").(bool)), + Run: expandClouddeployTargetRun(d.Get("run")), + } + + id, err := obj.ID() + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLClouddeployClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could 
not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyTarget(context.Background(), obj, directive...) + + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating Target: %s", err) + } + + log.Printf("[DEBUG] Finished creating Target %q: %#v", d.Id(), res) + + return resourceClouddeployTargetRead(d, meta) +} + +func resourceClouddeployTargetRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Target{ + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + AnthosCluster: expandClouddeployTargetAnthosCluster(d.Get("anthos_cluster")), + AssociatedEntities: expandClouddeployTargetAssociatedEntitiesMap(d.Get("associated_entities")), + CustomTarget: expandClouddeployTargetCustomTarget(d.Get("custom_target")), + DeployParameters: tpgresource.CheckStringMap(d.Get("deploy_parameters")), + Description: dcl.String(d.Get("description").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + ExecutionConfigs: expandClouddeployTargetExecutionConfigsArray(d.Get("execution_configs")), + Gke: expandClouddeployTargetGke(d.Get("gke")), + MultiTarget: expandClouddeployTargetMultiTarget(d.Get("multi_target")), + Project: dcl.String(project), + RequireApproval: dcl.Bool(d.Get("require_approval").(bool)), + Run: expandClouddeployTargetRun(d.Get("run")), + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, 
err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLClouddeployClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetTarget(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("ClouddeployTarget %q", d.Id()) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("location", res.Location); err != nil { + return fmt.Errorf("error setting location in state: %s", err) + } + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + if err = d.Set("anthos_cluster", flattenClouddeployTargetAnthosCluster(res.AnthosCluster)); err != nil { + return fmt.Errorf("error setting anthos_cluster in state: %s", err) + } + if err = d.Set("associated_entities", flattenClouddeployTargetAssociatedEntitiesMap(res.AssociatedEntities)); err != nil { + return fmt.Errorf("error setting associated_entities in state: %s", err) + } + if err = d.Set("custom_target", flattenClouddeployTargetCustomTarget(res.CustomTarget)); err != nil { + return fmt.Errorf("error setting custom_target in state: %s", err) + } + if err = d.Set("deploy_parameters", res.DeployParameters); err != nil { + return fmt.Errorf("error setting deploy_parameters in state: %s", err) + } + if err = d.Set("description", res.Description); err != nil { + return fmt.Errorf("error setting description in state: %s", err) + } + if err = d.Set("effective_annotations", res.Annotations); err != nil { + return fmt.Errorf("error setting effective_annotations in state: %s", err) + } + if err = d.Set("effective_labels", res.Labels); err != nil { + return fmt.Errorf("error setting effective_labels in state: %s", 
err) + } + if err = d.Set("execution_configs", flattenClouddeployTargetExecutionConfigsArray(res.ExecutionConfigs)); err != nil { + return fmt.Errorf("error setting execution_configs in state: %s", err) + } + if err = d.Set("gke", flattenClouddeployTargetGke(res.Gke)); err != nil { + return fmt.Errorf("error setting gke in state: %s", err) + } + if err = d.Set("multi_target", flattenClouddeployTargetMultiTarget(res.MultiTarget)); err != nil { + return fmt.Errorf("error setting multi_target in state: %s", err) + } + if err = d.Set("project", res.Project); err != nil { + return fmt.Errorf("error setting project in state: %s", err) + } + if err = d.Set("require_approval", res.RequireApproval); err != nil { + return fmt.Errorf("error setting require_approval in state: %s", err) + } + if err = d.Set("run", flattenClouddeployTargetRun(res.Run)); err != nil { + return fmt.Errorf("error setting run in state: %s", err) + } + if err = d.Set("annotations", flattenClouddeployTargetAnnotations(res.Annotations, d)); err != nil { + return fmt.Errorf("error setting annotations in state: %s", err) + } + if err = d.Set("create_time", res.CreateTime); err != nil { + return fmt.Errorf("error setting create_time in state: %s", err) + } + if err = d.Set("etag", res.Etag); err != nil { + return fmt.Errorf("error setting etag in state: %s", err) + } + if err = d.Set("labels", flattenClouddeployTargetLabels(res.Labels, d)); err != nil { + return fmt.Errorf("error setting labels in state: %s", err) + } + if err = d.Set("target_id", res.TargetId); err != nil { + return fmt.Errorf("error setting target_id in state: %s", err) + } + if err = d.Set("terraform_labels", flattenClouddeployTargetTerraformLabels(res.Labels, d)); err != nil { + return fmt.Errorf("error setting terraform_labels in state: %s", err) + } + if err = d.Set("uid", res.Uid); err != nil { + return fmt.Errorf("error setting uid in state: %s", err) + } + if err = d.Set("update_time", res.UpdateTime); err != nil { + return 
fmt.Errorf("error setting update_time in state: %s", err) + } + + return nil +} +func resourceClouddeployTargetUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Target{ + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + AnthosCluster: expandClouddeployTargetAnthosCluster(d.Get("anthos_cluster")), + AssociatedEntities: expandClouddeployTargetAssociatedEntitiesMap(d.Get("associated_entities")), + CustomTarget: expandClouddeployTargetCustomTarget(d.Get("custom_target")), + DeployParameters: tpgresource.CheckStringMap(d.Get("deploy_parameters")), + Description: dcl.String(d.Get("description").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + ExecutionConfigs: expandClouddeployTargetExecutionConfigsArray(d.Get("execution_configs")), + Gke: expandClouddeployTargetGke(d.Get("gke")), + MultiTarget: expandClouddeployTargetMultiTarget(d.Get("multi_target")), + Project: dcl.String(project), + RequireApproval: dcl.Bool(d.Get("require_approval").(bool)), + Run: expandClouddeployTargetRun(d.Get("run")), + } + directive := tpgdclresource.UpdateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLClouddeployClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := 
client.ApplyTarget(context.Background(), obj, directive...) + + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error updating Target: %s", err) + } + + log.Printf("[DEBUG] Finished creating Target %q: %#v", d.Id(), res) + + return resourceClouddeployTargetRead(d, meta) +} + +func resourceClouddeployTargetDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Target{ + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + AnthosCluster: expandClouddeployTargetAnthosCluster(d.Get("anthos_cluster")), + AssociatedEntities: expandClouddeployTargetAssociatedEntitiesMap(d.Get("associated_entities")), + CustomTarget: expandClouddeployTargetCustomTarget(d.Get("custom_target")), + DeployParameters: tpgresource.CheckStringMap(d.Get("deploy_parameters")), + Description: dcl.String(d.Get("description").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + ExecutionConfigs: expandClouddeployTargetExecutionConfigsArray(d.Get("execution_configs")), + Gke: expandClouddeployTargetGke(d.Get("gke")), + MultiTarget: expandClouddeployTargetMultiTarget(d.Get("multi_target")), + Project: dcl.String(project), + RequireApproval: dcl.Bool(d.Get("require_approval").(bool)), + Run: expandClouddeployTargetRun(d.Get("run")), + } + + log.Printf("[DEBUG] Deleting Target %q", d.Id()) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == 
nil { + billingProject = bp + } + client := NewDCLClouddeployClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + if err := client.DeleteTarget(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting Target: %s", err) + } + + log.Printf("[DEBUG] Finished deleting Target %q", d.Id()) + return nil +} + +func resourceClouddeployTargetImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/targets/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/targets/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func expandClouddeployTargetAnthosCluster(o interface{}) *TargetAnthosCluster { + if o == nil { + return EmptyTargetAnthosCluster + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyTargetAnthosCluster + } + obj := objArr[0].(map[string]interface{}) + return &TargetAnthosCluster{ + Membership: dcl.String(obj["membership"].(string)), + } +} + +func flattenClouddeployTargetAnthosCluster(obj *TargetAnthosCluster) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "membership": obj.Membership, + } + + return []interface{}{transformed} + +} + +func expandClouddeployTargetAssociatedEntitiesMap(o interface{}) map[string]TargetAssociatedEntities { 
+ if o == nil { + return make(map[string]TargetAssociatedEntities) + } + + o = o.(*schema.Set).List() + + objs := o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return make(map[string]TargetAssociatedEntities) + } + + items := make(map[string]TargetAssociatedEntities) + for _, item := range objs { + i := expandClouddeployTargetAssociatedEntities(item) + if item != nil { + items[item.(map[string]interface{})["entity_id"].(string)] = *i + } + } + + return items +} + +func expandClouddeployTargetAssociatedEntities(o interface{}) *TargetAssociatedEntities { + if o == nil { + return EmptyTargetAssociatedEntities + } + + obj := o.(map[string]interface{}) + return &TargetAssociatedEntities{ + AnthosClusters: expandClouddeployTargetAssociatedEntitiesAnthosClustersArray(obj["anthos_clusters"]), + GkeClusters: expandClouddeployTargetAssociatedEntitiesGkeClustersArray(obj["gke_clusters"]), + } +} + +func flattenClouddeployTargetAssociatedEntitiesMap(objs map[string]TargetAssociatedEntities) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for name, item := range objs { + i := flattenClouddeployTargetAssociatedEntities(&item, name) + items = append(items, i) + } + + return items +} + +func flattenClouddeployTargetAssociatedEntities(obj *TargetAssociatedEntities, name string) interface{} { + if obj == nil { + return nil + } + transformed := map[string]interface{}{ + "anthos_clusters": flattenClouddeployTargetAssociatedEntitiesAnthosClustersArray(obj.AnthosClusters), + "gke_clusters": flattenClouddeployTargetAssociatedEntitiesGkeClustersArray(obj.GkeClusters), + } + + transformed["entity_id"] = name + + return transformed + +} +func expandClouddeployTargetAssociatedEntitiesAnthosClustersArray(o interface{}) []TargetAssociatedEntitiesAnthosClusters { + if o == nil { + return make([]TargetAssociatedEntitiesAnthosClusters, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return 
make([]TargetAssociatedEntitiesAnthosClusters, 0) + } + + items := make([]TargetAssociatedEntitiesAnthosClusters, 0, len(objs)) + for _, item := range objs { + i := expandClouddeployTargetAssociatedEntitiesAnthosClusters(item) + items = append(items, *i) + } + + return items +} + +func expandClouddeployTargetAssociatedEntitiesAnthosClusters(o interface{}) *TargetAssociatedEntitiesAnthosClusters { + if o == nil { + return EmptyTargetAssociatedEntitiesAnthosClusters + } + + obj := o.(map[string]interface{}) + return &TargetAssociatedEntitiesAnthosClusters{ + Membership: dcl.String(obj["membership"].(string)), + } +} + +func flattenClouddeployTargetAssociatedEntitiesAnthosClustersArray(objs []TargetAssociatedEntitiesAnthosClusters) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenClouddeployTargetAssociatedEntitiesAnthosClusters(&item) + items = append(items, i) + } + + return items +} + +func flattenClouddeployTargetAssociatedEntitiesAnthosClusters(obj *TargetAssociatedEntitiesAnthosClusters) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "membership": obj.Membership, + } + + return transformed + +} +func expandClouddeployTargetAssociatedEntitiesGkeClustersArray(o interface{}) []TargetAssociatedEntitiesGkeClusters { + if o == nil { + return make([]TargetAssociatedEntitiesGkeClusters, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return make([]TargetAssociatedEntitiesGkeClusters, 0) + } + + items := make([]TargetAssociatedEntitiesGkeClusters, 0, len(objs)) + for _, item := range objs { + i := expandClouddeployTargetAssociatedEntitiesGkeClusters(item) + items = append(items, *i) + } + + return items +} + +func expandClouddeployTargetAssociatedEntitiesGkeClusters(o interface{}) *TargetAssociatedEntitiesGkeClusters { + if o == nil { + return EmptyTargetAssociatedEntitiesGkeClusters + } + + obj := 
o.(map[string]interface{}) + return &TargetAssociatedEntitiesGkeClusters{ + Cluster: dcl.String(obj["cluster"].(string)), + InternalIP: dcl.Bool(obj["internal_ip"].(bool)), + ProxyUrl: dcl.String(obj["proxy_url"].(string)), + } +} + +func flattenClouddeployTargetAssociatedEntitiesGkeClustersArray(objs []TargetAssociatedEntitiesGkeClusters) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenClouddeployTargetAssociatedEntitiesGkeClusters(&item) + items = append(items, i) + } + + return items +} + +func flattenClouddeployTargetAssociatedEntitiesGkeClusters(obj *TargetAssociatedEntitiesGkeClusters) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "cluster": obj.Cluster, + "internal_ip": obj.InternalIP, + "proxy_url": obj.ProxyUrl, + } + + return transformed + +} + +func expandClouddeployTargetCustomTarget(o interface{}) *TargetCustomTarget { + if o == nil { + return EmptyTargetCustomTarget + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyTargetCustomTarget + } + obj := objArr[0].(map[string]interface{}) + return &TargetCustomTarget{ + CustomTargetType: dcl.String(obj["custom_target_type"].(string)), + } +} + +func flattenClouddeployTargetCustomTarget(obj *TargetCustomTarget) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "custom_target_type": obj.CustomTargetType, + } + + return []interface{}{transformed} + +} +func expandClouddeployTargetExecutionConfigsArray(o interface{}) []TargetExecutionConfigs { + if o == nil { + return nil + } + + objs := o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return nil + } + + items := make([]TargetExecutionConfigs, 0, len(objs)) + for _, item := range objs { + i := expandClouddeployTargetExecutionConfigs(item) + items = append(items, *i) + } + + return items +} + +func 
expandClouddeployTargetExecutionConfigs(o interface{}) *TargetExecutionConfigs { + if o == nil { + return nil + } + + obj := o.(map[string]interface{}) + return &TargetExecutionConfigs{ + Usages: expandClouddeployTargetExecutionConfigsUsagesArray(obj["usages"]), + ArtifactStorage: dcl.StringOrNil(obj["artifact_storage"].(string)), + ExecutionTimeout: dcl.StringOrNil(obj["execution_timeout"].(string)), + ServiceAccount: dcl.StringOrNil(obj["service_account"].(string)), + Verbose: dcl.Bool(obj["verbose"].(bool)), + WorkerPool: dcl.String(obj["worker_pool"].(string)), + } +} + +func flattenClouddeployTargetExecutionConfigsArray(objs []TargetExecutionConfigs) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenClouddeployTargetExecutionConfigs(&item) + items = append(items, i) + } + + return items +} + +func flattenClouddeployTargetExecutionConfigs(obj *TargetExecutionConfigs) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "usages": flattenClouddeployTargetExecutionConfigsUsagesArray(obj.Usages), + "artifact_storage": obj.ArtifactStorage, + "execution_timeout": obj.ExecutionTimeout, + "service_account": obj.ServiceAccount, + "verbose": obj.Verbose, + "worker_pool": obj.WorkerPool, + } + + return transformed + +} + +func expandClouddeployTargetGke(o interface{}) *TargetGke { + if o == nil { + return EmptyTargetGke + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyTargetGke + } + obj := objArr[0].(map[string]interface{}) + return &TargetGke{ + Cluster: dcl.String(obj["cluster"].(string)), + DnsEndpoint: dcl.Bool(obj["dns_endpoint"].(bool)), + InternalIP: dcl.Bool(obj["internal_ip"].(bool)), + ProxyUrl: dcl.String(obj["proxy_url"].(string)), + } +} + +func flattenClouddeployTargetGke(obj *TargetGke) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := 
map[string]interface{}{ + "cluster": obj.Cluster, + "dns_endpoint": obj.DnsEndpoint, + "internal_ip": obj.InternalIP, + "proxy_url": obj.ProxyUrl, + } + + return []interface{}{transformed} + +} + +func expandClouddeployTargetMultiTarget(o interface{}) *TargetMultiTarget { + if o == nil { + return EmptyTargetMultiTarget + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyTargetMultiTarget + } + obj := objArr[0].(map[string]interface{}) + return &TargetMultiTarget{ + TargetIds: tpgdclresource.ExpandStringArray(obj["target_ids"]), + } +} + +func flattenClouddeployTargetMultiTarget(obj *TargetMultiTarget) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "target_ids": obj.TargetIds, + } + + return []interface{}{transformed} + +} + +func expandClouddeployTargetRun(o interface{}) *TargetRun { + if o == nil { + return EmptyTargetRun + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyTargetRun + } + obj := objArr[0].(map[string]interface{}) + return &TargetRun{ + Location: dcl.String(obj["location"].(string)), + } +} + +func flattenClouddeployTargetRun(obj *TargetRun) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "location": obj.Location, + } + + return []interface{}{transformed} + +} + +func flattenClouddeployTargetLabels(v map[string]string, d *schema.ResourceData) interface{} { + if v == nil { + return nil + } + + transformed := make(map[string]interface{}) + if l, ok := d.Get("labels").(map[string]interface{}); ok { + for k := range l { + transformed[k] = v[k] + } + } + + return transformed +} + +func flattenClouddeployTargetTerraformLabels(v map[string]string, d *schema.ResourceData) interface{} { + if v == nil { + return nil + } + + transformed := make(map[string]interface{}) + if l, ok := d.Get("terraform_labels").(map[string]interface{}); ok { + for k := range l { + 
transformed[k] = v[k] + } + } + + return transformed +} + +func flattenClouddeployTargetAnnotations(v map[string]string, d *schema.ResourceData) interface{} { + if v == nil { + return nil + } + + transformed := make(map[string]interface{}) + if l, ok := d.Get("annotations").(map[string]interface{}); ok { + for k := range l { + transformed[k] = v[k] + } + } + + return transformed +} + +func flattenClouddeployTargetExecutionConfigsUsagesArray(obj []TargetExecutionConfigsUsagesEnum) interface{} { + if obj == nil { + return nil + } + items := []string{} + for _, item := range obj { + items = append(items, string(item)) + } + return items +} +func expandClouddeployTargetExecutionConfigsUsagesArray(o interface{}) []TargetExecutionConfigsUsagesEnum { + objs := o.([]interface{}) + items := make([]TargetExecutionConfigsUsagesEnum, 0, len(objs)) + for _, item := range objs { + i := TargetExecutionConfigsUsagesEnumRef(item.(string)) + items = append(items, *i) + } + return items +} diff --git a/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_target_generated_test.go.tmpl b/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_target_generated_test.go.tmpl new file mode 100644 index 000000000000..ffbc1f8e1e0c --- /dev/null +++ b/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_target_generated_test.go.tmpl @@ -0,0 +1,544 @@ +package clouddeploy_test + +import ( + "context" + "fmt" + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" +{{- if eq $.TargetVersionName "ga" }} + clouddeploy "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy" +{{- else }} + clouddeploy "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/beta" +{{- end }} + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "strings" + "testing" + + 
"github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +{{- if eq $.TargetVersionName "ga" }} +func TestAccClouddeployTarget_Target(t *testing.T) { +{{- else }} +func TestAccClouddeployTarget_MultiTarget(t *testing.T) { +{{- end }} + t.Parallel() + + context := map[string]interface{}{ + "project_name": envvar.GetTestProjectFromEnv(), + "region": envvar.GetTestRegionFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ +{{- if ne $.TargetVersionName "ga" }} + PreCheck: func() { acctest.AccTestPreCheck(t) }, + + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckClouddeployTargetDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccClouddeployTarget_MultiTarget(context), + }, + { + ResourceName: "google_clouddeploy_target.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels", "annotations"}, + }, + { + Config: testAccClouddeployTarget_MultiTargetUpdate0(context), + }, + { + ResourceName: "google_clouddeploy_target.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels", "annotations"}, + }, + }, + }) +} +func TestAccClouddeployTarget_RunTarget(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": envvar.GetTestProjectFromEnv(), + "region": envvar.GetTestRegionFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckClouddeployTargetDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccClouddeployTarget_RunTarget(context), + 
}, + { + ResourceName: "google_clouddeploy_target.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels", "annotations"}, + }, + { + Config: testAccClouddeployTarget_RunTargetUpdate0(context), + }, + { + ResourceName: "google_clouddeploy_target.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels", "annotations"}, + }, + }, + }) +} +func TestAccClouddeployTarget_Target(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": envvar.GetTestProjectFromEnv(), + "region": envvar.GetTestRegionFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ +{{- end }} + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckClouddeployTargetDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccClouddeployTarget_Target(context), + }, + { + ResourceName: "google_clouddeploy_target.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels", "annotations"}, + }, + { + Config: testAccClouddeployTarget_TargetUpdate0(context), + }, + { + ResourceName: "google_clouddeploy_target.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels", "annotations"}, + }, + { + Config: testAccClouddeployTarget_TargetUpdate1(context), + }, + { + ResourceName: "google_clouddeploy_target.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels", "annotations"}, + }, + { + Config: testAccClouddeployTarget_TargetUpdate2(context), + }, + { + ResourceName: "google_clouddeploy_target.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels", 
"annotations"}, + }, + { + Config: testAccClouddeployTarget_TargetUpdate3(context), + }, + { + ResourceName: "google_clouddeploy_target.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels", "annotations"}, + }, + }, + }) +{{- if ne $.TargetVersionName "ga" }} +} + +func testAccClouddeployTarget_MultiTarget(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_clouddeploy_target" "primary" { + location = "%{region}" + name = "tf-test-target%{random_suffix}" + deploy_parameters = {} + description = "multi-target description" + + execution_configs { + usages = ["RENDER", "DEPLOY"] + execution_timeout = "3600s" + } + + multi_target { + target_ids = ["1", "2"] + } + + project = "%{project_name}" + require_approval = false + + annotations = { + my_first_annotation = "example-annotation-1" + + my_second_annotation = "example-annotation-2" + } + + labels = { + my_first_label = "example-label-1" + + my_second_label = "example-label-2" + } + provider = google-beta +} + +`, context) +} + +func testAccClouddeployTarget_MultiTargetUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_clouddeploy_target" "primary" { + location = "%{region}" + name = "tf-test-target%{random_suffix}" + deploy_parameters = {} + description = "updated mutli-target description" + + multi_target { + target_ids = ["1", "2", "3"] + } + + project = "%{project_name}" + require_approval = true + + annotations = { + my_second_annotation = "updated-example-annotation-2" + + my_third_annotation = "example-annotation-3" + } + + labels = { + my_second_label = "example-label-2" + + my_third_label = "example-label-3" + } + provider = google-beta +} + +`, context) +} + +func testAccClouddeployTarget_RunTarget(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_clouddeploy_target" "primary" { + location = "%{region}" + name = 
"tf-test-target%{random_suffix}" + deploy_parameters = {} + description = "basic description" + + execution_configs { + usages = ["RENDER", "DEPLOY"] + execution_timeout = "3600s" + } + + project = "%{project_name}" + require_approval = false + + run { + location = "projects/%{project_name}/locations/%{region}" + } + + annotations = { + my_first_annotation = "example-annotation-1" + + my_second_annotation = "example-annotation-2" + } + + labels = { + my_first_label = "example-label-1" + + my_second_label = "example-label-2" + } + provider = google-beta +} + +`, context) +} + +func testAccClouddeployTarget_RunTargetUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_clouddeploy_target" "primary" { + location = "%{region}" + name = "tf-test-target%{random_suffix}" + deploy_parameters = {} + description = "basic description" + project = "%{project_name}" + require_approval = true + + run { + location = "projects/%{project_name}/locations/%{region}" + } + + annotations = { + my_first_annotation = "example-annotation-1" + + my_second_annotation = "example-annotation-2" + + my_third_annotation = "example-annotation-3" + } + + labels = { + my_first_label = "example-label-1" + + my_second_label = "example-label-2" + } + provider = google-beta +} + +`, context) +{{- end }} +} + +func testAccClouddeployTarget_Target(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_clouddeploy_target" "primary" { + location = "%{region}" + name = "tf-test-target%{random_suffix}" + + deploy_parameters = { + deployParameterKey = "deployParameterValue" + } + + description = "basic description" + + gke { + cluster = "projects/%{project_name}/locations/%{region}/clusters/example-cluster-name" + } + + project = "%{project_name}" + require_approval = false + + annotations = { + my_first_annotation = "example-annotation-1" + + my_second_annotation = "example-annotation-2" + } + + labels = { + my_first_label = 
"example-label-1" + + my_second_label = "example-label-2" + } +} + + +`, context) +} + +func testAccClouddeployTarget_TargetUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_clouddeploy_target" "primary" { + location = "%{region}" + name = "tf-test-target%{random_suffix}" + deploy_parameters = {} + description = "updated description" + + gke { + cluster = "projects/%{project_name}/locations/%{region}/clusters/different-example-cluster-name" + internal_ip = true + } + + project = "%{project_name}" + require_approval = true + + annotations = { + my_second_annotation = "updated-example-annotation-2" + + my_third_annotation = "example-annotation-3" + } + + labels = { + my_second_label = "updated-example-label-2" + + my_third_label = "example-label-3" + } +} + + +`, context) +} + +func testAccClouddeployTarget_TargetUpdate1(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_clouddeploy_target" "primary" { + location = "%{region}" + name = "tf-test-target%{random_suffix}" + deploy_parameters = {} + description = "updated description" + + execution_configs { + usages = ["RENDER", "DEPLOY"] + artifact_storage = "gs://my-bucket/my-dir" + service_account = "pool-owner@%{project_name}.iam.gserviceaccount.com" + } + + gke { + cluster = "projects/%{project_name}/locations/%{region}/clusters/different-example-cluster-name" + internal_ip = true + } + + project = "%{project_name}" + require_approval = true + + annotations = { + my_second_annotation = "updated-example-annotation-2" + + my_third_annotation = "example-annotation-3" + } + + labels = { + my_second_label = "updated-example-label-2" + + my_third_label = "example-label-3" + } +} + + +`, context) +} + +func testAccClouddeployTarget_TargetUpdate2(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_clouddeploy_target" "primary" { + location = "%{region}" + name = "tf-test-target%{random_suffix}" + 
deploy_parameters = {} + description = "updated description" + + execution_configs { + usages = ["RENDER"] + artifact_storage = "gs://my-bucket/my-dir" + service_account = "pool-owner@%{project_name}.iam.gserviceaccount.com" + } + + execution_configs { + usages = ["DEPLOY"] + artifact_storage = "gs://deploy-bucket/deploy-dir" + service_account = "deploy-pool-owner@%{project_name}.iam.gserviceaccount.com" + worker_pool = "projects/%{project_name}/locations/%{region}/workerPools/my-deploy-pool" + } + + gke { + cluster = "projects/%{project_name}/locations/%{region}/clusters/different-example-cluster-name" + internal_ip = true + } + + project = "%{project_name}" + require_approval = true + + annotations = { + my_second_annotation = "updated-example-annotation-2" + + my_third_annotation = "example-annotation-3" + } + + labels = { + my_second_label = "updated-example-label-2" + + my_third_label = "example-label-3" + } +} + + +`, context) +} + +func testAccClouddeployTarget_TargetUpdate3(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_clouddeploy_target" "primary" { + location = "%{region}" + name = "tf-test-target%{random_suffix}" + deploy_parameters = {} + description = "updated description" + + execution_configs { + usages = ["RENDER"] + artifact_storage = "gs://other-bucket/other-dir" + service_account = "other-owner@%{project_name}.iam.gserviceaccount.com" + verbose = true + } + + execution_configs { + usages = ["DEPLOY"] + artifact_storage = "gs://deploy-bucket/deploy-dir" + service_account = "deploy-pool-owner@%{project_name}.iam.gserviceaccount.com" + worker_pool = "projects/%{project_name}/locations/%{region}/workerPools/my-deploy-pool" + } + + gke { + cluster = "projects/%{project_name}/locations/%{region}/clusters/different-example-cluster-name" + internal_ip = true + proxy_url = "http://10.0.0.1" + } + + project = "%{project_name}" + require_approval = true + + annotations = { + my_second_annotation = 
"updated-example-annotation-2"
+
+    my_third_annotation = "example-annotation-3"
+  }
+
+  labels = {
+    my_second_label = "updated-example-label-2"
+
+    my_third_label = "example-label-3"
+  }
+}
+
+
+`, context)
+}
+
+func testAccCheckClouddeployTargetDestroyProducer(t *testing.T) func(s *terraform.State) error {
+	return func(s *terraform.State) error {
+		for name, rs := range s.RootModule().Resources {
+			if rs.Type != "google_clouddeploy_target" {
+				continue
+			}
+			if strings.HasPrefix(name, "data.") {
+				continue
+			}
+
+			config := acctest.GoogleProviderConfig(t)
+
+			billingProject := ""
+			if config.BillingProject != "" {
+				billingProject = config.BillingProject
+			}
+
+			obj := &clouddeploy.Target{
+				Location:        dcl.String(rs.Primary.Attributes["location"]),
+				Name:            dcl.String(rs.Primary.Attributes["name"]),
+				Description:     dcl.String(rs.Primary.Attributes["description"]),
+				Project:         dcl.StringOrNil(rs.Primary.Attributes["project"]),
+				RequireApproval: dcl.Bool(rs.Primary.Attributes["require_approval"] == "true"),
+				CreateTime:      dcl.StringOrNil(rs.Primary.Attributes["create_time"]),
+				Etag:            dcl.StringOrNil(rs.Primary.Attributes["etag"]),
+				TargetId:        dcl.StringOrNil(rs.Primary.Attributes["target_id"]),
+				Uid:             dcl.StringOrNil(rs.Primary.Attributes["uid"]),
+				UpdateTime:      dcl.StringOrNil(rs.Primary.Attributes["update_time"]),
+			}
+
+			client := transport_tpg.NewDCLClouddeployClient(config, config.UserAgent, billingProject, 0)
+			_, err := client.GetTarget(context.Background(), obj)
+			if err == nil {
+				return fmt.Errorf("google_clouddeploy_target still exists %v", obj)
+			}
+		}
+		return nil
+	}
+}
diff --git a/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_target_sweeper.go b/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_target_sweeper.go
new file mode 100644
index 000000000000..6b7be9f302ea
--- /dev/null
+++ b/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_target_sweeper.go
@@ -0,0 +1,53 @@
+package clouddeploy
+ +import ( + "context" + "log" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" +) + +func init() { + sweeper.AddTestSweepersLegacy("ClouddeployTarget", testSweepClouddeployTarget) +} + +func testSweepClouddeployTarget(region string) error { + log.Print("[INFO][SWEEPER_LOG] Starting sweeper for ClouddeployTarget") + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to be used for Delete arguments. + d := map[string]string{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + } + + client := NewDCLClouddeployClient(config, config.UserAgent, "", 0) + err = client.DeleteAllTarget(context.Background(), d["project"], d["location"], isDeletableClouddeployTarget) + if err != nil { + return err + } + return nil +} + +func isDeletableClouddeployTarget(r *Target) bool { + return sweeper.IsSweepableTestResource(*r.Name) +} diff --git a/mmv1/third_party/terraform/services/clouddeploy/target.go.tmpl b/mmv1/third_party/terraform/services/clouddeploy/target.go.tmpl new file mode 100644 index 000000000000..be22bd836d1a --- /dev/null +++ b/mmv1/third_party/terraform/services/clouddeploy/target.go.tmpl @@ -0,0 +1,882 @@ +package clouddeploy + +import ( + "bytes" + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "time" + +{{- if ne $.TargetVersionName "ga" }} + "google.golang.org/api/googleapi" +{{- end }} + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +{{- if eq $.TargetVersionName "ga" }} + 
"google.golang.org/api/googleapi" +{{- end }} +) + +type Target struct { + Name *string `json:"name"` + TargetId *string `json:"targetId"` + Uid *string `json:"uid"` + Description *string `json:"description"` + Annotations map[string]string `json:"annotations"` + Labels map[string]string `json:"labels"` + RequireApproval *bool `json:"requireApproval"` + CreateTime *string `json:"createTime"` + UpdateTime *string `json:"updateTime"` + Gke *TargetGke `json:"gke"` + AnthosCluster *TargetAnthosCluster `json:"anthosCluster"` + Etag *string `json:"etag"` + ExecutionConfigs []TargetExecutionConfigs `json:"executionConfigs"` + Project *string `json:"project"` + Location *string `json:"location"` + Run *TargetRun `json:"run"` + MultiTarget *TargetMultiTarget `json:"multiTarget"` + DeployParameters map[string]string `json:"deployParameters"` + CustomTarget *TargetCustomTarget `json:"customTarget"` + AssociatedEntities map[string]TargetAssociatedEntities `json:"associatedEntities"` +} + +func (r *Target) String() string { + return dcl.SprintResource(r) +} + +// The enum TargetExecutionConfigsUsagesEnum. +type TargetExecutionConfigsUsagesEnum string + +// TargetExecutionConfigsUsagesEnumRef returns a *TargetExecutionConfigsUsagesEnum with the value of string s +// If the empty string is provided, nil is returned. +func TargetExecutionConfigsUsagesEnumRef(s string) *TargetExecutionConfigsUsagesEnum { + v := TargetExecutionConfigsUsagesEnum(s) + return &v +} + +func (v TargetExecutionConfigsUsagesEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. 
+ return nil + } + for _, s := range []string{"EXECUTION_ENVIRONMENT_USAGE_UNSPECIFIED", "RENDER", "DEPLOY"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "TargetExecutionConfigsUsagesEnum", + Value: string(v), + Valid: []string{}, + } +} + +type TargetGke struct { + empty bool `json:"-"` + Cluster *string `json:"cluster"` + InternalIP *bool `json:"internalIP"` + ProxyUrl *string `json:"proxyUrl"` + DnsEndpoint *bool `json:"dnsEndpoint"` +} + +type jsonTargetGke TargetGke + +func (r *TargetGke) UnmarshalJSON(data []byte) error { + var res jsonTargetGke + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyTargetGke + } else { + + r.Cluster = res.Cluster + + r.InternalIP = res.InternalIP + + r.ProxyUrl = res.ProxyUrl + + r.DnsEndpoint = res.DnsEndpoint + + } + return nil +} + +// This object is used to assert a desired state where this TargetGke is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyTargetGke *TargetGke = &TargetGke{empty: true} + +func (r *TargetGke) Empty() bool { + return r.empty +} + +func (r *TargetGke) String() string { + return dcl.SprintResource(r) +} + +func (r *TargetGke) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type TargetAnthosCluster struct { + empty bool `json:"-"` + Membership *string `json:"membership"` +} + +type jsonTargetAnthosCluster TargetAnthosCluster + +func (r *TargetAnthosCluster) UnmarshalJSON(data []byte) error { + var res jsonTargetAnthosCluster + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyTargetAnthosCluster + } else { + + r.Membership = res.Membership + + } + return nil +} + +// This object is used to assert a desired state where this TargetAnthosCluster is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyTargetAnthosCluster *TargetAnthosCluster = &TargetAnthosCluster{empty: true} + +func (r *TargetAnthosCluster) Empty() bool { + return r.empty +} + +func (r *TargetAnthosCluster) String() string { + return dcl.SprintResource(r) +} + +func (r *TargetAnthosCluster) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type TargetExecutionConfigs struct { + empty bool `json:"-"` + Usages []TargetExecutionConfigsUsagesEnum `json:"usages"` + WorkerPool *string `json:"workerPool"` + ServiceAccount *string `json:"serviceAccount"` + ArtifactStorage *string `json:"artifactStorage"` + ExecutionTimeout *string `json:"executionTimeout"` + Verbose *bool `json:"verbose"` +} + +type jsonTargetExecutionConfigs TargetExecutionConfigs + +func (r *TargetExecutionConfigs) UnmarshalJSON(data []byte) error { + var res jsonTargetExecutionConfigs + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyTargetExecutionConfigs + } else { + + r.Usages = res.Usages + + r.WorkerPool = res.WorkerPool + + r.ServiceAccount = res.ServiceAccount + + r.ArtifactStorage = res.ArtifactStorage + + r.ExecutionTimeout = res.ExecutionTimeout + + r.Verbose = res.Verbose + + } + return nil +} + +// This object is used to assert a desired state where this TargetExecutionConfigs is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyTargetExecutionConfigs *TargetExecutionConfigs = &TargetExecutionConfigs{empty: true} + +func (r *TargetExecutionConfigs) Empty() bool { + return r.empty +} + +func (r *TargetExecutionConfigs) String() string { + return dcl.SprintResource(r) +} + +func (r *TargetExecutionConfigs) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type TargetRun struct { + empty bool `json:"-"` + Location *string `json:"location"` +} + +type jsonTargetRun TargetRun + +func (r *TargetRun) UnmarshalJSON(data []byte) error { + var res jsonTargetRun + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyTargetRun + } else { + + r.Location = res.Location + + } + return nil +} + +// This object is used to assert a desired state where this TargetRun is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyTargetRun *TargetRun = &TargetRun{empty: true} + +func (r *TargetRun) Empty() bool { + return r.empty +} + +func (r *TargetRun) String() string { + return dcl.SprintResource(r) +} + +func (r *TargetRun) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type TargetMultiTarget struct { + empty bool `json:"-"` + TargetIds []string `json:"targetIds"` +} + +type jsonTargetMultiTarget TargetMultiTarget + +func (r *TargetMultiTarget) UnmarshalJSON(data []byte) error { + var res jsonTargetMultiTarget + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyTargetMultiTarget + } else { + + r.TargetIds = res.TargetIds + + } + return nil +} + +// This object is used to assert a desired state where this TargetMultiTarget is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyTargetMultiTarget *TargetMultiTarget = &TargetMultiTarget{empty: true} + +func (r *TargetMultiTarget) Empty() bool { + return r.empty +} + +func (r *TargetMultiTarget) String() string { + return dcl.SprintResource(r) +} + +func (r *TargetMultiTarget) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type TargetCustomTarget struct { + empty bool `json:"-"` + CustomTargetType *string `json:"customTargetType"` +} + +type jsonTargetCustomTarget TargetCustomTarget + +func (r *TargetCustomTarget) UnmarshalJSON(data []byte) error { + var res jsonTargetCustomTarget + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyTargetCustomTarget + } else { + + r.CustomTargetType = res.CustomTargetType + + } + return nil +} + +// This object is used to assert a desired state where this TargetCustomTarget is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyTargetCustomTarget *TargetCustomTarget = &TargetCustomTarget{empty: true} + +func (r *TargetCustomTarget) Empty() bool { + return r.empty +} + +func (r *TargetCustomTarget) String() string { + return dcl.SprintResource(r) +} + +func (r *TargetCustomTarget) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type TargetAssociatedEntities struct { + empty bool `json:"-"` + GkeClusters []TargetAssociatedEntitiesGkeClusters `json:"gkeClusters"` + AnthosClusters []TargetAssociatedEntitiesAnthosClusters `json:"anthosClusters"` +} + +type jsonTargetAssociatedEntities TargetAssociatedEntities + +func (r *TargetAssociatedEntities) UnmarshalJSON(data []byte) error { + var res jsonTargetAssociatedEntities + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyTargetAssociatedEntities + } else { + + r.GkeClusters = res.GkeClusters + + r.AnthosClusters = res.AnthosClusters + + } + return nil +} + +// This object is used to assert a desired state where this TargetAssociatedEntities is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyTargetAssociatedEntities *TargetAssociatedEntities = &TargetAssociatedEntities{empty: true} + +func (r *TargetAssociatedEntities) Empty() bool { + return r.empty +} + +func (r *TargetAssociatedEntities) String() string { + return dcl.SprintResource(r) +} + +func (r *TargetAssociatedEntities) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type TargetAssociatedEntitiesGkeClusters struct { + empty bool `json:"-"` + Cluster *string `json:"cluster"` + InternalIP *bool `json:"internalIP"` + ProxyUrl *string `json:"proxyUrl"` +} + +type jsonTargetAssociatedEntitiesGkeClusters TargetAssociatedEntitiesGkeClusters + +func (r *TargetAssociatedEntitiesGkeClusters) UnmarshalJSON(data []byte) error { + var res jsonTargetAssociatedEntitiesGkeClusters + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyTargetAssociatedEntitiesGkeClusters + } else { + + r.Cluster = res.Cluster + + r.InternalIP = res.InternalIP + + r.ProxyUrl = res.ProxyUrl + + } + return nil +} + +// This object is used to assert a desired state where this TargetAssociatedEntitiesGkeClusters is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyTargetAssociatedEntitiesGkeClusters *TargetAssociatedEntitiesGkeClusters = &TargetAssociatedEntitiesGkeClusters{empty: true} + +func (r *TargetAssociatedEntitiesGkeClusters) Empty() bool { + return r.empty +} + +func (r *TargetAssociatedEntitiesGkeClusters) String() string { + return dcl.SprintResource(r) +} + +func (r *TargetAssociatedEntitiesGkeClusters) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type TargetAssociatedEntitiesAnthosClusters struct { + empty bool `json:"-"` + Membership *string `json:"membership"` +} + +type jsonTargetAssociatedEntitiesAnthosClusters TargetAssociatedEntitiesAnthosClusters + +func (r *TargetAssociatedEntitiesAnthosClusters) UnmarshalJSON(data []byte) error { + var res jsonTargetAssociatedEntitiesAnthosClusters + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyTargetAssociatedEntitiesAnthosClusters + } else { + + r.Membership = res.Membership + + } + return nil +} + +// This object is used to assert a desired state where this TargetAssociatedEntitiesAnthosClusters is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyTargetAssociatedEntitiesAnthosClusters *TargetAssociatedEntitiesAnthosClusters = &TargetAssociatedEntitiesAnthosClusters{empty: true} + +func (r *TargetAssociatedEntitiesAnthosClusters) Empty() bool { + return r.empty +} + +func (r *TargetAssociatedEntitiesAnthosClusters) String() string { + return dcl.SprintResource(r) +} + +func (r *TargetAssociatedEntitiesAnthosClusters) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +// Describe returns a simple description of this resource to ensure that automated tools +// can identify it. +func (r *Target) Describe() dcl.ServiceTypeVersion { + return dcl.ServiceTypeVersion{ + Service: "clouddeploy", + Type: "Target", +{{- if ne $.TargetVersionName "ga" }} + Version: "beta", +{{- else }} + Version: "clouddeploy", +{{- end }} + } +} + +func (r *Target) ID() (string, error) { + if err := extractTargetFields(r); err != nil { + return "", err + } + nr := r.urlNormalized() + params := map[string]interface{}{ + "name": dcl.ValueOrEmptyString(nr.Name), + "target_id": dcl.ValueOrEmptyString(nr.TargetId), + "uid": dcl.ValueOrEmptyString(nr.Uid), + "description": dcl.ValueOrEmptyString(nr.Description), + "annotations": dcl.ValueOrEmptyString(nr.Annotations), + "labels": dcl.ValueOrEmptyString(nr.Labels), + "require_approval": dcl.ValueOrEmptyString(nr.RequireApproval), + "create_time": dcl.ValueOrEmptyString(nr.CreateTime), + "update_time": dcl.ValueOrEmptyString(nr.UpdateTime), + "gke": dcl.ValueOrEmptyString(nr.Gke), + "anthos_cluster": dcl.ValueOrEmptyString(nr.AnthosCluster), + "etag": dcl.ValueOrEmptyString(nr.Etag), + "execution_configs": dcl.ValueOrEmptyString(nr.ExecutionConfigs), + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "run": dcl.ValueOrEmptyString(nr.Run), + "multi_target": 
dcl.ValueOrEmptyString(nr.MultiTarget), + "deploy_parameters": dcl.ValueOrEmptyString(nr.DeployParameters), + "custom_target": dcl.ValueOrEmptyString(nr.CustomTarget), + "associated_entities": dcl.ValueOrEmptyString(nr.AssociatedEntities), + } + return dcl.Nprintf("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/targets/{{ "{{" }}name{{ "}}" }}", params), nil +} + +const TargetMaxPage = -1 + +type TargetList struct { + Items []*Target + + nextToken string + + pageSize int32 + + resource *Target +} + +func (l *TargetList) HasNext() bool { + return l.nextToken != "" +} + +func (l *TargetList) Next(ctx context.Context, c *Client) error { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if !l.HasNext() { + return fmt.Errorf("no next page") + } + items, token, err := c.listTarget(ctx, l.resource, l.nextToken, l.pageSize) + if err != nil { + return err + } + l.Items = items + l.nextToken = token + return err +} + +func (c *Client) ListTarget(ctx context.Context, project, location string) (*TargetList, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + return c.ListTargetWithMaxResults(ctx, project, location, TargetMaxPage) + +} + +func (c *Client) ListTargetWithMaxResults(ctx context.Context, project, location string, pageSize int32) (*TargetList, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // Create a resource object so that we can use proper url normalization methods. 
+	r := &Target{
+		Project:  &project,
+		Location: &location,
+	}
+	items, token, err := c.listTarget(ctx, r, "", pageSize)
+	if err != nil {
+		return nil, err
+	}
+	return &TargetList{
+		Items:     items,
+		nextToken: token,
+		pageSize:  pageSize,
+		resource:  r,
+	}, nil
+}
+
+func (c *Client) GetTarget(ctx context.Context, r *Target) (*Target, error) {
+	ctx = dcl.ContextWithRequestID(ctx)
+	ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second))
+	defer cancel()
+
+	// This is *purposefully* suppressing errors.
+	// This function is used with url-normalized values + not URL normalized values.
+	// URL Normalized values will throw unintentional errors, since those values are not of the proper parent form.
+	extractTargetFields(r)
+
+	b, err := c.getTargetRaw(ctx, r)
+	if err != nil {
+		if dcl.IsNotFound(err) {
+			return nil, &googleapi.Error{
+				Code:    404,
+				Message: err.Error(),
+			}
+		}
+		return nil, err
+	}
+	result, err := unmarshalTarget(b, c, r)
+	if err != nil {
+		return nil, err
+	}
+	result.Project = r.Project
+	result.Location = r.Location
+	result.Name = r.Name
+
+	c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result)
+	c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r)
+	result, err = canonicalizeTargetNewState(c, result, r)
+	if err != nil {
+		return nil, err
+	}
+	if err := postReadExtractTargetFields(result); err != nil {
+		return result, err
+	}
+	c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result)
+
+	return result, nil
+}
+
+func (c *Client) DeleteTarget(ctx context.Context, r *Target) error {
+	ctx = dcl.ContextWithRequestID(ctx)
+	ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second))
+	defer cancel()
+
+	if r == nil {
+		return fmt.Errorf("Target resource is nil")
+	}
+	c.Config.Logger.InfoWithContext(ctx, "Deleting Target...")
+	deleteOp := deleteTargetOperation{}
+	return deleteOp.do(ctx, r, c)
+}
+
+// DeleteAllTarget deletes all
resources that the filter function returns true on.
+func (c *Client) DeleteAllTarget(ctx context.Context, project, location string, filter func(*Target) bool) error {
+	listObj, err := c.ListTarget(ctx, project, location)
+	if err != nil {
+		return err
+	}
+
+	err = c.deleteAllTarget(ctx, filter, listObj.Items)
+	if err != nil {
+		return err
+	}
+	for listObj.HasNext() {
+		err = listObj.Next(ctx, c)
+		if err != nil {
+			// Propagate pagination failures instead of silently reporting success
+			// with some resources left undeleted.
+			return err
+		}
+		err = c.deleteAllTarget(ctx, filter, listObj.Items)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (c *Client) ApplyTarget(ctx context.Context, rawDesired *Target, opts ...dcl.ApplyOption) (*Target, error) {
+	ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second))
+	defer cancel()
+
+	ctx = dcl.ContextWithRequestID(ctx)
+	var resultNewState *Target
+	err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) {
+		newState, err := applyTargetHelper(c, ctx, rawDesired, opts...)
+		resultNewState = newState
+		if err != nil {
+			// If the error is 409, there is conflict in resource update.
+			// Here we want to apply changes based on latest state.
+			if dcl.IsConflictError(err) {
+				return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err}
+			}
+			return nil, err
+		}
+		return nil, nil
+	}, c.Config.RetryProvider)
+	return resultNewState, err
+}
+
+func applyTargetHelper(c *Client, ctx context.Context, rawDesired *Target, opts ...dcl.ApplyOption) (*Target, error) {
+	c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyTarget...")
+	c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired)
+
+	// 1.1: Validation of user-specified fields in desired state.
+	if err := rawDesired.validate(); err != nil {
+		return nil, err
+	}
+
+	if err := extractTargetFields(rawDesired); err != nil {
+		return nil, err
+	}
+
+	initial, desired, fieldDiffs, err := c.targetDiffsForRawDesired(ctx, rawDesired, opts...)
+ if err != nil { + return nil, fmt.Errorf("failed to create a diff: %w", err) + } + + diffs, err := convertFieldDiffsToTargetDiffs(c.Config, fieldDiffs, opts) + if err != nil { + return nil, err + } + + // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). + + // 2.3: Lifecycle Directive Check + var create bool + lp := dcl.FetchLifecycleParams(opts) + if initial == nil { + if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} + } + create = true + } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), + } + } else { + for _, d := range diffs { + if d.RequiresRecreate { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), + } + } + if dcl.HasLifecycleParam(lp, dcl.BlockModification) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} + } + } + } + + // 2.4 Imperative Request Planning + var ops []targetApiOperation + if create { + ops = append(ops, &createTargetOperation{}) + } else { + for _, d := range diffs { + ops = append(ops, d.UpdateOp) + } + } + c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) + + // 2.5 Request Actuation + for _, op := range ops { + c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) + if err := op.do(ctx, desired, c); err != nil { + c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", op, op, err) + return nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) + } + return applyTargetDiff(c, ctx, desired, rawDesired, ops, opts...) 
+} + +func applyTargetDiff(c *Client, ctx context.Context, desired *Target, rawDesired *Target, ops []targetApiOperation, opts ...dcl.ApplyOption) (*Target, error) { + // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") + rawNew, err := c.GetTarget(ctx, desired) + if err != nil { + return nil, err + } + // Get additional values from the first response. + // These values should be merged into the newState above. + if len(ops) > 0 { + lastOp := ops[len(ops)-1] + if o, ok := lastOp.(*createTargetOperation); ok { + if r, hasR := o.FirstResponse(); hasR { + + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") + + fullResp, err := unmarshalMapTarget(r, c, rawDesired) + if err != nil { + return nil, err + } + + rawNew, err = canonicalizeTargetNewState(c, rawNew, fullResp) + if err != nil { + return nil, err + } + } + } + } + + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) + // 3.2b Canonicalization of raw new state using raw desired state + newState, err := canonicalizeTargetNewState(c, rawNew, rawDesired) + if err != nil { + return rawNew, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) + // 3.3 Comparison of the new state and raw desired state. + // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE + newDesired, err := canonicalizeTargetDesiredState(rawDesired, newState) + if err != nil { + return newState, err + } + + if err := postReadExtractTargetFields(newState); err != nil { + return newState, err + } + + // Need to ensure any transformations made here match acceptably in differ. 
+ if err := postReadExtractTargetFields(newDesired); err != nil { + return newState, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) + newDiffs, err := diffTarget(c, newDesired, newState) + if err != nil { + return newState, err + } + + if len(newDiffs) == 0 { + c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") + } else { + c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) + diffMessages := make([]string, len(newDiffs)) + for i, d := range newDiffs { + diffMessages[i] = fmt.Sprintf("%v", d) + } + return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} + } + c.Config.Logger.InfoWithContext(ctx, "Done Apply.") + return newState, nil +} + +func (r *Target) GetPolicy(basePath string) (string, string, *bytes.Buffer, error) { + u := r.getPolicyURL(basePath) + body := &bytes.Buffer{} + u, err := dcl.AddQueryParams(u, map[string]string{"optionsRequestedPolicyVersion": fmt.Sprintf("%d", r.IAMPolicyVersion())}) + if err != nil { + return "", "", nil, err + } + return u, "", body, nil +} diff --git a/mmv1/third_party/terraform/services/clouddeploy/target_internal.go.tmpl b/mmv1/third_party/terraform/services/clouddeploy/target_internal.go.tmpl new file mode 100644 index 000000000000..4ead247eaeb7 --- /dev/null +++ b/mmv1/third_party/terraform/services/clouddeploy/target_internal.go.tmpl @@ -0,0 +1,4011 @@ +package clouddeploy + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "strings" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource/operations" +) + +func (r *Target) validate() error { + + if err := dcl.ValidateAtMostOneOfFieldsSet([]string{"Gke", "AnthosCluster", "Run", "MultiTarget", "CustomTarget"}, r.Gke, r.AnthosCluster, r.Run, r.MultiTarget, r.CustomTarget); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Name, 
"Name"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Project, "Project"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Location, "Location"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.Gke) { + if err := r.Gke.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.AnthosCluster) { + if err := r.AnthosCluster.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.Run) { + if err := r.Run.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.MultiTarget) { + if err := r.MultiTarget.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.CustomTarget) { + if err := r.CustomTarget.validate(); err != nil { + return err + } + } + return nil +} +func (r *TargetGke) validate() error { + return nil +} +func (r *TargetAnthosCluster) validate() error { + return nil +} +func (r *TargetExecutionConfigs) validate() error { + if err := dcl.Required(r, "usages"); err != nil { + return err + } + if err := dcl.ValidateAtMostOneOfFieldsSet([]string(nil)); err != nil { + return err + } + return nil +} +func (r *TargetRun) validate() error { + if err := dcl.Required(r, "location"); err != nil { + return err + } + return nil +} +func (r *TargetMultiTarget) validate() error { + if err := dcl.Required(r, "targetIds"); err != nil { + return err + } + return nil +} +func (r *TargetCustomTarget) validate() error { + if err := dcl.Required(r, "customTargetType"); err != nil { + return err + } + return nil +} +func (r *TargetAssociatedEntities) validate() error { + return nil +} +func (r *TargetAssociatedEntitiesGkeClusters) validate() error { + return nil +} +func (r *TargetAssociatedEntitiesAnthosClusters) validate() error { + return nil +} +func (r *Target) basePath() string { + params := map[string]interface{}{} + return dcl.Nprintf("https://clouddeploy.googleapis.com/v1/", params) +} + +func (r *Target) getURL(userBasePath string) 
(string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/targets/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +func (r *Target) listURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/targets", nr.basePath(), userBasePath, params), nil + +} + +func (r *Target) createURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/targets?targetId={{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil + +} + +func (r *Target) deleteURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/targets/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +func (r *Target) SetPolicyURL(userBasePath string) string { + nr := r.urlNormalized() + fields := map[string]interface{}{} + return dcl.URL("", nr.basePath(), userBasePath, fields) +} + +func (r *Target) SetPolicyVerb() string { + return "" +} + +func (r *Target) getPolicyURL(userBasePath 
string) string { + nr := r.urlNormalized() + fields := map[string]interface{}{} + return dcl.URL("", nr.basePath(), userBasePath, fields) +} + +func (r *Target) IAMPolicyVersion() int { + return 3 +} + +// targetApiOperation represents a mutable operation in the underlying REST +// API such as Create, Update, or Delete. +type targetApiOperation interface { + do(context.Context, *Target, *Client) error +} + +// newUpdateTargetUpdateTargetRequest creates a request for an +// Target resource's UpdateTarget update type by filling in the update +// fields based on the intended state of the resource. +func newUpdateTargetUpdateTargetRequest(ctx context.Context, f *Target, c *Client) (map[string]interface{}, error) { + req := map[string]interface{}{} + res := f + _ = res + + if v := f.Description; !dcl.IsEmptyValueIndirect(v) { + req["description"] = v + } + if v := f.Annotations; !dcl.IsEmptyValueIndirect(v) { + req["annotations"] = v + } + if v := f.Labels; !dcl.IsEmptyValueIndirect(v) { + req["labels"] = v + } + if v := f.RequireApproval; !dcl.IsEmptyValueIndirect(v) { + req["requireApproval"] = v + } + if v, err := expandTargetGke(c, f.Gke, res); err != nil { + return nil, fmt.Errorf("error expanding Gke into gke: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["gke"] = v + } + if v, err := expandTargetAnthosCluster(c, f.AnthosCluster, res); err != nil { + return nil, fmt.Errorf("error expanding AnthosCluster into anthosCluster: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["anthosCluster"] = v + } + if v, err := expandTargetExecutionConfigsSlice(c, f.ExecutionConfigs, res); err != nil { + return nil, fmt.Errorf("error expanding ExecutionConfigs into executionConfigs: %w", err) + } else if v != nil { + req["executionConfigs"] = v + } + if v, err := expandTargetRun(c, f.Run, res); err != nil { + return nil, fmt.Errorf("error expanding Run into run: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["run"] = v + } + if v, err := 
expandTargetMultiTarget(c, f.MultiTarget, res); err != nil { + return nil, fmt.Errorf("error expanding MultiTarget into multiTarget: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["multiTarget"] = v + } + if v := f.DeployParameters; !dcl.IsEmptyValueIndirect(v) { + req["deployParameters"] = v + } + if v, err := expandTargetCustomTarget(c, f.CustomTarget, res); err != nil { + return nil, fmt.Errorf("error expanding CustomTarget into customTarget: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["customTarget"] = v + } + if v, err := expandTargetAssociatedEntitiesMap(c, f.AssociatedEntities, res); err != nil { + return nil, fmt.Errorf("error expanding AssociatedEntities into associatedEntities: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["associatedEntities"] = v + } + b, err := c.getTargetRaw(ctx, f) + if err != nil { + return nil, err + } + var m map[string]interface{} + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + rawEtag, err := dcl.GetMapEntry( + m, + []string{"etag"}, + ) + if err != nil { + c.Config.Logger.WarningWithContextf(ctx, "Failed to fetch from JSON Path: %v", err) + } else { + req["etag"] = rawEtag.(string) + } + req["name"] = fmt.Sprintf("projects/%s/locations/%s/targets/%s", *f.Project, *f.Location, *f.Name) + + return req, nil +} + +// marshalUpdateTargetUpdateTargetRequest converts the update into +// the final JSON request body. +func marshalUpdateTargetUpdateTargetRequest(c *Client, m map[string]interface{}) ([]byte, error) { + + return json.Marshal(m) +} + +type updateTargetUpdateTargetOperation struct { + // If the update operation has the REQUIRES_APPLY_OPTIONS trait, this will be populated. + // Usually it will be nil - this is to prevent us from accidentally depending on apply + // options, which should usually be unnecessary. + ApplyOptions []dcl.ApplyOption + FieldDiffs []*dcl.FieldDiff +} + +// do creates a request and sends it to the appropriate URL. 
In most operations, +// do will transcribe a subset of the resource into a request object and send a +// PUT request to a single URL. + +func (op *updateTargetUpdateTargetOperation) do(ctx context.Context, r *Target, c *Client) error { + _, err := c.GetTarget(ctx, r) + if err != nil { + return err + } + + u, err := r.updateURL(c.Config.BasePath, "UpdateTarget") + if err != nil { + return err + } + mask := dcl.UpdateMask(op.FieldDiffs) + u, err = dcl.AddQueryParams(u, map[string]string{"updateMask": mask}) + if err != nil { + return err + } + + req, err := newUpdateTargetUpdateTargetRequest(ctx, r, c) + if err != nil { + return err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created update: %#v", req) + body, err := marshalUpdateTargetUpdateTargetRequest(c, req) + if err != nil { + return err + } + resp, err := dcl.SendRequest(ctx, c.Config, "PATCH", u, bytes.NewBuffer(body), c.Config.RetryProvider) + if err != nil { + return err + } + + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + err = o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET") + + if err != nil { + return err + } + + return nil +} + +func (c *Client) listTargetRaw(ctx context.Context, r *Target, pageToken string, pageSize int32) ([]byte, error) { + u, err := r.urlNormalized().listURL(c.Config.BasePath) + if err != nil { + return nil, err + } + + m := make(map[string]string) + if pageToken != "" { + m["pageToken"] = pageToken + } + + if pageSize != TargetMaxPage { + m["pageSize"] = fmt.Sprintf("%v", pageSize) + } + + u, err = dcl.AddQueryParams(u, m) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + return ioutil.ReadAll(resp.Response.Body) +} + +type listTargetOperation struct { + Targets []map[string]interface{} 
`json:"targets"` + Token string `json:"nextPageToken"` +} + +func (c *Client) listTarget(ctx context.Context, r *Target, pageToken string, pageSize int32) ([]*Target, string, error) { + b, err := c.listTargetRaw(ctx, r, pageToken, pageSize) + if err != nil { + return nil, "", err + } + + var m listTargetOperation + if err := json.Unmarshal(b, &m); err != nil { + return nil, "", err + } + + var l []*Target + for _, v := range m.Targets { + res, err := unmarshalMapTarget(v, c, r) + if err != nil { + return nil, m.Token, err + } + res.Project = r.Project + res.Location = r.Location + l = append(l, res) + } + + return l, m.Token, nil +} + +func (c *Client) deleteAllTarget(ctx context.Context, f func(*Target) bool, resources []*Target) error { + var errors []string + for _, res := range resources { + if f(res) { + // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. + err := c.DeleteTarget(ctx, res) + if err != nil { + errors = append(errors, err.Error()) + } + } + } + if len(errors) > 0 { + return fmt.Errorf("%v", strings.Join(errors, "\n")) + } else { + return nil + } +} + +type deleteTargetOperation struct{} + +func (op *deleteTargetOperation) do(ctx context.Context, r *Target, c *Client) error { + r, err := c.GetTarget(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + c.Config.Logger.InfoWithContextf(ctx, "Target not found, returning. Original error: %v", err) + return nil + } + c.Config.Logger.WarningWithContextf(ctx, "GetTarget checking for existence. error: %v", err) + return err + } + + u, err := r.deleteURL(c.Config.BasePath) + if err != nil { + return err + } + + // Delete should never have a body + body := &bytes.Buffer{} + resp, err := dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider) + if err != nil { + return err + } + + // wait for object to be deleted. 
+ var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { + return err + } + + // We saw a race condition where for some successful delete operation, the Get calls returned resources for a short duration. + // This is the reason we are adding retry to handle that case. + retriesRemaining := 10 + dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + _, err := c.GetTarget(ctx, r) + if dcl.IsNotFound(err) { + return nil, nil + } + if retriesRemaining > 0 { + retriesRemaining-- + return &dcl.RetryDetails{}, dcl.OperationNotDone{} + } + return nil, dcl.NotDeletedError{ExistingResource: r} + }, c.Config.RetryProvider) + return nil +} + +// Create operations are similar to Update operations, although they do not have +// specific request objects. The Create request object is the json encoding of +// the resource, which is modified by res.marshal to form the base request body. +type createTargetOperation struct { + response map[string]interface{} +} + +func (op *createTargetOperation) FirstResponse() (map[string]interface{}, bool) { + return op.response, len(op.response) > 0 +} + +func (op *createTargetOperation) do(ctx context.Context, r *Target, c *Client) error { + c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r) + u, err := r.createURL(c.Config.BasePath) + if err != nil { + return err + } + + req, err := r.marshal(c) + if err != nil { + return err + } + resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider) + if err != nil { + return err + } + // wait for object to be created. 
+ var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { + c.Config.Logger.Warningf("Creation failed after waiting for operation: %v", err) + return err + } + c.Config.Logger.InfoWithContextf(ctx, "Successfully waited for operation") + op.response, _ = o.FirstResponse() + + if _, err := c.GetTarget(ctx, r); err != nil { + c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) + return err + } + + return nil +} + +func (c *Client) getTargetRaw(ctx context.Context, r *Target) ([]byte, error) { + + u, err := r.getURL(c.Config.BasePath) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + b, err := ioutil.ReadAll(resp.Response.Body) + if err != nil { + return nil, err + } + + return b, nil +} + +func (c *Client) targetDiffsForRawDesired(ctx context.Context, rawDesired *Target, opts ...dcl.ApplyOption) (initial, desired *Target, diffs []*dcl.FieldDiff, err error) { + c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") + // First, let us see if the user provided a state hint. If they did, we will start fetching based on that. 
+ var fetchState *Target + if sh := dcl.FetchStateHint(opts); sh != nil { + if r, ok := sh.(*Target); !ok { + c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected Target, got %T", sh) + } else { + fetchState = r + } + } + if fetchState == nil { + fetchState = rawDesired + } + + // 1.2: Retrieval of raw initial state from API + rawInitial, err := c.GetTarget(ctx, fetchState) + if rawInitial == nil { + if !dcl.IsNotFound(err) { + c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a Target resource already exists: %s", err) + return nil, nil, nil, fmt.Errorf("failed to retrieve Target resource: %v", err) + } + c.Config.Logger.InfoWithContext(ctx, "Found that Target resource did not exist.") + // Perform canonicalization to pick up defaults. + desired, err = canonicalizeTargetDesiredState(rawDesired, rawInitial) + return nil, desired, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Found initial state for Target: %v", rawInitial) + c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for Target: %v", rawDesired) + + // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. + if err := extractTargetFields(rawInitial); err != nil { + return nil, nil, nil, err + } + + // 1.3: Canonicalize raw initial state into initial state. + initial, err = canonicalizeTargetInitialState(rawInitial, rawDesired) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for Target: %v", initial) + + // 1.4: Canonicalize raw desired state into desired state. + desired, err = canonicalizeTargetDesiredState(rawDesired, rawInitial, opts...) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for Target: %v", desired) + + // 2.1: Comparison of initial and desired state. + diffs, err = diffTarget(c, desired, initial, opts...) 
+ return initial, desired, diffs, err +} + +func canonicalizeTargetInitialState(rawInitial, rawDesired *Target) (*Target, error) { + // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. + + if !dcl.IsZeroValue(rawInitial.Gke) { + // Check if anything else is set. + if dcl.AnySet(rawInitial.AnthosCluster, rawInitial.Run, rawInitial.MultiTarget, rawInitial.CustomTarget) { + rawInitial.Gke = EmptyTargetGke + } + } + + if !dcl.IsZeroValue(rawInitial.AnthosCluster) { + // Check if anything else is set. + if dcl.AnySet(rawInitial.Gke, rawInitial.Run, rawInitial.MultiTarget, rawInitial.CustomTarget) { + rawInitial.AnthosCluster = EmptyTargetAnthosCluster + } + } + + if !dcl.IsZeroValue(rawInitial.Run) { + // Check if anything else is set. + if dcl.AnySet(rawInitial.Gke, rawInitial.AnthosCluster, rawInitial.MultiTarget, rawInitial.CustomTarget) { + rawInitial.Run = EmptyTargetRun + } + } + + if !dcl.IsZeroValue(rawInitial.MultiTarget) { + // Check if anything else is set. + if dcl.AnySet(rawInitial.Gke, rawInitial.AnthosCluster, rawInitial.Run, rawInitial.CustomTarget) { + rawInitial.MultiTarget = EmptyTargetMultiTarget + } + } + + if !dcl.IsZeroValue(rawInitial.CustomTarget) { + // Check if anything else is set. + if dcl.AnySet(rawInitial.Gke, rawInitial.AnthosCluster, rawInitial.Run, rawInitial.MultiTarget) { + rawInitial.CustomTarget = EmptyTargetCustomTarget + } + } + + return rawInitial, nil +} + +/* +* Canonicalizers +* +* These are responsible for converting either a user-specified config or a +* GCP API response to a standard format that can be used for difference checking. +* */ + +func canonicalizeTargetDesiredState(rawDesired, rawInitial *Target, opts ...dcl.ApplyOption) (*Target, error) { + + if rawInitial == nil { + // Since the initial state is empty, the desired state is all we have. + // We canonicalize the remaining nested objects with nil to pick up defaults. + rawDesired.Gke = canonicalizeTargetGke(rawDesired.Gke, nil, opts...) 
+ rawDesired.AnthosCluster = canonicalizeTargetAnthosCluster(rawDesired.AnthosCluster, nil, opts...) + rawDesired.Run = canonicalizeTargetRun(rawDesired.Run, nil, opts...) + rawDesired.MultiTarget = canonicalizeTargetMultiTarget(rawDesired.MultiTarget, nil, opts...) + rawDesired.CustomTarget = canonicalizeTargetCustomTarget(rawDesired.CustomTarget, nil, opts...) + + return rawDesired, nil + } + canonicalDesired := &Target{} + if dcl.NameToSelfLink(rawDesired.Name, rawInitial.Name) { + canonicalDesired.Name = rawInitial.Name + } else { + canonicalDesired.Name = rawDesired.Name + } + if dcl.StringCanonicalize(rawDesired.Description, rawInitial.Description) { + canonicalDesired.Description = rawInitial.Description + } else { + canonicalDesired.Description = rawDesired.Description + } + if dcl.IsZeroValue(rawDesired.Annotations) || (dcl.IsEmptyValueIndirect(rawDesired.Annotations) && dcl.IsEmptyValueIndirect(rawInitial.Annotations)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + canonicalDesired.Annotations = rawInitial.Annotations + } else { + canonicalDesired.Annotations = rawDesired.Annotations + } + if dcl.IsZeroValue(rawDesired.Labels) || (dcl.IsEmptyValueIndirect(rawDesired.Labels) && dcl.IsEmptyValueIndirect(rawInitial.Labels)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + canonicalDesired.Labels = rawInitial.Labels + } else { + canonicalDesired.Labels = rawDesired.Labels + } + if dcl.BoolCanonicalize(rawDesired.RequireApproval, rawInitial.RequireApproval) { + canonicalDesired.RequireApproval = rawInitial.RequireApproval + } else { + canonicalDesired.RequireApproval = rawDesired.RequireApproval + } + canonicalDesired.Gke = canonicalizeTargetGke(rawDesired.Gke, rawInitial.Gke, opts...) + canonicalDesired.AnthosCluster = canonicalizeTargetAnthosCluster(rawDesired.AnthosCluster, rawInitial.AnthosCluster, opts...) 
+ canonicalDesired.ExecutionConfigs = canonicalizeTargetExecutionConfigsSlice(rawDesired.ExecutionConfigs, rawInitial.ExecutionConfigs, opts...) + if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) { + canonicalDesired.Project = rawInitial.Project + } else { + canonicalDesired.Project = rawDesired.Project + } + if dcl.NameToSelfLink(rawDesired.Location, rawInitial.Location) { + canonicalDesired.Location = rawInitial.Location + } else { + canonicalDesired.Location = rawDesired.Location + } + canonicalDesired.Run = canonicalizeTargetRun(rawDesired.Run, rawInitial.Run, opts...) + canonicalDesired.MultiTarget = canonicalizeTargetMultiTarget(rawDesired.MultiTarget, rawInitial.MultiTarget, opts...) + if dcl.IsZeroValue(rawDesired.DeployParameters) || (dcl.IsEmptyValueIndirect(rawDesired.DeployParameters) && dcl.IsEmptyValueIndirect(rawInitial.DeployParameters)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + canonicalDesired.DeployParameters = rawInitial.DeployParameters + } else { + canonicalDesired.DeployParameters = rawDesired.DeployParameters + } + canonicalDesired.CustomTarget = canonicalizeTargetCustomTarget(rawDesired.CustomTarget, rawInitial.CustomTarget, opts...) + if dcl.IsZeroValue(rawDesired.AssociatedEntities) || (dcl.IsEmptyValueIndirect(rawDesired.AssociatedEntities) && dcl.IsEmptyValueIndirect(rawInitial.AssociatedEntities)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + canonicalDesired.AssociatedEntities = rawInitial.AssociatedEntities + } else { + canonicalDesired.AssociatedEntities = rawDesired.AssociatedEntities + } + + if canonicalDesired.Gke != nil { + // Check if anything else is set. + if dcl.AnySet(rawDesired.AnthosCluster, rawDesired.Run, rawDesired.MultiTarget, rawDesired.CustomTarget) { + canonicalDesired.Gke = EmptyTargetGke + } + } + + if canonicalDesired.AnthosCluster != nil { + // Check if anything else is set. 
+ if dcl.AnySet(rawDesired.Gke, rawDesired.Run, rawDesired.MultiTarget, rawDesired.CustomTarget) { + canonicalDesired.AnthosCluster = EmptyTargetAnthosCluster + } + } + + if canonicalDesired.Run != nil { + // Check if anything else is set. + if dcl.AnySet(rawDesired.Gke, rawDesired.AnthosCluster, rawDesired.MultiTarget, rawDesired.CustomTarget) { + canonicalDesired.Run = EmptyTargetRun + } + } + + if canonicalDesired.MultiTarget != nil { + // Check if anything else is set. + if dcl.AnySet(rawDesired.Gke, rawDesired.AnthosCluster, rawDesired.Run, rawDesired.CustomTarget) { + canonicalDesired.MultiTarget = EmptyTargetMultiTarget + } + } + + if canonicalDesired.CustomTarget != nil { + // Check if anything else is set. + if dcl.AnySet(rawDesired.Gke, rawDesired.AnthosCluster, rawDesired.Run, rawDesired.MultiTarget) { + canonicalDesired.CustomTarget = EmptyTargetCustomTarget + } + } + + return canonicalDesired, nil +} + +func canonicalizeTargetNewState(c *Client, rawNew, rawDesired *Target) (*Target, error) { + + rawNew.Name = rawDesired.Name + + if dcl.IsEmptyValueIndirect(rawNew.TargetId) && dcl.IsEmptyValueIndirect(rawDesired.TargetId) { + rawNew.TargetId = rawDesired.TargetId + } else { + if dcl.StringCanonicalize(rawDesired.TargetId, rawNew.TargetId) { + rawNew.TargetId = rawDesired.TargetId + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Uid) && dcl.IsEmptyValueIndirect(rawDesired.Uid) { + rawNew.Uid = rawDesired.Uid + } else { + if dcl.StringCanonicalize(rawDesired.Uid, rawNew.Uid) { + rawNew.Uid = rawDesired.Uid + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Description) && dcl.IsEmptyValueIndirect(rawDesired.Description) { + rawNew.Description = rawDesired.Description + } else { + if dcl.StringCanonicalize(rawDesired.Description, rawNew.Description) { + rawNew.Description = rawDesired.Description + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Annotations) && dcl.IsEmptyValueIndirect(rawDesired.Annotations) { + rawNew.Annotations = rawDesired.Annotations + } else 
{ + } + + if dcl.IsEmptyValueIndirect(rawNew.Labels) && dcl.IsEmptyValueIndirect(rawDesired.Labels) { + rawNew.Labels = rawDesired.Labels + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.RequireApproval) && dcl.IsEmptyValueIndirect(rawDesired.RequireApproval) { + rawNew.RequireApproval = rawDesired.RequireApproval + } else { + if dcl.BoolCanonicalize(rawDesired.RequireApproval, rawNew.RequireApproval) { + rawNew.RequireApproval = rawDesired.RequireApproval + } + } + + if dcl.IsEmptyValueIndirect(rawNew.CreateTime) && dcl.IsEmptyValueIndirect(rawDesired.CreateTime) { + rawNew.CreateTime = rawDesired.CreateTime + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.UpdateTime) && dcl.IsEmptyValueIndirect(rawDesired.UpdateTime) { + rawNew.UpdateTime = rawDesired.UpdateTime + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.Gke) && dcl.IsEmptyValueIndirect(rawDesired.Gke) { + rawNew.Gke = rawDesired.Gke + } else { + rawNew.Gke = canonicalizeNewTargetGke(c, rawDesired.Gke, rawNew.Gke) + } + + if dcl.IsEmptyValueIndirect(rawNew.AnthosCluster) && dcl.IsEmptyValueIndirect(rawDesired.AnthosCluster) { + rawNew.AnthosCluster = rawDesired.AnthosCluster + } else { + rawNew.AnthosCluster = canonicalizeNewTargetAnthosCluster(c, rawDesired.AnthosCluster, rawNew.AnthosCluster) + } + + if dcl.IsEmptyValueIndirect(rawNew.Etag) && dcl.IsEmptyValueIndirect(rawDesired.Etag) { + rawNew.Etag = rawDesired.Etag + } else { + if dcl.StringCanonicalize(rawDesired.Etag, rawNew.Etag) { + rawNew.Etag = rawDesired.Etag + } + } + + if dcl.IsEmptyValueIndirect(rawNew.ExecutionConfigs) && dcl.IsEmptyValueIndirect(rawDesired.ExecutionConfigs) { + rawNew.ExecutionConfigs = rawDesired.ExecutionConfigs + } else { + rawNew.ExecutionConfigs = canonicalizeNewTargetExecutionConfigsSlice(c, rawDesired.ExecutionConfigs, rawNew.ExecutionConfigs) + } + + rawNew.Project = rawDesired.Project + + rawNew.Location = rawDesired.Location + + if dcl.IsEmptyValueIndirect(rawNew.Run) && 
dcl.IsEmptyValueIndirect(rawDesired.Run) { + rawNew.Run = rawDesired.Run + } else { + rawNew.Run = canonicalizeNewTargetRun(c, rawDesired.Run, rawNew.Run) + } + + if dcl.IsEmptyValueIndirect(rawNew.MultiTarget) && dcl.IsEmptyValueIndirect(rawDesired.MultiTarget) { + rawNew.MultiTarget = rawDesired.MultiTarget + } else { + rawNew.MultiTarget = canonicalizeNewTargetMultiTarget(c, rawDesired.MultiTarget, rawNew.MultiTarget) + } + + if dcl.IsEmptyValueIndirect(rawNew.DeployParameters) && dcl.IsEmptyValueIndirect(rawDesired.DeployParameters) { + rawNew.DeployParameters = rawDesired.DeployParameters + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.CustomTarget) && dcl.IsEmptyValueIndirect(rawDesired.CustomTarget) { + rawNew.CustomTarget = rawDesired.CustomTarget + } else { + rawNew.CustomTarget = canonicalizeNewTargetCustomTarget(c, rawDesired.CustomTarget, rawNew.CustomTarget) + } + + if dcl.IsEmptyValueIndirect(rawNew.AssociatedEntities) && dcl.IsEmptyValueIndirect(rawDesired.AssociatedEntities) { + rawNew.AssociatedEntities = rawDesired.AssociatedEntities + } else { + } + + return rawNew, nil +} + +func canonicalizeTargetGke(des, initial *TargetGke, opts ...dcl.ApplyOption) *TargetGke { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &TargetGke{} + + if dcl.IsZeroValue(des.Cluster) || (dcl.IsEmptyValueIndirect(des.Cluster) && dcl.IsEmptyValueIndirect(initial.Cluster)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.Cluster = initial.Cluster + } else { + cDes.Cluster = des.Cluster + } + if dcl.BoolCanonicalize(des.InternalIP, initial.InternalIP) || dcl.IsZeroValue(des.InternalIP) { + cDes.InternalIP = initial.InternalIP + } else { + cDes.InternalIP = des.InternalIP + } + if dcl.StringCanonicalize(des.ProxyUrl, initial.ProxyUrl) || dcl.IsZeroValue(des.ProxyUrl) { + cDes.ProxyUrl = initial.ProxyUrl + } else { + cDes.ProxyUrl = des.ProxyUrl + } + if dcl.BoolCanonicalize(des.DnsEndpoint, initial.DnsEndpoint) || dcl.IsZeroValue(des.DnsEndpoint) { + cDes.DnsEndpoint = initial.DnsEndpoint + } else { + cDes.DnsEndpoint = des.DnsEndpoint + } + + return cDes +} + +func canonicalizeTargetGkeSlice(des, initial []TargetGke, opts ...dcl.ApplyOption) []TargetGke { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]TargetGke, 0, len(des)) + for _, d := range des { + cd := canonicalizeTargetGke(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]TargetGke, 0, len(des)) + for i, d := range des { + cd := canonicalizeTargetGke(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewTargetGke(c *Client, des, nw *TargetGke) *TargetGke { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for TargetGke while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.BoolCanonicalize(des.InternalIP, nw.InternalIP) { + nw.InternalIP = des.InternalIP + } + if dcl.StringCanonicalize(des.ProxyUrl, nw.ProxyUrl) { + nw.ProxyUrl = des.ProxyUrl + } + if dcl.BoolCanonicalize(des.DnsEndpoint, nw.DnsEndpoint) { + nw.DnsEndpoint = des.DnsEndpoint + } + + return nw +} + +func canonicalizeNewTargetGkeSet(c *Client, des, nw []TargetGke) []TargetGke { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []TargetGke + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareTargetGkeNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewTargetGke(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewTargetGkeSlice(c *Client, des, nw []TargetGke) []TargetGke { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []TargetGke + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewTargetGke(c, &d, &n)) + } + + return items +} + +func canonicalizeTargetAnthosCluster(des, initial *TargetAnthosCluster, opts ...dcl.ApplyOption) *TargetAnthosCluster { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &TargetAnthosCluster{} + + if dcl.IsZeroValue(des.Membership) || (dcl.IsEmptyValueIndirect(des.Membership) && dcl.IsEmptyValueIndirect(initial.Membership)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Membership = initial.Membership + } else { + cDes.Membership = des.Membership + } + + return cDes +} + +func canonicalizeTargetAnthosClusterSlice(des, initial []TargetAnthosCluster, opts ...dcl.ApplyOption) []TargetAnthosCluster { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]TargetAnthosCluster, 0, len(des)) + for _, d := range des { + cd := canonicalizeTargetAnthosCluster(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]TargetAnthosCluster, 0, len(des)) + for i, d := range des { + cd := canonicalizeTargetAnthosCluster(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewTargetAnthosCluster(c *Client, des, nw *TargetAnthosCluster) *TargetAnthosCluster { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for TargetAnthosCluster while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewTargetAnthosClusterSet(c *Client, des, nw []TargetAnthosCluster) []TargetAnthosCluster { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []TargetAnthosCluster + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareTargetAnthosClusterNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewTargetAnthosCluster(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewTargetAnthosClusterSlice(c *Client, des, nw []TargetAnthosCluster) []TargetAnthosCluster { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []TargetAnthosCluster + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewTargetAnthosCluster(c, &d, &n)) + } + + return items +} + +func canonicalizeTargetExecutionConfigs(des, initial *TargetExecutionConfigs, opts ...dcl.ApplyOption) *TargetExecutionConfigs { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &TargetExecutionConfigs{} + + if dcl.IsZeroValue(des.Usages) || (dcl.IsEmptyValueIndirect(des.Usages) && dcl.IsEmptyValueIndirect(initial.Usages)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.Usages = initial.Usages + } else { + cDes.Usages = des.Usages + } + if dcl.IsZeroValue(des.WorkerPool) || (dcl.IsEmptyValueIndirect(des.WorkerPool) && dcl.IsEmptyValueIndirect(initial.WorkerPool)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.WorkerPool = initial.WorkerPool + } else { + cDes.WorkerPool = des.WorkerPool + } + if dcl.StringCanonicalize(des.ServiceAccount, initial.ServiceAccount) || dcl.IsZeroValue(des.ServiceAccount) { + cDes.ServiceAccount = initial.ServiceAccount + } else { + cDes.ServiceAccount = des.ServiceAccount + } + if dcl.StringCanonicalize(des.ArtifactStorage, initial.ArtifactStorage) || dcl.IsZeroValue(des.ArtifactStorage) { + cDes.ArtifactStorage = initial.ArtifactStorage + } else { + cDes.ArtifactStorage = des.ArtifactStorage + } + if dcl.StringCanonicalize(des.ExecutionTimeout, initial.ExecutionTimeout) || dcl.IsZeroValue(des.ExecutionTimeout) { + cDes.ExecutionTimeout = initial.ExecutionTimeout + } else { + cDes.ExecutionTimeout = des.ExecutionTimeout + } + if dcl.BoolCanonicalize(des.Verbose, initial.Verbose) || dcl.IsZeroValue(des.Verbose) { + cDes.Verbose = initial.Verbose + } else { + cDes.Verbose = des.Verbose + } + + return cDes +} + +func canonicalizeTargetExecutionConfigsSlice(des, initial []TargetExecutionConfigs, opts ...dcl.ApplyOption) []TargetExecutionConfigs { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]TargetExecutionConfigs, 0, len(des)) + for _, d := range des { + cd := canonicalizeTargetExecutionConfigs(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]TargetExecutionConfigs, 0, len(des)) + for i, d := range des { + cd := canonicalizeTargetExecutionConfigs(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewTargetExecutionConfigs(c *Client, des, nw *TargetExecutionConfigs) *TargetExecutionConfigs { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for TargetExecutionConfigs while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.ServiceAccount, nw.ServiceAccount) { + nw.ServiceAccount = des.ServiceAccount + } + if dcl.StringCanonicalize(des.ArtifactStorage, nw.ArtifactStorage) { + nw.ArtifactStorage = des.ArtifactStorage + } + if dcl.StringCanonicalize(des.ExecutionTimeout, nw.ExecutionTimeout) { + nw.ExecutionTimeout = des.ExecutionTimeout + } + if dcl.BoolCanonicalize(des.Verbose, nw.Verbose) { + nw.Verbose = des.Verbose + } + + return nw +} + +func canonicalizeNewTargetExecutionConfigsSet(c *Client, des, nw []TargetExecutionConfigs) []TargetExecutionConfigs { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []TargetExecutionConfigs + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareTargetExecutionConfigsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewTargetExecutionConfigs(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewTargetExecutionConfigsSlice(c *Client, des, nw []TargetExecutionConfigs) []TargetExecutionConfigs { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []TargetExecutionConfigs + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewTargetExecutionConfigs(c, &d, &n)) + } + + return items +} + +func canonicalizeTargetRun(des, initial *TargetRun, opts ...dcl.ApplyOption) *TargetRun { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &TargetRun{} + + if dcl.StringCanonicalize(des.Location, initial.Location) || dcl.IsZeroValue(des.Location) { + cDes.Location = initial.Location + } else { + cDes.Location = des.Location + } + + return cDes +} + +func canonicalizeTargetRunSlice(des, initial []TargetRun, opts ...dcl.ApplyOption) []TargetRun { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]TargetRun, 0, len(des)) + for _, d := range des { + cd := canonicalizeTargetRun(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]TargetRun, 0, len(des)) + for i, d := range des { + cd := canonicalizeTargetRun(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewTargetRun(c *Client, des, nw *TargetRun) *TargetRun { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for TargetRun while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Location, nw.Location) { + nw.Location = des.Location + } + + return nw +} + +func canonicalizeNewTargetRunSet(c *Client, des, nw []TargetRun) []TargetRun { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []TargetRun + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareTargetRunNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewTargetRun(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewTargetRunSlice(c *Client, des, nw []TargetRun) []TargetRun { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []TargetRun + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewTargetRun(c, &d, &n)) + } + + return items +} + +func canonicalizeTargetMultiTarget(des, initial *TargetMultiTarget, opts ...dcl.ApplyOption) *TargetMultiTarget { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &TargetMultiTarget{} + + if dcl.StringArrayCanonicalize(des.TargetIds, initial.TargetIds) { + cDes.TargetIds = initial.TargetIds + } else { + cDes.TargetIds = des.TargetIds + } + + return cDes +} + +func canonicalizeTargetMultiTargetSlice(des, initial []TargetMultiTarget, opts ...dcl.ApplyOption) []TargetMultiTarget { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]TargetMultiTarget, 0, len(des)) + for _, d := range des { + cd := canonicalizeTargetMultiTarget(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]TargetMultiTarget, 0, len(des)) + for i, d := range des { + cd := canonicalizeTargetMultiTarget(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewTargetMultiTarget(c *Client, des, nw *TargetMultiTarget) *TargetMultiTarget { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for TargetMultiTarget while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringArrayCanonicalize(des.TargetIds, nw.TargetIds) { + nw.TargetIds = des.TargetIds + } + + return nw +} + +func canonicalizeNewTargetMultiTargetSet(c *Client, des, nw []TargetMultiTarget) []TargetMultiTarget { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []TargetMultiTarget + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareTargetMultiTargetNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewTargetMultiTarget(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewTargetMultiTargetSlice(c *Client, des, nw []TargetMultiTarget) []TargetMultiTarget { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []TargetMultiTarget + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewTargetMultiTarget(c, &d, &n)) + } + + return items +} + +func canonicalizeTargetCustomTarget(des, initial *TargetCustomTarget, opts ...dcl.ApplyOption) *TargetCustomTarget { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &TargetCustomTarget{} + + if dcl.IsZeroValue(des.CustomTargetType) || (dcl.IsEmptyValueIndirect(des.CustomTargetType) && dcl.IsEmptyValueIndirect(initial.CustomTargetType)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.CustomTargetType = initial.CustomTargetType + } else { + cDes.CustomTargetType = des.CustomTargetType + } + + return cDes +} + +func canonicalizeTargetCustomTargetSlice(des, initial []TargetCustomTarget, opts ...dcl.ApplyOption) []TargetCustomTarget { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]TargetCustomTarget, 0, len(des)) + for _, d := range des { + cd := canonicalizeTargetCustomTarget(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]TargetCustomTarget, 0, len(des)) + for i, d := range des { + cd := canonicalizeTargetCustomTarget(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewTargetCustomTarget(c *Client, des, nw *TargetCustomTarget) *TargetCustomTarget { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for TargetCustomTarget while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewTargetCustomTargetSet(c *Client, des, nw []TargetCustomTarget) []TargetCustomTarget { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []TargetCustomTarget + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareTargetCustomTargetNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewTargetCustomTarget(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewTargetCustomTargetSlice(c *Client, des, nw []TargetCustomTarget) []TargetCustomTarget { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []TargetCustomTarget + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewTargetCustomTarget(c, &d, &n)) + } + + return items +} + +func canonicalizeTargetAssociatedEntities(des, initial *TargetAssociatedEntities, opts ...dcl.ApplyOption) *TargetAssociatedEntities { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &TargetAssociatedEntities{} + + cDes.GkeClusters = canonicalizeTargetAssociatedEntitiesGkeClustersSlice(des.GkeClusters, initial.GkeClusters, opts...) + cDes.AnthosClusters = canonicalizeTargetAssociatedEntitiesAnthosClustersSlice(des.AnthosClusters, initial.AnthosClusters, opts...) 
+ + return cDes +} + +func canonicalizeTargetAssociatedEntitiesSlice(des, initial []TargetAssociatedEntities, opts ...dcl.ApplyOption) []TargetAssociatedEntities { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]TargetAssociatedEntities, 0, len(des)) + for _, d := range des { + cd := canonicalizeTargetAssociatedEntities(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]TargetAssociatedEntities, 0, len(des)) + for i, d := range des { + cd := canonicalizeTargetAssociatedEntities(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewTargetAssociatedEntities(c *Client, des, nw *TargetAssociatedEntities) *TargetAssociatedEntities { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for TargetAssociatedEntities while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + nw.GkeClusters = canonicalizeNewTargetAssociatedEntitiesGkeClustersSlice(c, des.GkeClusters, nw.GkeClusters) + nw.AnthosClusters = canonicalizeNewTargetAssociatedEntitiesAnthosClustersSlice(c, des.AnthosClusters, nw.AnthosClusters) + + return nw +} + +func canonicalizeNewTargetAssociatedEntitiesSet(c *Client, des, nw []TargetAssociatedEntities) []TargetAssociatedEntities { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []TargetAssociatedEntities + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareTargetAssociatedEntitiesNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewTargetAssociatedEntities(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewTargetAssociatedEntitiesSlice(c *Client, des, nw []TargetAssociatedEntities) []TargetAssociatedEntities { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []TargetAssociatedEntities + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewTargetAssociatedEntities(c, &d, &n)) + } + + return items +} + +func canonicalizeTargetAssociatedEntitiesGkeClusters(des, initial *TargetAssociatedEntitiesGkeClusters, opts ...dcl.ApplyOption) *TargetAssociatedEntitiesGkeClusters { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &TargetAssociatedEntitiesGkeClusters{} + + if dcl.IsZeroValue(des.Cluster) || (dcl.IsEmptyValueIndirect(des.Cluster) && dcl.IsEmptyValueIndirect(initial.Cluster)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.Cluster = initial.Cluster + } else { + cDes.Cluster = des.Cluster + } + if dcl.BoolCanonicalize(des.InternalIP, initial.InternalIP) || dcl.IsZeroValue(des.InternalIP) { + cDes.InternalIP = initial.InternalIP + } else { + cDes.InternalIP = des.InternalIP + } + if dcl.StringCanonicalize(des.ProxyUrl, initial.ProxyUrl) || dcl.IsZeroValue(des.ProxyUrl) { + cDes.ProxyUrl = initial.ProxyUrl + } else { + cDes.ProxyUrl = des.ProxyUrl + } + + return cDes +} + +func canonicalizeTargetAssociatedEntitiesGkeClustersSlice(des, initial []TargetAssociatedEntitiesGkeClusters, opts ...dcl.ApplyOption) []TargetAssociatedEntitiesGkeClusters { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]TargetAssociatedEntitiesGkeClusters, 0, len(des)) + for _, d := range des { + cd := canonicalizeTargetAssociatedEntitiesGkeClusters(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]TargetAssociatedEntitiesGkeClusters, 0, len(des)) + for i, d := range des { + cd := canonicalizeTargetAssociatedEntitiesGkeClusters(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewTargetAssociatedEntitiesGkeClusters(c *Client, des, nw *TargetAssociatedEntitiesGkeClusters) *TargetAssociatedEntitiesGkeClusters { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for TargetAssociatedEntitiesGkeClusters while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.BoolCanonicalize(des.InternalIP, nw.InternalIP) { + nw.InternalIP = des.InternalIP + } + if dcl.StringCanonicalize(des.ProxyUrl, nw.ProxyUrl) { + nw.ProxyUrl = des.ProxyUrl + } + + return nw +} + +func canonicalizeNewTargetAssociatedEntitiesGkeClustersSet(c *Client, des, nw []TargetAssociatedEntitiesGkeClusters) []TargetAssociatedEntitiesGkeClusters { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []TargetAssociatedEntitiesGkeClusters + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareTargetAssociatedEntitiesGkeClustersNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewTargetAssociatedEntitiesGkeClusters(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewTargetAssociatedEntitiesGkeClustersSlice(c *Client, des, nw []TargetAssociatedEntitiesGkeClusters) []TargetAssociatedEntitiesGkeClusters { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []TargetAssociatedEntitiesGkeClusters + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewTargetAssociatedEntitiesGkeClusters(c, &d, &n)) + } + + return items +} + +func canonicalizeTargetAssociatedEntitiesAnthosClusters(des, initial *TargetAssociatedEntitiesAnthosClusters, opts ...dcl.ApplyOption) *TargetAssociatedEntitiesAnthosClusters { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &TargetAssociatedEntitiesAnthosClusters{} + + if dcl.IsZeroValue(des.Membership) || (dcl.IsEmptyValueIndirect(des.Membership) && dcl.IsEmptyValueIndirect(initial.Membership)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Membership = initial.Membership + } else { + cDes.Membership = des.Membership + } + + return cDes +} + +func canonicalizeTargetAssociatedEntitiesAnthosClustersSlice(des, initial []TargetAssociatedEntitiesAnthosClusters, opts ...dcl.ApplyOption) []TargetAssociatedEntitiesAnthosClusters { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]TargetAssociatedEntitiesAnthosClusters, 0, len(des)) + for _, d := range des { + cd := canonicalizeTargetAssociatedEntitiesAnthosClusters(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]TargetAssociatedEntitiesAnthosClusters, 0, len(des)) + for i, d := range des { + cd := canonicalizeTargetAssociatedEntitiesAnthosClusters(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewTargetAssociatedEntitiesAnthosClusters(c *Client, des, nw *TargetAssociatedEntitiesAnthosClusters) *TargetAssociatedEntitiesAnthosClusters { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for TargetAssociatedEntitiesAnthosClusters while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewTargetAssociatedEntitiesAnthosClustersSet(c *Client, des, nw []TargetAssociatedEntitiesAnthosClusters) []TargetAssociatedEntitiesAnthosClusters { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []TargetAssociatedEntitiesAnthosClusters + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareTargetAssociatedEntitiesAnthosClustersNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewTargetAssociatedEntitiesAnthosClusters(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewTargetAssociatedEntitiesAnthosClustersSlice(c *Client, des, nw []TargetAssociatedEntitiesAnthosClusters) []TargetAssociatedEntitiesAnthosClusters { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []TargetAssociatedEntitiesAnthosClusters + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewTargetAssociatedEntitiesAnthosClusters(c, &d, &n)) + } + + return items +} + +// The differ returns a list of diffs, along with a list of operations that should be taken +// to remedy them. Right now, it does not attempt to consolidate operations - if several +// fields can be fixed with a patch update, it will perform the patch several times. +// Diffs on some fields will be ignored if the `desired` state has an empty (nil) +// value. This empty value indicates that the user does not care about the state for +// the field. Empty fields on the actual object will cause diffs. +// TODO(magic-modules-eng): for efficiency in some resources, add batching. +func diffTarget(c *Client, desired, actual *Target, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) { + if desired == nil || actual == nil { + return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual) + } + + c.Config.Logger.Infof("Diff function called with desired state: %v", desired) + c.Config.Logger.Infof("Diff function called with actual state: %v", actual) + + var fn dcl.FieldName + var newDiffs []*dcl.FieldDiff + // New style diffs. + if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.TargetId, actual.TargetId, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("TargetId")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Uid, actual.Uid, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Uid")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Description, actual.Description, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("Description")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Annotations, actual.Annotations, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("Annotations")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Labels, actual.Labels, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("Labels")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.RequireApproval, actual.RequireApproval, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("RequireApproval")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.CreateTime, actual.CreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Gke, actual.Gke, dcl.DiffInfo{ObjectFunction: compareTargetGkeNewStyle, EmptyObject: EmptyTargetGke, OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("Gke")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.AnthosCluster, actual.AnthosCluster, dcl.DiffInfo{ObjectFunction: compareTargetAnthosClusterNewStyle, EmptyObject: EmptyTargetAnthosCluster, OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("AnthosCluster")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Etag, actual.Etag, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Etag")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.ExecutionConfigs, actual.ExecutionConfigs, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareTargetExecutionConfigsNewStyle, EmptyObject: EmptyTargetExecutionConfigs, OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("ExecutionConfigs")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Location, actual.Location, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Location")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Run, actual.Run, dcl.DiffInfo{ObjectFunction: compareTargetRunNewStyle, EmptyObject: EmptyTargetRun, OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("Run")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.MultiTarget, actual.MultiTarget, dcl.DiffInfo{ObjectFunction: compareTargetMultiTargetNewStyle, EmptyObject: EmptyTargetMultiTarget, OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("MultiTarget")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.DeployParameters, actual.DeployParameters, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("DeployParameters")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.CustomTarget, actual.CustomTarget, dcl.DiffInfo{ObjectFunction: compareTargetCustomTargetNewStyle, EmptyObject: EmptyTargetCustomTarget, OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("CustomTarget")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.AssociatedEntities, actual.AssociatedEntities, dcl.DiffInfo{ObjectFunction: compareTargetAssociatedEntitiesNewStyle, EmptyObject: EmptyTargetAssociatedEntities, OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("AssociatedEntities")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if len(newDiffs) > 0 { + c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs) + } + return newDiffs, nil +} +func compareTargetGkeNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*TargetGke) + if !ok { + desiredNotPointer, ok := d.(TargetGke) + if !ok { + return nil, fmt.Errorf("obj %v is not a TargetGke or *TargetGke", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*TargetGke) + if !ok { + actualNotPointer, ok := a.(TargetGke) + if !ok { + return nil, fmt.Errorf("obj %v is not a TargetGke", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Cluster, actual.Cluster, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("Cluster")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.InternalIP, actual.InternalIP, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("InternalIp")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ProxyUrl, actual.ProxyUrl, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("ProxyUrl")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.DnsEndpoint, actual.DnsEndpoint, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("DnsEndpoint")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareTargetAnthosClusterNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*TargetAnthosCluster) + if !ok { + desiredNotPointer, ok := d.(TargetAnthosCluster) + if !ok { + return nil, fmt.Errorf("obj %v is not a TargetAnthosCluster or *TargetAnthosCluster", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*TargetAnthosCluster) + if !ok { + actualNotPointer, ok := a.(TargetAnthosCluster) + if !ok { + return nil, fmt.Errorf("obj %v is not a TargetAnthosCluster", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Membership, actual.Membership, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("Membership")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareTargetExecutionConfigsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*TargetExecutionConfigs) + if !ok { + desiredNotPointer, ok := d.(TargetExecutionConfigs) + if !ok { + return nil, fmt.Errorf("obj %v is not a TargetExecutionConfigs or *TargetExecutionConfigs", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*TargetExecutionConfigs) + if !ok { + actualNotPointer, ok := a.(TargetExecutionConfigs) + if !ok { + return nil, fmt.Errorf("obj %v is not a TargetExecutionConfigs", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Usages, actual.Usages, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("Usages")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.WorkerPool, actual.WorkerPool, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("WorkerPool")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ServiceAccount, actual.ServiceAccount, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("ServiceAccount")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ArtifactStorage, actual.ArtifactStorage, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("ArtifactStorage")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.ExecutionTimeout, actual.ExecutionTimeout, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("ExecutionTimeout")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Verbose, actual.Verbose, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("Verbose")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareTargetRunNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*TargetRun) + if !ok { + desiredNotPointer, ok := d.(TargetRun) + if !ok { + return nil, fmt.Errorf("obj %v is not a TargetRun or *TargetRun", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*TargetRun) + if !ok { + actualNotPointer, ok := a.(TargetRun) + if !ok { + return nil, fmt.Errorf("obj %v is not a TargetRun", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Location, actual.Location, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Location")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareTargetMultiTargetNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*TargetMultiTarget) + if !ok { + desiredNotPointer, ok := d.(TargetMultiTarget) + if !ok { + return nil, fmt.Errorf("obj %v is not a TargetMultiTarget or *TargetMultiTarget", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*TargetMultiTarget) + if !ok { + actualNotPointer, ok := a.(TargetMultiTarget) + if !ok { + return nil, fmt.Errorf("obj %v is not a TargetMultiTarget", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.TargetIds, actual.TargetIds, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("TargetIds")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareTargetCustomTargetNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*TargetCustomTarget) + if !ok { + desiredNotPointer, ok := d.(TargetCustomTarget) + if !ok { + return nil, fmt.Errorf("obj %v is not a TargetCustomTarget or *TargetCustomTarget", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*TargetCustomTarget) + if !ok { + actualNotPointer, ok := a.(TargetCustomTarget) + if !ok { + return nil, fmt.Errorf("obj %v is not a TargetCustomTarget", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.CustomTargetType, actual.CustomTargetType, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("CustomTargetType")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareTargetAssociatedEntitiesNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*TargetAssociatedEntities) + if !ok { + desiredNotPointer, ok := d.(TargetAssociatedEntities) + if !ok { + return nil, fmt.Errorf("obj %v is not a TargetAssociatedEntities or *TargetAssociatedEntities", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*TargetAssociatedEntities) + if !ok { + actualNotPointer, ok := a.(TargetAssociatedEntities) + if !ok { + return nil, fmt.Errorf("obj %v is not a TargetAssociatedEntities", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.GkeClusters, actual.GkeClusters, dcl.DiffInfo{ObjectFunction: compareTargetAssociatedEntitiesGkeClustersNewStyle, EmptyObject: EmptyTargetAssociatedEntitiesGkeClusters, OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("GkeClusters")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.AnthosClusters, actual.AnthosClusters, dcl.DiffInfo{ObjectFunction: compareTargetAssociatedEntitiesAnthosClustersNewStyle, EmptyObject: EmptyTargetAssociatedEntitiesAnthosClusters, OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("AnthosClusters")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareTargetAssociatedEntitiesGkeClustersNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*TargetAssociatedEntitiesGkeClusters) + if !ok { + desiredNotPointer, ok := d.(TargetAssociatedEntitiesGkeClusters) + if !ok { + return nil, fmt.Errorf("obj %v is not a TargetAssociatedEntitiesGkeClusters or *TargetAssociatedEntitiesGkeClusters", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*TargetAssociatedEntitiesGkeClusters) + if !ok { + actualNotPointer, ok := a.(TargetAssociatedEntitiesGkeClusters) + if !ok { + return nil, fmt.Errorf("obj %v is not a TargetAssociatedEntitiesGkeClusters", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Cluster, actual.Cluster, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("Cluster")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.InternalIP, actual.InternalIP, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("InternalIp")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ProxyUrl, actual.ProxyUrl, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("ProxyUrl")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareTargetAssociatedEntitiesAnthosClustersNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*TargetAssociatedEntitiesAnthosClusters) + if !ok { + desiredNotPointer, ok := d.(TargetAssociatedEntitiesAnthosClusters) + if !ok { + return nil, fmt.Errorf("obj %v is not a TargetAssociatedEntitiesAnthosClusters or *TargetAssociatedEntitiesAnthosClusters", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*TargetAssociatedEntitiesAnthosClusters) + if !ok { + actualNotPointer, ok := a.(TargetAssociatedEntitiesAnthosClusters) + if !ok { + return nil, fmt.Errorf("obj %v is not a TargetAssociatedEntitiesAnthosClusters", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Membership, actual.Membership, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("Membership")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +// urlNormalized returns a copy of the resource struct with values normalized +// for URL substitutions. For instance, it converts long-form self-links to +// short-form so they can be substituted in. 
+func (r *Target) urlNormalized() *Target { + normalized := dcl.Copy(*r).(Target) + normalized.Name = dcl.SelfLinkToName(r.Name) + normalized.TargetId = dcl.SelfLinkToName(r.TargetId) + normalized.Uid = dcl.SelfLinkToName(r.Uid) + normalized.Description = dcl.SelfLinkToName(r.Description) + normalized.Etag = dcl.SelfLinkToName(r.Etag) + normalized.Project = dcl.SelfLinkToName(r.Project) + normalized.Location = dcl.SelfLinkToName(r.Location) + return &normalized +} + +func (r *Target) updateURL(userBasePath, updateName string) (string, error) { + nr := r.urlNormalized() + if updateName == "UpdateTarget" { + fields := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/targets/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, fields), nil + + } + + return "", fmt.Errorf("unknown update name: %s", updateName) +} + +// marshal encodes the Target resource into JSON for a Create request, and +// performs transformations from the resource schema to the API schema if +// necessary. +func (r *Target) marshal(c *Client) ([]byte, error) { + m, err := expandTarget(c, r) + if err != nil { + return nil, fmt.Errorf("error marshalling Target: %w", err) + } + + return json.Marshal(m) +} + +// unmarshalTarget decodes JSON responses into the Target resource schema. 
+func unmarshalTarget(b []byte, c *Client, res *Target) (*Target, error) { + var m map[string]interface{} + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + return unmarshalMapTarget(m, c, res) +} + +func unmarshalMapTarget(m map[string]interface{}, c *Client, res *Target) (*Target, error) { + + flattened := flattenTarget(c, m, res) + if flattened == nil { + return nil, fmt.Errorf("attempted to flatten empty json object") + } + return flattened, nil +} + +// expandTarget expands Target into a JSON request object. +func expandTarget(c *Client, f *Target) (map[string]interface{}, error) { + m := make(map[string]interface{}) + res := f + _ = res + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Name into name: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["name"] = v + } + if v := f.Description; dcl.ValueShouldBeSent(v) { + m["description"] = v + } + if v := f.Annotations; dcl.ValueShouldBeSent(v) { + m["annotations"] = v + } + if v := f.Labels; dcl.ValueShouldBeSent(v) { + m["labels"] = v + } + if v := f.RequireApproval; dcl.ValueShouldBeSent(v) { + m["requireApproval"] = v + } + if v, err := expandTargetGke(c, f.Gke, res); err != nil { + return nil, fmt.Errorf("error expanding Gke into gke: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["gke"] = v + } + if v, err := expandTargetAnthosCluster(c, f.AnthosCluster, res); err != nil { + return nil, fmt.Errorf("error expanding AnthosCluster into anthosCluster: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["anthosCluster"] = v + } + if v, err := expandTargetExecutionConfigsSlice(c, f.ExecutionConfigs, res); err != nil { + return nil, fmt.Errorf("error expanding ExecutionConfigs into executionConfigs: %w", err) + } else if v != nil { + m["executionConfigs"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Project into project: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + 
m["project"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Location into location: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["location"] = v + } + if v, err := expandTargetRun(c, f.Run, res); err != nil { + return nil, fmt.Errorf("error expanding Run into run: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["run"] = v + } + if v, err := expandTargetMultiTarget(c, f.MultiTarget, res); err != nil { + return nil, fmt.Errorf("error expanding MultiTarget into multiTarget: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["multiTarget"] = v + } + if v := f.DeployParameters; dcl.ValueShouldBeSent(v) { + m["deployParameters"] = v + } + if v, err := expandTargetCustomTarget(c, f.CustomTarget, res); err != nil { + return nil, fmt.Errorf("error expanding CustomTarget into customTarget: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["customTarget"] = v + } + if v, err := expandTargetAssociatedEntitiesMap(c, f.AssociatedEntities, res); err != nil { + return nil, fmt.Errorf("error expanding AssociatedEntities into associatedEntities: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["associatedEntities"] = v + } + + return m, nil +} + +// flattenTarget flattens Target from a JSON request object into the +// Target type. 
+func flattenTarget(c *Client, i interface{}, res *Target) *Target { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + if len(m) == 0 { + return nil + } + + resultRes := &Target{} + resultRes.Name = dcl.FlattenString(m["name"]) + resultRes.TargetId = dcl.FlattenString(m["targetId"]) + resultRes.Uid = dcl.FlattenString(m["uid"]) + resultRes.Description = dcl.FlattenString(m["description"]) + resultRes.Annotations = dcl.FlattenKeyValuePairs(m["annotations"]) + resultRes.Labels = dcl.FlattenKeyValuePairs(m["labels"]) + resultRes.RequireApproval = dcl.FlattenBool(m["requireApproval"]) + resultRes.CreateTime = dcl.FlattenString(m["createTime"]) + resultRes.UpdateTime = dcl.FlattenString(m["updateTime"]) + resultRes.Gke = flattenTargetGke(c, m["gke"], res) + resultRes.AnthosCluster = flattenTargetAnthosCluster(c, m["anthosCluster"], res) + resultRes.Etag = dcl.FlattenString(m["etag"]) + resultRes.ExecutionConfigs = flattenTargetExecutionConfigsSlice(c, m["executionConfigs"], res) + resultRes.Project = dcl.FlattenString(m["project"]) + resultRes.Location = dcl.FlattenString(m["location"]) + resultRes.Run = flattenTargetRun(c, m["run"], res) + resultRes.MultiTarget = flattenTargetMultiTarget(c, m["multiTarget"], res) + resultRes.DeployParameters = dcl.FlattenKeyValuePairs(m["deployParameters"]) + resultRes.CustomTarget = flattenTargetCustomTarget(c, m["customTarget"], res) + resultRes.AssociatedEntities = flattenTargetAssociatedEntitiesMap(c, m["associatedEntities"], res) + + return resultRes +} + +// expandTargetGkeMap expands the contents of TargetGke into a JSON +// request object. 
+func expandTargetGkeMap(c *Client, f map[string]TargetGke, res *Target) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandTargetGke(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandTargetGkeSlice expands the contents of TargetGke into a JSON +// request object. +func expandTargetGkeSlice(c *Client, f []TargetGke, res *Target) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandTargetGke(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenTargetGkeMap flattens the contents of TargetGke from a JSON +// response object. +func flattenTargetGkeMap(c *Client, i interface{}, res *Target) map[string]TargetGke { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]TargetGke{} + } + + if len(a) == 0 { + return map[string]TargetGke{} + } + + items := make(map[string]TargetGke) + for k, item := range a { + items[k] = *flattenTargetGke(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenTargetGkeSlice flattens the contents of TargetGke from a JSON +// response object. +func flattenTargetGkeSlice(c *Client, i interface{}, res *Target) []TargetGke { + a, ok := i.([]interface{}) + if !ok { + return []TargetGke{} + } + + if len(a) == 0 { + return []TargetGke{} + } + + items := make([]TargetGke, 0, len(a)) + for _, item := range a { + items = append(items, *flattenTargetGke(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandTargetGke expands an instance of TargetGke into a JSON +// request object. 
+func expandTargetGke(c *Client, f *TargetGke, res *Target) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Cluster; !dcl.IsEmptyValueIndirect(v) { + m["cluster"] = v + } + if v := f.InternalIP; !dcl.IsEmptyValueIndirect(v) { + m["internalIp"] = v + } + if v := f.ProxyUrl; !dcl.IsEmptyValueIndirect(v) { + m["proxyUrl"] = v + } + if v := f.DnsEndpoint; !dcl.IsEmptyValueIndirect(v) { + m["dnsEndpoint"] = v + } + + return m, nil +} + +// flattenTargetGke flattens an instance of TargetGke from a JSON +// response object. +func flattenTargetGke(c *Client, i interface{}, res *Target) *TargetGke { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &TargetGke{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyTargetGke + } + r.Cluster = dcl.FlattenString(m["cluster"]) + r.InternalIP = dcl.FlattenBool(m["internalIp"]) + r.ProxyUrl = dcl.FlattenString(m["proxyUrl"]) + r.DnsEndpoint = dcl.FlattenBool(m["dnsEndpoint"]) + + return r +} + +// expandTargetAnthosClusterMap expands the contents of TargetAnthosCluster into a JSON +// request object. +func expandTargetAnthosClusterMap(c *Client, f map[string]TargetAnthosCluster, res *Target) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandTargetAnthosCluster(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandTargetAnthosClusterSlice expands the contents of TargetAnthosCluster into a JSON +// request object. 
+func expandTargetAnthosClusterSlice(c *Client, f []TargetAnthosCluster, res *Target) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandTargetAnthosCluster(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenTargetAnthosClusterMap flattens the contents of TargetAnthosCluster from a JSON +// response object. +func flattenTargetAnthosClusterMap(c *Client, i interface{}, res *Target) map[string]TargetAnthosCluster { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]TargetAnthosCluster{} + } + + if len(a) == 0 { + return map[string]TargetAnthosCluster{} + } + + items := make(map[string]TargetAnthosCluster) + for k, item := range a { + items[k] = *flattenTargetAnthosCluster(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenTargetAnthosClusterSlice flattens the contents of TargetAnthosCluster from a JSON +// response object. +func flattenTargetAnthosClusterSlice(c *Client, i interface{}, res *Target) []TargetAnthosCluster { + a, ok := i.([]interface{}) + if !ok { + return []TargetAnthosCluster{} + } + + if len(a) == 0 { + return []TargetAnthosCluster{} + } + + items := make([]TargetAnthosCluster, 0, len(a)) + for _, item := range a { + items = append(items, *flattenTargetAnthosCluster(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandTargetAnthosCluster expands an instance of TargetAnthosCluster into a JSON +// request object. 
+func expandTargetAnthosCluster(c *Client, f *TargetAnthosCluster, res *Target) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Membership; !dcl.IsEmptyValueIndirect(v) { + m["membership"] = v + } + + return m, nil +} + +// flattenTargetAnthosCluster flattens an instance of TargetAnthosCluster from a JSON +// response object. +func flattenTargetAnthosCluster(c *Client, i interface{}, res *Target) *TargetAnthosCluster { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &TargetAnthosCluster{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyTargetAnthosCluster + } + r.Membership = dcl.FlattenString(m["membership"]) + + return r +} + +// expandTargetExecutionConfigsMap expands the contents of TargetExecutionConfigs into a JSON +// request object. +func expandTargetExecutionConfigsMap(c *Client, f map[string]TargetExecutionConfigs, res *Target) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandTargetExecutionConfigs(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandTargetExecutionConfigsSlice expands the contents of TargetExecutionConfigs into a JSON +// request object. +func expandTargetExecutionConfigsSlice(c *Client, f []TargetExecutionConfigs, res *Target) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandTargetExecutionConfigs(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenTargetExecutionConfigsMap flattens the contents of TargetExecutionConfigs from a JSON +// response object. 
+func flattenTargetExecutionConfigsMap(c *Client, i interface{}, res *Target) map[string]TargetExecutionConfigs { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]TargetExecutionConfigs{} + } + + if len(a) == 0 { + return map[string]TargetExecutionConfigs{} + } + + items := make(map[string]TargetExecutionConfigs) + for k, item := range a { + items[k] = *flattenTargetExecutionConfigs(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenTargetExecutionConfigsSlice flattens the contents of TargetExecutionConfigs from a JSON +// response object. +func flattenTargetExecutionConfigsSlice(c *Client, i interface{}, res *Target) []TargetExecutionConfigs { + a, ok := i.([]interface{}) + if !ok { + return []TargetExecutionConfigs{} + } + + if len(a) == 0 { + return []TargetExecutionConfigs{} + } + + items := make([]TargetExecutionConfigs, 0, len(a)) + for _, item := range a { + items = append(items, *flattenTargetExecutionConfigs(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandTargetExecutionConfigs expands an instance of TargetExecutionConfigs into a JSON +// request object. +func expandTargetExecutionConfigs(c *Client, f *TargetExecutionConfigs, res *Target) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Usages; v != nil { + m["usages"] = v + } + if v := f.WorkerPool; !dcl.IsEmptyValueIndirect(v) { + m["workerPool"] = v + } + if v := f.ServiceAccount; !dcl.IsEmptyValueIndirect(v) { + m["serviceAccount"] = v + } + if v := f.ArtifactStorage; !dcl.IsEmptyValueIndirect(v) { + m["artifactStorage"] = v + } + if v := f.ExecutionTimeout; !dcl.IsEmptyValueIndirect(v) { + m["executionTimeout"] = v + } + if v := f.Verbose; !dcl.IsEmptyValueIndirect(v) { + m["verbose"] = v + } + + return m, nil +} + +// flattenTargetExecutionConfigs flattens an instance of TargetExecutionConfigs from a JSON +// response object. 
+func flattenTargetExecutionConfigs(c *Client, i interface{}, res *Target) *TargetExecutionConfigs { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &TargetExecutionConfigs{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyTargetExecutionConfigs + } + r.Usages = flattenTargetExecutionConfigsUsagesEnumSlice(c, m["usages"], res) + r.WorkerPool = dcl.FlattenString(m["workerPool"]) + r.ServiceAccount = dcl.FlattenString(m["serviceAccount"]) + r.ArtifactStorage = dcl.FlattenString(m["artifactStorage"]) + r.ExecutionTimeout = dcl.FlattenString(m["executionTimeout"]) + r.Verbose = dcl.FlattenBool(m["verbose"]) + + return r +} + +// expandTargetRunMap expands the contents of TargetRun into a JSON +// request object. +func expandTargetRunMap(c *Client, f map[string]TargetRun, res *Target) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandTargetRun(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandTargetRunSlice expands the contents of TargetRun into a JSON +// request object. +func expandTargetRunSlice(c *Client, f []TargetRun, res *Target) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandTargetRun(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenTargetRunMap flattens the contents of TargetRun from a JSON +// response object. 
+func flattenTargetRunMap(c *Client, i interface{}, res *Target) map[string]TargetRun { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]TargetRun{} + } + + if len(a) == 0 { + return map[string]TargetRun{} + } + + items := make(map[string]TargetRun) + for k, item := range a { + items[k] = *flattenTargetRun(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenTargetRunSlice flattens the contents of TargetRun from a JSON +// response object. +func flattenTargetRunSlice(c *Client, i interface{}, res *Target) []TargetRun { + a, ok := i.([]interface{}) + if !ok { + return []TargetRun{} + } + + if len(a) == 0 { + return []TargetRun{} + } + + items := make([]TargetRun, 0, len(a)) + for _, item := range a { + items = append(items, *flattenTargetRun(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandTargetRun expands an instance of TargetRun into a JSON +// request object. +func expandTargetRun(c *Client, f *TargetRun, res *Target) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Location; !dcl.IsEmptyValueIndirect(v) { + m["location"] = v + } + + return m, nil +} + +// flattenTargetRun flattens an instance of TargetRun from a JSON +// response object. +func flattenTargetRun(c *Client, i interface{}, res *Target) *TargetRun { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &TargetRun{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyTargetRun + } + r.Location = dcl.FlattenString(m["location"]) + + return r +} + +// expandTargetMultiTargetMap expands the contents of TargetMultiTarget into a JSON +// request object. 
+func expandTargetMultiTargetMap(c *Client, f map[string]TargetMultiTarget, res *Target) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandTargetMultiTarget(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandTargetMultiTargetSlice expands the contents of TargetMultiTarget into a JSON +// request object. +func expandTargetMultiTargetSlice(c *Client, f []TargetMultiTarget, res *Target) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandTargetMultiTarget(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenTargetMultiTargetMap flattens the contents of TargetMultiTarget from a JSON +// response object. +func flattenTargetMultiTargetMap(c *Client, i interface{}, res *Target) map[string]TargetMultiTarget { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]TargetMultiTarget{} + } + + if len(a) == 0 { + return map[string]TargetMultiTarget{} + } + + items := make(map[string]TargetMultiTarget) + for k, item := range a { + items[k] = *flattenTargetMultiTarget(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenTargetMultiTargetSlice flattens the contents of TargetMultiTarget from a JSON +// response object. 
+func flattenTargetMultiTargetSlice(c *Client, i interface{}, res *Target) []TargetMultiTarget { + a, ok := i.([]interface{}) + if !ok { + return []TargetMultiTarget{} + } + + if len(a) == 0 { + return []TargetMultiTarget{} + } + + items := make([]TargetMultiTarget, 0, len(a)) + for _, item := range a { + items = append(items, *flattenTargetMultiTarget(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandTargetMultiTarget expands an instance of TargetMultiTarget into a JSON +// request object. +func expandTargetMultiTarget(c *Client, f *TargetMultiTarget, res *Target) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.TargetIds; v != nil { + m["targetIds"] = v + } + + return m, nil +} + +// flattenTargetMultiTarget flattens an instance of TargetMultiTarget from a JSON +// response object. +func flattenTargetMultiTarget(c *Client, i interface{}, res *Target) *TargetMultiTarget { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &TargetMultiTarget{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyTargetMultiTarget + } + r.TargetIds = dcl.FlattenStringSlice(m["targetIds"]) + + return r +} + +// expandTargetCustomTargetMap expands the contents of TargetCustomTarget into a JSON +// request object. +func expandTargetCustomTargetMap(c *Client, f map[string]TargetCustomTarget, res *Target) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandTargetCustomTarget(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandTargetCustomTargetSlice expands the contents of TargetCustomTarget into a JSON +// request object. 
+func expandTargetCustomTargetSlice(c *Client, f []TargetCustomTarget, res *Target) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandTargetCustomTarget(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenTargetCustomTargetMap flattens the contents of TargetCustomTarget from a JSON +// response object. +func flattenTargetCustomTargetMap(c *Client, i interface{}, res *Target) map[string]TargetCustomTarget { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]TargetCustomTarget{} + } + + if len(a) == 0 { + return map[string]TargetCustomTarget{} + } + + items := make(map[string]TargetCustomTarget) + for k, item := range a { + items[k] = *flattenTargetCustomTarget(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenTargetCustomTargetSlice flattens the contents of TargetCustomTarget from a JSON +// response object. +func flattenTargetCustomTargetSlice(c *Client, i interface{}, res *Target) []TargetCustomTarget { + a, ok := i.([]interface{}) + if !ok { + return []TargetCustomTarget{} + } + + if len(a) == 0 { + return []TargetCustomTarget{} + } + + items := make([]TargetCustomTarget, 0, len(a)) + for _, item := range a { + items = append(items, *flattenTargetCustomTarget(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandTargetCustomTarget expands an instance of TargetCustomTarget into a JSON +// request object. 
+func expandTargetCustomTarget(c *Client, f *TargetCustomTarget, res *Target) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.CustomTargetType; !dcl.IsEmptyValueIndirect(v) { + m["customTargetType"] = v + } + + return m, nil +} + +// flattenTargetCustomTarget flattens an instance of TargetCustomTarget from a JSON +// response object. +func flattenTargetCustomTarget(c *Client, i interface{}, res *Target) *TargetCustomTarget { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &TargetCustomTarget{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyTargetCustomTarget + } + r.CustomTargetType = dcl.FlattenString(m["customTargetType"]) + + return r +} + +// expandTargetAssociatedEntitiesMap expands the contents of TargetAssociatedEntities into a JSON +// request object. +func expandTargetAssociatedEntitiesMap(c *Client, f map[string]TargetAssociatedEntities, res *Target) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandTargetAssociatedEntities(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandTargetAssociatedEntitiesSlice expands the contents of TargetAssociatedEntities into a JSON +// request object. +func expandTargetAssociatedEntitiesSlice(c *Client, f []TargetAssociatedEntities, res *Target) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandTargetAssociatedEntities(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenTargetAssociatedEntitiesMap flattens the contents of TargetAssociatedEntities from a JSON +// response object. 
+func flattenTargetAssociatedEntitiesMap(c *Client, i interface{}, res *Target) map[string]TargetAssociatedEntities { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]TargetAssociatedEntities{} + } + + if len(a) == 0 { + return map[string]TargetAssociatedEntities{} + } + + items := make(map[string]TargetAssociatedEntities) + for k, item := range a { + items[k] = *flattenTargetAssociatedEntities(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenTargetAssociatedEntitiesSlice flattens the contents of TargetAssociatedEntities from a JSON +// response object. +func flattenTargetAssociatedEntitiesSlice(c *Client, i interface{}, res *Target) []TargetAssociatedEntities { + a, ok := i.([]interface{}) + if !ok { + return []TargetAssociatedEntities{} + } + + if len(a) == 0 { + return []TargetAssociatedEntities{} + } + + items := make([]TargetAssociatedEntities, 0, len(a)) + for _, item := range a { + items = append(items, *flattenTargetAssociatedEntities(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandTargetAssociatedEntities expands an instance of TargetAssociatedEntities into a JSON +// request object. 
// expandTargetAssociatedEntities expands an instance of
// TargetAssociatedEntities into a JSON request object. Returns (nil, nil)
// when f is empty/nil so the field is omitted from the request. Errors from
// expanding the nested cluster slices are wrapped with %w for errors.Is/As.
func expandTargetAssociatedEntities(c *Client, f *TargetAssociatedEntities, res *Target) (map[string]interface{}, error) {
	if dcl.IsEmptyValueIndirect(f) {
		return nil, nil
	}

	m := make(map[string]interface{})
	// Nil slice expansions are dropped; empty-but-non-nil slices are kept.
	if v, err := expandTargetAssociatedEntitiesGkeClustersSlice(c, f.GkeClusters, res); err != nil {
		return nil, fmt.Errorf("error expanding GkeClusters into gkeClusters: %w", err)
	} else if v != nil {
		m["gkeClusters"] = v
	}
	if v, err := expandTargetAssociatedEntitiesAnthosClustersSlice(c, f.AnthosClusters, res); err != nil {
		return nil, fmt.Errorf("error expanding AnthosClusters into anthosClusters: %w", err)
	} else if v != nil {
		m["anthosClusters"] = v
	}

	return m, nil
}

// flattenTargetAssociatedEntities flattens an instance of
// TargetAssociatedEntities from a JSON response object. Returns nil when i is
// not a JSON object, and the shared EmptyTargetAssociatedEntities sentinel
// when i is an empty value, so callers can distinguish "absent" from
// "present but empty".
func flattenTargetAssociatedEntities(c *Client, i interface{}, res *Target) *TargetAssociatedEntities {
	m, ok := i.(map[string]interface{})
	if !ok {
		return nil
	}

	r := &TargetAssociatedEntities{}

	if dcl.IsEmptyValueIndirect(i) {
		return EmptyTargetAssociatedEntities
	}
	r.GkeClusters = flattenTargetAssociatedEntitiesGkeClustersSlice(c, m["gkeClusters"], res)
	r.AnthosClusters = flattenTargetAssociatedEntitiesAnthosClustersSlice(c, m["anthosClusters"], res)

	return r
}

// expandTargetAssociatedEntitiesGkeClustersMap expands the contents of TargetAssociatedEntitiesGkeClusters into a JSON
// request object.
+func expandTargetAssociatedEntitiesGkeClustersMap(c *Client, f map[string]TargetAssociatedEntitiesGkeClusters, res *Target) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandTargetAssociatedEntitiesGkeClusters(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandTargetAssociatedEntitiesGkeClustersSlice expands the contents of TargetAssociatedEntitiesGkeClusters into a JSON +// request object. +func expandTargetAssociatedEntitiesGkeClustersSlice(c *Client, f []TargetAssociatedEntitiesGkeClusters, res *Target) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandTargetAssociatedEntitiesGkeClusters(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenTargetAssociatedEntitiesGkeClustersMap flattens the contents of TargetAssociatedEntitiesGkeClusters from a JSON +// response object. +func flattenTargetAssociatedEntitiesGkeClustersMap(c *Client, i interface{}, res *Target) map[string]TargetAssociatedEntitiesGkeClusters { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]TargetAssociatedEntitiesGkeClusters{} + } + + if len(a) == 0 { + return map[string]TargetAssociatedEntitiesGkeClusters{} + } + + items := make(map[string]TargetAssociatedEntitiesGkeClusters) + for k, item := range a { + items[k] = *flattenTargetAssociatedEntitiesGkeClusters(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenTargetAssociatedEntitiesGkeClustersSlice flattens the contents of TargetAssociatedEntitiesGkeClusters from a JSON +// response object. 
+func flattenTargetAssociatedEntitiesGkeClustersSlice(c *Client, i interface{}, res *Target) []TargetAssociatedEntitiesGkeClusters { + a, ok := i.([]interface{}) + if !ok { + return []TargetAssociatedEntitiesGkeClusters{} + } + + if len(a) == 0 { + return []TargetAssociatedEntitiesGkeClusters{} + } + + items := make([]TargetAssociatedEntitiesGkeClusters, 0, len(a)) + for _, item := range a { + items = append(items, *flattenTargetAssociatedEntitiesGkeClusters(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandTargetAssociatedEntitiesGkeClusters expands an instance of TargetAssociatedEntitiesGkeClusters into a JSON +// request object. +func expandTargetAssociatedEntitiesGkeClusters(c *Client, f *TargetAssociatedEntitiesGkeClusters, res *Target) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Cluster; !dcl.IsEmptyValueIndirect(v) { + m["cluster"] = v + } + if v := f.InternalIP; !dcl.IsEmptyValueIndirect(v) { + m["internalIp"] = v + } + if v := f.ProxyUrl; !dcl.IsEmptyValueIndirect(v) { + m["proxyUrl"] = v + } + + return m, nil +} + +// flattenTargetAssociatedEntitiesGkeClusters flattens an instance of TargetAssociatedEntitiesGkeClusters from a JSON +// response object. +func flattenTargetAssociatedEntitiesGkeClusters(c *Client, i interface{}, res *Target) *TargetAssociatedEntitiesGkeClusters { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &TargetAssociatedEntitiesGkeClusters{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyTargetAssociatedEntitiesGkeClusters + } + r.Cluster = dcl.FlattenString(m["cluster"]) + r.InternalIP = dcl.FlattenBool(m["internalIp"]) + r.ProxyUrl = dcl.FlattenString(m["proxyUrl"]) + + return r +} + +// expandTargetAssociatedEntitiesAnthosClustersMap expands the contents of TargetAssociatedEntitiesAnthosClusters into a JSON +// request object. 
+func expandTargetAssociatedEntitiesAnthosClustersMap(c *Client, f map[string]TargetAssociatedEntitiesAnthosClusters, res *Target) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandTargetAssociatedEntitiesAnthosClusters(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandTargetAssociatedEntitiesAnthosClustersSlice expands the contents of TargetAssociatedEntitiesAnthosClusters into a JSON +// request object. +func expandTargetAssociatedEntitiesAnthosClustersSlice(c *Client, f []TargetAssociatedEntitiesAnthosClusters, res *Target) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandTargetAssociatedEntitiesAnthosClusters(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenTargetAssociatedEntitiesAnthosClustersMap flattens the contents of TargetAssociatedEntitiesAnthosClusters from a JSON +// response object. +func flattenTargetAssociatedEntitiesAnthosClustersMap(c *Client, i interface{}, res *Target) map[string]TargetAssociatedEntitiesAnthosClusters { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]TargetAssociatedEntitiesAnthosClusters{} + } + + if len(a) == 0 { + return map[string]TargetAssociatedEntitiesAnthosClusters{} + } + + items := make(map[string]TargetAssociatedEntitiesAnthosClusters) + for k, item := range a { + items[k] = *flattenTargetAssociatedEntitiesAnthosClusters(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenTargetAssociatedEntitiesAnthosClustersSlice flattens the contents of TargetAssociatedEntitiesAnthosClusters from a JSON +// response object. 
+func flattenTargetAssociatedEntitiesAnthosClustersSlice(c *Client, i interface{}, res *Target) []TargetAssociatedEntitiesAnthosClusters { + a, ok := i.([]interface{}) + if !ok { + return []TargetAssociatedEntitiesAnthosClusters{} + } + + if len(a) == 0 { + return []TargetAssociatedEntitiesAnthosClusters{} + } + + items := make([]TargetAssociatedEntitiesAnthosClusters, 0, len(a)) + for _, item := range a { + items = append(items, *flattenTargetAssociatedEntitiesAnthosClusters(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandTargetAssociatedEntitiesAnthosClusters expands an instance of TargetAssociatedEntitiesAnthosClusters into a JSON +// request object. +func expandTargetAssociatedEntitiesAnthosClusters(c *Client, f *TargetAssociatedEntitiesAnthosClusters, res *Target) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Membership; !dcl.IsEmptyValueIndirect(v) { + m["membership"] = v + } + + return m, nil +} + +// flattenTargetAssociatedEntitiesAnthosClusters flattens an instance of TargetAssociatedEntitiesAnthosClusters from a JSON +// response object. +func flattenTargetAssociatedEntitiesAnthosClusters(c *Client, i interface{}, res *Target) *TargetAssociatedEntitiesAnthosClusters { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &TargetAssociatedEntitiesAnthosClusters{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyTargetAssociatedEntitiesAnthosClusters + } + r.Membership = dcl.FlattenString(m["membership"]) + + return r +} + +// flattenTargetExecutionConfigsUsagesEnumMap flattens the contents of TargetExecutionConfigsUsagesEnum from a JSON +// response object. 
+func flattenTargetExecutionConfigsUsagesEnumMap(c *Client, i interface{}, res *Target) map[string]TargetExecutionConfigsUsagesEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]TargetExecutionConfigsUsagesEnum{} + } + + if len(a) == 0 { + return map[string]TargetExecutionConfigsUsagesEnum{} + } + + items := make(map[string]TargetExecutionConfigsUsagesEnum) + for k, item := range a { + items[k] = *flattenTargetExecutionConfigsUsagesEnum(item.(interface{})) + } + + return items +} + +// flattenTargetExecutionConfigsUsagesEnumSlice flattens the contents of TargetExecutionConfigsUsagesEnum from a JSON +// response object. +func flattenTargetExecutionConfigsUsagesEnumSlice(c *Client, i interface{}, res *Target) []TargetExecutionConfigsUsagesEnum { + a, ok := i.([]interface{}) + if !ok { + return []TargetExecutionConfigsUsagesEnum{} + } + + if len(a) == 0 { + return []TargetExecutionConfigsUsagesEnum{} + } + + items := make([]TargetExecutionConfigsUsagesEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenTargetExecutionConfigsUsagesEnum(item.(interface{}))) + } + + return items +} + +// flattenTargetExecutionConfigsUsagesEnum asserts that an interface is a string, and returns a +// pointer to a *TargetExecutionConfigsUsagesEnum with the same value as that string. +func flattenTargetExecutionConfigsUsagesEnum(i interface{}) *TargetExecutionConfigsUsagesEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return TargetExecutionConfigsUsagesEnumRef(s) +} + +// This function returns a matcher that checks whether a serialized resource matches this resource +// in its parameters (as defined by the fields in a Get, which definitionally define resource +// identity). This is useful in extracting the element from a List call. 
+func (r *Target) matcher(c *Client) func([]byte) bool { + return func(b []byte) bool { + cr, err := unmarshalTarget(b, c, r) + if err != nil { + c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.") + return false + } + nr := r.urlNormalized() + ncr := cr.urlNormalized() + c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr) + + if nr.Project == nil && ncr.Project == nil { + c.Config.Logger.Info("Both Project fields null - considering equal.") + } else if nr.Project == nil || ncr.Project == nil { + c.Config.Logger.Info("Only one Project field is null - considering unequal.") + return false + } else if *nr.Project != *ncr.Project { + return false + } + if nr.Location == nil && ncr.Location == nil { + c.Config.Logger.Info("Both Location fields null - considering equal.") + } else if nr.Location == nil || ncr.Location == nil { + c.Config.Logger.Info("Only one Location field is null - considering unequal.") + return false + } else if *nr.Location != *ncr.Location { + return false + } + if nr.Name == nil && ncr.Name == nil { + c.Config.Logger.Info("Both Name fields null - considering equal.") + } else if nr.Name == nil || ncr.Name == nil { + c.Config.Logger.Info("Only one Name field is null - considering unequal.") + return false + } else if *nr.Name != *ncr.Name { + return false + } + return true + } +} + +type targetDiff struct { + // The diff should include one or the other of RequiresRecreate or UpdateOp. + RequiresRecreate bool + UpdateOp targetApiOperation + FieldName string // used for error logging +} + +func convertFieldDiffsToTargetDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]targetDiff, error) { + opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff) + // Map each operation name to the field diffs associated with it. 
+ for _, fd := range fds { + for _, ro := range fd.ResultingOperation { + if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok { + fieldDiffs = append(fieldDiffs, fd) + opNamesToFieldDiffs[ro] = fieldDiffs + } else { + config.Logger.Infof("%s required due to diff: %v", ro, fd) + opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd} + } + } + } + var diffs []targetDiff + // For each operation name, create a targetDiff which contains the operation. + for opName, fieldDiffs := range opNamesToFieldDiffs { + // Use the first field diff's field name for logging required recreate error. + diff := targetDiff{FieldName: fieldDiffs[0].FieldName} + if opName == "Recreate" { + diff.RequiresRecreate = true + } else { + apiOp, err := convertOpNameToTargetApiOperation(opName, fieldDiffs, opts...) + if err != nil { + return diffs, err + } + diff.UpdateOp = apiOp + } + diffs = append(diffs, diff) + } + return diffs, nil +} + +func convertOpNameToTargetApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (targetApiOperation, error) { + switch opName { + + case "updateTargetUpdateTargetOperation": + return &updateTargetUpdateTargetOperation{FieldDiffs: fieldDiffs}, nil + + default: + return nil, fmt.Errorf("no such operation with name: %v", opName) + } +} + +func extractTargetFields(r *Target) error { + vGke := r.Gke + if vGke == nil { + // note: explicitly not the empty object. + vGke = &TargetGke{} + } + if err := extractTargetGkeFields(r, vGke); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vGke) { + r.Gke = vGke + } + vAnthosCluster := r.AnthosCluster + if vAnthosCluster == nil { + // note: explicitly not the empty object. + vAnthosCluster = &TargetAnthosCluster{} + } + if err := extractTargetAnthosClusterFields(r, vAnthosCluster); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAnthosCluster) { + r.AnthosCluster = vAnthosCluster + } + vRun := r.Run + if vRun == nil { + // note: explicitly not the empty object. 
+ vRun = &TargetRun{} + } + if err := extractTargetRunFields(r, vRun); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vRun) { + r.Run = vRun + } + vMultiTarget := r.MultiTarget + if vMultiTarget == nil { + // note: explicitly not the empty object. + vMultiTarget = &TargetMultiTarget{} + } + if err := extractTargetMultiTargetFields(r, vMultiTarget); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMultiTarget) { + r.MultiTarget = vMultiTarget + } + vCustomTarget := r.CustomTarget + if vCustomTarget == nil { + // note: explicitly not the empty object. + vCustomTarget = &TargetCustomTarget{} + } + if err := extractTargetCustomTargetFields(r, vCustomTarget); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vCustomTarget) { + r.CustomTarget = vCustomTarget + } + return nil +} +func extractTargetGkeFields(r *Target, o *TargetGke) error { + return nil +} +func extractTargetAnthosClusterFields(r *Target, o *TargetAnthosCluster) error { + return nil +} +func extractTargetExecutionConfigsFields(r *Target, o *TargetExecutionConfigs) error { + return nil +} +func extractTargetRunFields(r *Target, o *TargetRun) error { + return nil +} +func extractTargetMultiTargetFields(r *Target, o *TargetMultiTarget) error { + return nil +} +func extractTargetCustomTargetFields(r *Target, o *TargetCustomTarget) error { + return nil +} +func extractTargetAssociatedEntitiesFields(r *Target, o *TargetAssociatedEntities) error { + return nil +} +func extractTargetAssociatedEntitiesGkeClustersFields(r *Target, o *TargetAssociatedEntitiesGkeClusters) error { + return nil +} +func extractTargetAssociatedEntitiesAnthosClustersFields(r *Target, o *TargetAssociatedEntitiesAnthosClusters) error { + return nil +} + +func postReadExtractTargetFields(r *Target) error { + vGke := r.Gke + if vGke == nil { + // note: explicitly not the empty object. 
+ vGke = &TargetGke{} + } + if err := postReadExtractTargetGkeFields(r, vGke); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vGke) { + r.Gke = vGke + } + vAnthosCluster := r.AnthosCluster + if vAnthosCluster == nil { + // note: explicitly not the empty object. + vAnthosCluster = &TargetAnthosCluster{} + } + if err := postReadExtractTargetAnthosClusterFields(r, vAnthosCluster); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAnthosCluster) { + r.AnthosCluster = vAnthosCluster + } + vRun := r.Run + if vRun == nil { + // note: explicitly not the empty object. + vRun = &TargetRun{} + } + if err := postReadExtractTargetRunFields(r, vRun); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vRun) { + r.Run = vRun + } + vMultiTarget := r.MultiTarget + if vMultiTarget == nil { + // note: explicitly not the empty object. + vMultiTarget = &TargetMultiTarget{} + } + if err := postReadExtractTargetMultiTargetFields(r, vMultiTarget); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMultiTarget) { + r.MultiTarget = vMultiTarget + } + vCustomTarget := r.CustomTarget + if vCustomTarget == nil { + // note: explicitly not the empty object. 
+ vCustomTarget = &TargetCustomTarget{} + } + if err := postReadExtractTargetCustomTargetFields(r, vCustomTarget); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vCustomTarget) { + r.CustomTarget = vCustomTarget + } + return nil +} +func postReadExtractTargetGkeFields(r *Target, o *TargetGke) error { + return nil +} +func postReadExtractTargetAnthosClusterFields(r *Target, o *TargetAnthosCluster) error { + return nil +} +func postReadExtractTargetExecutionConfigsFields(r *Target, o *TargetExecutionConfigs) error { + return nil +} +func postReadExtractTargetRunFields(r *Target, o *TargetRun) error { + return nil +} +func postReadExtractTargetMultiTargetFields(r *Target, o *TargetMultiTarget) error { + return nil +} +func postReadExtractTargetCustomTargetFields(r *Target, o *TargetCustomTarget) error { + return nil +} +func postReadExtractTargetAssociatedEntitiesFields(r *Target, o *TargetAssociatedEntities) error { + return nil +} +func postReadExtractTargetAssociatedEntitiesGkeClustersFields(r *Target, o *TargetAssociatedEntitiesGkeClusters) error { + return nil +} +func postReadExtractTargetAssociatedEntitiesAnthosClustersFields(r *Target, o *TargetAssociatedEntitiesAnthosClusters) error { + return nil +} diff --git a/mmv1/third_party/terraform/services/clouddeploy/target_schema.go.tmpl b/mmv1/third_party/terraform/services/clouddeploy/target_schema.go.tmpl new file mode 100644 index 000000000000..3d18ed795cd7 --- /dev/null +++ b/mmv1/third_party/terraform/services/clouddeploy/target_schema.go.tmpl @@ -0,0 +1,488 @@ +package clouddeploy + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +func DCLTargetSchema() *dcl.Schema { + return &dcl.Schema{ + Info: &dcl.Info{ + Title: "Clouddeploy/Target", + Description: "The Cloud Deploy `Target` resource", + StructName: "Target", + Reference: &dcl.Link{ + Text: "REST API", + URL: "https://cloud.google.com/deploy/docs/api/reference/rest/v1/projects.locations.targets", 
+ }, + }, + Paths: &dcl.Paths{ + Get: &dcl.Path{ + Description: "The function used to get information about a Target", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "target", + Required: true, + Description: "A full instance of a Target", + }, + }, + }, + Apply: &dcl.Path{ + Description: "The function used to apply information about a Target", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "target", + Required: true, + Description: "A full instance of a Target", + }, + }, + }, + Delete: &dcl.Path{ + Description: "The function used to delete a Target", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "target", + Required: true, + Description: "A full instance of a Target", + }, + }, + }, + DeleteAll: &dcl.Path{ + Description: "The function used to delete all Target", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "project", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + dcl.PathParameters{ + Name: "location", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + }, + }, + List: &dcl.Path{ + Description: "The function used to list information about many Target", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "project", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + dcl.PathParameters{ + Name: "location", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + }, + }, + }, + Components: &dcl.Components{ + Schemas: map[string]*dcl.Component{ + "Target": &dcl.Component{ + Title: "Target", + ID: "projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/targets/{{ "{{" }}name{{ "}}" }}", + ParentContainer: "project", + HasCreate: true, + SchemaProperty: dcl.Property{ + Type: "object", + Required: []string{ + "name", + "project", + "location", + }, + Properties: map[string]*dcl.Property{ + "annotations": &dcl.Property{ + Type: 
"object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "Annotations", + Description: "Optional. User annotations. These attributes can only be set and used by the user, and not by Google Cloud Deploy. See https://google.aip.dev/128#annotations for more details such as format and size limitations.", + }, + "anthosCluster": &dcl.Property{ + Type: "object", + GoName: "AnthosCluster", + GoType: "TargetAnthosCluster", + Description: "Information specifying an Anthos Cluster.", + Conflicts: []string{ + "gke", + "run", + "multiTarget", + "customTarget", + }, + Properties: map[string]*dcl.Property{ + "membership": &dcl.Property{ + Type: "string", + GoName: "Membership", + Description: "Membership of the GKE Hub-registered cluster to which to apply the Skaffold configuration. Format is `projects/{project}/locations/{location}/memberships/{membership_name}`.", + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Gkehub/Membership", + Field: "selfLink", + }, + }, + }, + }, + }, + "associatedEntities": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "object", + GoType: "TargetAssociatedEntities", + Properties: map[string]*dcl.Property{ + "anthosClusters": &dcl.Property{ + Type: "array", + GoName: "AnthosClusters", + Description: "Optional. Information specifying Anthos clusters as associated entities.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "object", + GoType: "TargetAssociatedEntitiesAnthosClusters", + Properties: map[string]*dcl.Property{ + "membership": &dcl.Property{ + Type: "string", + GoName: "Membership", + Description: "Optional. Membership of the GKE Hub-registered cluster to which to apply the Skaffold configuration. 
Format is `projects/{project}/locations/{location}/memberships/{membership_name}`.", + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Gkehub/Membership", + Field: "selfLink", + }, + }, + }, + }, + }, + }, + "gkeClusters": &dcl.Property{ + Type: "array", + GoName: "GkeClusters", + Description: "Optional. Information specifying GKE clusters as associated entities.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "object", + GoType: "TargetAssociatedEntitiesGkeClusters", + Properties: map[string]*dcl.Property{ + "cluster": &dcl.Property{ + Type: "string", + GoName: "Cluster", + Description: "Optional. Information specifying a GKE Cluster. Format is `projects/{project_id}/locations/{location_id}/clusters/{cluster_id}`.", + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Container/Cluster", + Field: "selfLink", + }, + }, + }, + "internalIP": &dcl.Property{ + Type: "boolean", + GoName: "InternalIP", + Description: "Optional. If true, `cluster` is accessed using the private IP address of the control plane endpoint. Otherwise, the default IP address of the control plane endpoint is used. The default IP address is the private IP address for clusters with private control-plane endpoints and the public IP address otherwise. Only specify this option when `cluster` is a [private GKE cluster](https://cloud.google.com/kubernetes-engine/docs/concepts/private-cluster-concept).", + }, + "proxyUrl": &dcl.Property{ + Type: "string", + GoName: "ProxyUrl", + Description: "Optional. If set, used to configure a [proxy](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/#proxy) to the Kubernetes server.", + }, + }, + }, + }, + }, + }, + GoName: "AssociatedEntities", + Description: "Optional. Map of entity IDs to their associated entities. 
Associated entities allows specifying places other than the deployment target for specific features. For example, the Gateway API canary can be configured to deploy the HTTPRoute to a different cluster(s) than the deployment cluster using associated entities. An entity ID must consist of lower-case letters, numbers, and hyphens, start with a letter and end with a letter or a number, and have a max length of 63 characters. In other words, it must match the following regex: `^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$`.", + }, + "createTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "CreateTime", + ReadOnly: true, + Description: "Output only. Time at which the `Target` was created.", + Immutable: true, + }, + "customTarget": &dcl.Property{ + Type: "object", + GoName: "CustomTarget", + GoType: "TargetCustomTarget", + Description: "Optional. Information specifying a Custom Target.", + Conflicts: []string{ + "gke", + "anthosCluster", + "run", + "multiTarget", + }, + Required: []string{ + "customTargetType", + }, + Properties: map[string]*dcl.Property{ + "customTargetType": &dcl.Property{ + Type: "string", + GoName: "CustomTargetType", + Description: "Required. The name of the CustomTargetType. Format must be `projects/{project}/locations/{location}/customTargetTypes/{custom_target_type}`.", + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Clouddeploy/CustomTargetType", + Field: "selfLink", + }, + }, + }, + }, + }, + "deployParameters": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "DeployParameters", + Description: "Optional. The deploy parameters to use for this target.", + }, + "description": &dcl.Property{ + Type: "string", + GoName: "Description", + Description: "Optional. Description of the `Target`. Max length is 255 characters.", + }, + "etag": &dcl.Property{ + Type: "string", + GoName: "Etag", + ReadOnly: true, + Description: "Optional. 
This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.", + Immutable: true, + }, + "executionConfigs": &dcl.Property{ + Type: "array", + GoName: "ExecutionConfigs", + Description: "Configurations for all execution that relates to this `Target`. Each `ExecutionEnvironmentUsage` value may only be used in a single configuration; using the same value multiple times is an error. When one or more configurations are specified, they must include the `RENDER` and `DEPLOY` `ExecutionEnvironmentUsage` values. When no configurations are specified, execution will use the default specified in `DefaultPool`.", + ServerDefault: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "object", + GoType: "TargetExecutionConfigs", + Required: []string{ + "usages", + }, + Properties: map[string]*dcl.Property{ + "artifactStorage": &dcl.Property{ + Type: "string", + GoName: "ArtifactStorage", + Description: "Optional. Cloud Storage location in which to store execution outputs. This can either be a bucket (\"gs://my-bucket\") or a path within a bucket (\"gs://my-bucket/my-dir\"). If unspecified, a default bucket located in the same region will be used.", + ServerDefault: true, + }, + "executionTimeout": &dcl.Property{ + Type: "string", + GoName: "ExecutionTimeout", + Description: "Optional. Execution timeout for a Cloud Build Execution. This must be between 10m and 24h in seconds format. If unspecified, a default timeout of 1h is used.", + ServerDefault: true, + }, + "serviceAccount": &dcl.Property{ + Type: "string", + GoName: "ServiceAccount", + Description: "Optional. Google service account to use for execution. If unspecified, the project execution service account (-compute@developer.gserviceaccount.com) is used.", + ServerDefault: true, + }, + "usages": &dcl.Property{ + Type: "array", + GoName: "Usages", + Description: "Required. 
Usages when this configuration should be applied.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "TargetExecutionConfigsUsagesEnum", + Enum: []string{ + "EXECUTION_ENVIRONMENT_USAGE_UNSPECIFIED", + "RENDER", + "DEPLOY", + }, + }, + }, + "verbose": &dcl.Property{ + Type: "boolean", + GoName: "Verbose", + Description: "Optional. If true, additional logging will be enabled when running builds in this execution environment.", + }, + "workerPool": &dcl.Property{ + Type: "string", + GoName: "WorkerPool", + Description: "Optional. The resource name of the `WorkerPool`, with the format `projects/{project}/locations/{location}/workerPools/{worker_pool}`. If this optional field is unspecified, the default Cloud Build pool will be used.", + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Cloudbuild/WorkerPool", + Field: "selfLink", + }, + }, + }, + }, + }, + }, + "gke": &dcl.Property{ + Type: "object", + GoName: "Gke", + GoType: "TargetGke", + Description: "Information specifying a GKE Cluster.", + Conflicts: []string{ + "anthosCluster", + "run", + "multiTarget", + "customTarget", + }, + Properties: map[string]*dcl.Property{ + "cluster": &dcl.Property{ + Type: "string", + GoName: "Cluster", + Description: "Information specifying a GKE Cluster. Format is `projects/{project_id}/locations/{location_id}/clusters/{cluster_id}.", + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Container/Cluster", + Field: "selfLink", + }, + }, + }, + "dnsEndpoint": &dcl.Property{ + Type: "boolean", + GoName: "DnsEndpoint", + Description: "Optional. If set, the cluster will be accessed using the DNS endpoint. Note that both `dns_endpoint` and `internal_ip` cannot be set to true.", + }, + "internalIP": &dcl.Property{ + Type: "boolean", + GoName: "InternalIP", + Description: "Optional. 
If true, `cluster` is accessed using the private IP address of the control plane endpoint. Otherwise, the default IP address of the control plane endpoint is used. The default IP address is the private IP address for clusters with private control-plane endpoints and the public IP address otherwise. Only specify this option when `cluster` is a [private GKE cluster](https://cloud.google.com/kubernetes-engine/docs/concepts/private-cluster-concept).", + }, + "proxyUrl": &dcl.Property{ + Type: "string", + GoName: "ProxyUrl", + Description: "Optional. If set, used to configure a [proxy](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/#proxy) to the Kubernetes server.", + }, + }, + }, + "labels": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "Labels", + Description: "Optional. Labels are attributes that can be set and used by both the user and by Google Cloud Deploy. Labels must meet the following constraints: * Keys and values can contain only lowercase letters, numeric characters, underscores, and dashes. * All characters must use UTF-8 encoding, and international characters are allowed. * Keys must start with a lowercase letter or international character. * Each resource is limited to a maximum of 64 labels. Both keys and values are additionally constrained to be <= 128 bytes.", + }, + "location": &dcl.Property{ + Type: "string", + GoName: "Location", + Description: "The location for the resource", + Immutable: true, + Parameter: true, + }, + "multiTarget": &dcl.Property{ + Type: "object", + GoName: "MultiTarget", + GoType: "TargetMultiTarget", + Description: "Information specifying a multiTarget.", + Conflicts: []string{ + "gke", + "anthosCluster", + "run", + "customTarget", + }, + Required: []string{ + "targetIds", + }, + Properties: map[string]*dcl.Property{ + "targetIds": &dcl.Property{ + Type: "array", + GoName: "TargetIds", + Description: "Required. 
The target_ids of this multiTarget.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + }, + }, + "name": &dcl.Property{ + Type: "string", + GoName: "Name", + Description: "Name of the `Target`. Format is `[a-z]([a-z0-9-]{0,61}[a-z0-9])?`.", + Immutable: true, + Parameter: true, + HasLongForm: true, + }, + "project": &dcl.Property{ + Type: "string", + GoName: "Project", + Description: "The project for the resource", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Cloudresourcemanager/Project", + Field: "name", + Parent: true, + }, + }, + Parameter: true, + }, + "requireApproval": &dcl.Property{ + Type: "boolean", + GoName: "RequireApproval", + Description: "Optional. Whether or not the `Target` requires approval.", + }, + "run": &dcl.Property{ + Type: "object", + GoName: "Run", + GoType: "TargetRun", + Description: "Information specifying a Cloud Run deployment target.", + Conflicts: []string{ + "gke", + "anthosCluster", + "multiTarget", + "customTarget", + }, + Required: []string{ + "location", + }, + Properties: map[string]*dcl.Property{ + "location": &dcl.Property{ + Type: "string", + GoName: "Location", + Description: "Required. The location where the Cloud Run Service should be located. Format is `projects/{project}/locations/{location}`.", + }, + }, + }, + "targetId": &dcl.Property{ + Type: "string", + GoName: "TargetId", + ReadOnly: true, + Description: "Output only. Resource id of the `Target`.", + Immutable: true, + }, + "uid": &dcl.Property{ + Type: "string", + GoName: "Uid", + ReadOnly: true, + Description: "Output only. Unique identifier of the `Target`.", + Immutable: true, + }, + "updateTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "UpdateTime", + ReadOnly: true, + Description: "Output only. 
Most recent time at which the `Target` was updated.", + Immutable: true, + }, + }, + }, + }, + }, + }, + } +} diff --git a/mmv1/third_party/terraform/services/containeraws/client.go.tmpl b/mmv1/third_party/terraform/services/containeraws/client.go.tmpl new file mode 100644 index 000000000000..e452af5f3c03 --- /dev/null +++ b/mmv1/third_party/terraform/services/containeraws/client.go.tmpl @@ -0,0 +1,18 @@ +package containeraws + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +// The Client is the base struct of all operations. This will receive the +// Get, Delete, List, and Apply operations on all resources. +type Client struct { + Config *dcl.Config +} + +// NewClient creates a client that retries all operations a few times each. +func NewClient(c *dcl.Config) *Client { + return &Client{ + Config: c, + } +} diff --git a/mmv1/third_party/terraform/services/containeraws/cluster.go.tmpl b/mmv1/third_party/terraform/services/containeraws/cluster.go.tmpl new file mode 100644 index 000000000000..9a665ed8eb63 --- /dev/null +++ b/mmv1/third_party/terraform/services/containeraws/cluster.go.tmpl @@ -0,0 +1,1587 @@ +package containeraws + +import ( + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "time" + +{{- if ne $.TargetVersionName "ga" }} + "google.golang.org/api/googleapi" +{{- end }} + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +{{- if eq $.TargetVersionName "ga" }} + "google.golang.org/api/googleapi" +{{- end }} +) + +type Cluster struct { + Name *string `json:"name"` + Description *string `json:"description"` + Networking *ClusterNetworking `json:"networking"` + AwsRegion *string `json:"awsRegion"` + ControlPlane *ClusterControlPlane `json:"controlPlane"` + Authorization *ClusterAuthorization `json:"authorization"` + State *ClusterStateEnum `json:"state"` + Endpoint *string `json:"endpoint"` + Uid *string `json:"uid"` + Reconciling *bool `json:"reconciling"` + CreateTime *string 
`json:"createTime"` + UpdateTime *string `json:"updateTime"` + Etag *string `json:"etag"` + Annotations map[string]string `json:"annotations"` + WorkloadIdentityConfig *ClusterWorkloadIdentityConfig `json:"workloadIdentityConfig"` + Project *string `json:"project"` + Location *string `json:"location"` + Fleet *ClusterFleet `json:"fleet"` +{{- if ne $.TargetVersionName "ga" }} + LoggingConfig *ClusterLoggingConfig `json:"loggingConfig"` + MonitoringConfig *ClusterMonitoringConfig `json:"monitoringConfig"` +{{- end }} + BinaryAuthorization *ClusterBinaryAuthorization `json:"binaryAuthorization"` +} + +func (r *Cluster) String() string { + return dcl.SprintResource(r) +} + +// The enum ClusterControlPlaneRootVolumeVolumeTypeEnum. +type ClusterControlPlaneRootVolumeVolumeTypeEnum string + +// ClusterControlPlaneRootVolumeVolumeTypeEnumRef returns a *ClusterControlPlaneRootVolumeVolumeTypeEnum with the value of string s +// If the empty string is provided, nil is returned. +func ClusterControlPlaneRootVolumeVolumeTypeEnumRef(s string) *ClusterControlPlaneRootVolumeVolumeTypeEnum { + v := ClusterControlPlaneRootVolumeVolumeTypeEnum(s) + return &v +} + +func (v ClusterControlPlaneRootVolumeVolumeTypeEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"VOLUME_TYPE_UNSPECIFIED", "GP2", "GP3"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "ClusterControlPlaneRootVolumeVolumeTypeEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum ClusterControlPlaneMainVolumeVolumeTypeEnum. +type ClusterControlPlaneMainVolumeVolumeTypeEnum string + +// ClusterControlPlaneMainVolumeVolumeTypeEnumRef returns a *ClusterControlPlaneMainVolumeVolumeTypeEnum with the value of string s +// If the empty string is provided, nil is returned. 
+func ClusterControlPlaneMainVolumeVolumeTypeEnumRef(s string) *ClusterControlPlaneMainVolumeVolumeTypeEnum { + v := ClusterControlPlaneMainVolumeVolumeTypeEnum(s) + return &v +} + +func (v ClusterControlPlaneMainVolumeVolumeTypeEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"VOLUME_TYPE_UNSPECIFIED", "GP2", "GP3"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "ClusterControlPlaneMainVolumeVolumeTypeEnum", + Value: string(v), + Valid: []string{}, + } +} + +{{- if ne $.TargetVersionName "ga" }} +// The enum ClusterControlPlaneInstancePlacementTenancyEnum. +type ClusterControlPlaneInstancePlacementTenancyEnum string + +// ClusterControlPlaneInstancePlacementTenancyEnumRef returns a *ClusterControlPlaneInstancePlacementTenancyEnum with the value of string s +// If the empty string is provided, nil is returned. +func ClusterControlPlaneInstancePlacementTenancyEnumRef(s string) *ClusterControlPlaneInstancePlacementTenancyEnum { + v := ClusterControlPlaneInstancePlacementTenancyEnum(s) + return &v +} + +func (v ClusterControlPlaneInstancePlacementTenancyEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"TENANCY_UNSPECIFIED", "DEFAULT", "DEDICATED", "HOST"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "ClusterControlPlaneInstancePlacementTenancyEnum", + Value: string(v), + Valid: []string{}, + } +} + +{{- end }} +// The enum ClusterStateEnum. +type ClusterStateEnum string + +// ClusterStateEnumRef returns a *ClusterStateEnum with the value of string s +// If the empty string is provided, nil is returned. +func ClusterStateEnumRef(s string) *ClusterStateEnum { + v := ClusterStateEnum(s) + return &v +} + +func (v ClusterStateEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. 
+ return nil + } + for _, s := range []string{"STATE_UNSPECIFIED", "PROVISIONING", "RUNNING", "RECONCILING", "STOPPING", "ERROR", "DEGRADED"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "ClusterStateEnum", +{{- if ne $.TargetVersionName "ga" }} + Value: string(v), + Valid: []string{}, + } +} + +// The enum ClusterLoggingConfigComponentConfigEnableComponentsEnum. +type ClusterLoggingConfigComponentConfigEnableComponentsEnum string + +// ClusterLoggingConfigComponentConfigEnableComponentsEnumRef returns a *ClusterLoggingConfigComponentConfigEnableComponentsEnum with the value of string s +// If the empty string is provided, nil is returned. +func ClusterLoggingConfigComponentConfigEnableComponentsEnumRef(s string) *ClusterLoggingConfigComponentConfigEnableComponentsEnum { + v := ClusterLoggingConfigComponentConfigEnableComponentsEnum(s) + return &v +} + +func (v ClusterLoggingConfigComponentConfigEnableComponentsEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"COMPONENT_UNSPECIFIED", "SYSTEM_COMPONENTS", "WORKLOADS"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "ClusterLoggingConfigComponentConfigEnableComponentsEnum", +{{- end }} + Value: string(v), + Valid: []string{}, + } +} + +// The enum ClusterBinaryAuthorizationEvaluationModeEnum. +type ClusterBinaryAuthorizationEvaluationModeEnum string + +// ClusterBinaryAuthorizationEvaluationModeEnumRef returns a *ClusterBinaryAuthorizationEvaluationModeEnum with the value of string s +// If the empty string is provided, nil is returned. +func ClusterBinaryAuthorizationEvaluationModeEnumRef(s string) *ClusterBinaryAuthorizationEvaluationModeEnum { + v := ClusterBinaryAuthorizationEvaluationModeEnum(s) + return &v +} + +func (v ClusterBinaryAuthorizationEvaluationModeEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. 
+ return nil + } + for _, s := range []string{"DISABLED", "PROJECT_SINGLETON_POLICY_ENFORCE"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "ClusterBinaryAuthorizationEvaluationModeEnum", + Value: string(v), + Valid: []string{}, + } +} + +type ClusterNetworking struct { + empty bool `json:"-"` + VPCId *string `json:"vpcId"` + PodAddressCidrBlocks []string `json:"podAddressCidrBlocks"` + ServiceAddressCidrBlocks []string `json:"serviceAddressCidrBlocks"` + PerNodePoolSgRulesDisabled *bool `json:"perNodePoolSgRulesDisabled"` +} + +type jsonClusterNetworking ClusterNetworking + +func (r *ClusterNetworking) UnmarshalJSON(data []byte) error { + var res jsonClusterNetworking + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterNetworking + } else { + + r.VPCId = res.VPCId + + r.PodAddressCidrBlocks = res.PodAddressCidrBlocks + + r.ServiceAddressCidrBlocks = res.ServiceAddressCidrBlocks + + r.PerNodePoolSgRulesDisabled = res.PerNodePoolSgRulesDisabled + + } + return nil +} + +// This object is used to assert a desired state where this ClusterNetworking is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterNetworking *ClusterNetworking = &ClusterNetworking{empty: true} + +func (r *ClusterNetworking) Empty() bool { + return r.empty +} + +func (r *ClusterNetworking) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterNetworking) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterControlPlane struct { + empty bool `json:"-"` + Version *string `json:"version"` + InstanceType *string `json:"instanceType"` + SshConfig *ClusterControlPlaneSshConfig `json:"sshConfig"` + SubnetIds []string `json:"subnetIds"` + ConfigEncryption *ClusterControlPlaneConfigEncryption `json:"configEncryption"` + SecurityGroupIds []string `json:"securityGroupIds"` + IamInstanceProfile *string `json:"iamInstanceProfile"` + RootVolume *ClusterControlPlaneRootVolume `json:"rootVolume"` + MainVolume *ClusterControlPlaneMainVolume `json:"mainVolume"` + DatabaseEncryption *ClusterControlPlaneDatabaseEncryption `json:"databaseEncryption"` + Tags map[string]string `json:"tags"` + AwsServicesAuthentication *ClusterControlPlaneAwsServicesAuthentication `json:"awsServicesAuthentication"` + ProxyConfig *ClusterControlPlaneProxyConfig `json:"proxyConfig"` +{{- if ne $.TargetVersionName "ga" }} + InstancePlacement *ClusterControlPlaneInstancePlacement `json:"instancePlacement"` +{{- end }} +} + +type jsonClusterControlPlane ClusterControlPlane + +func (r *ClusterControlPlane) UnmarshalJSON(data []byte) error { + var res jsonClusterControlPlane + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterControlPlane + } else { + + r.Version = res.Version + + r.InstanceType = res.InstanceType + + r.SshConfig = res.SshConfig + + r.SubnetIds = res.SubnetIds + + r.ConfigEncryption = 
res.ConfigEncryption + + r.SecurityGroupIds = res.SecurityGroupIds + + r.IamInstanceProfile = res.IamInstanceProfile + + r.RootVolume = res.RootVolume + + r.MainVolume = res.MainVolume + + r.DatabaseEncryption = res.DatabaseEncryption + + r.Tags = res.Tags + + r.AwsServicesAuthentication = res.AwsServicesAuthentication + + r.ProxyConfig = res.ProxyConfig +{{- if ne $.TargetVersionName "ga" }} + + r.InstancePlacement = res.InstancePlacement +{{- end }} + + } + return nil +} + +// This object is used to assert a desired state where this ClusterControlPlane is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyClusterControlPlane *ClusterControlPlane = &ClusterControlPlane{empty: true} + +func (r *ClusterControlPlane) Empty() bool { + return r.empty +} + +func (r *ClusterControlPlane) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterControlPlane) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterControlPlaneSshConfig struct { + empty bool `json:"-"` + Ec2KeyPair *string `json:"ec2KeyPair"` +} + +type jsonClusterControlPlaneSshConfig ClusterControlPlaneSshConfig + +func (r *ClusterControlPlaneSshConfig) UnmarshalJSON(data []byte) error { + var res jsonClusterControlPlaneSshConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterControlPlaneSshConfig + } else { + + r.Ec2KeyPair = res.Ec2KeyPair + + } + return nil +} + +// This object is used to assert a desired state where this ClusterControlPlaneSshConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. 
Modifying this object will have undesirable results. +var EmptyClusterControlPlaneSshConfig *ClusterControlPlaneSshConfig = &ClusterControlPlaneSshConfig{empty: true} + +func (r *ClusterControlPlaneSshConfig) Empty() bool { + return r.empty +} + +func (r *ClusterControlPlaneSshConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterControlPlaneSshConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterControlPlaneConfigEncryption struct { + empty bool `json:"-"` + KmsKeyArn *string `json:"kmsKeyArn"` +} + +type jsonClusterControlPlaneConfigEncryption ClusterControlPlaneConfigEncryption + +func (r *ClusterControlPlaneConfigEncryption) UnmarshalJSON(data []byte) error { + var res jsonClusterControlPlaneConfigEncryption + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterControlPlaneConfigEncryption + } else { + + r.KmsKeyArn = res.KmsKeyArn + + } + return nil +} + +// This object is used to assert a desired state where this ClusterControlPlaneConfigEncryption is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterControlPlaneConfigEncryption *ClusterControlPlaneConfigEncryption = &ClusterControlPlaneConfigEncryption{empty: true} + +func (r *ClusterControlPlaneConfigEncryption) Empty() bool { + return r.empty +} + +func (r *ClusterControlPlaneConfigEncryption) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterControlPlaneConfigEncryption) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterControlPlaneRootVolume struct { + empty bool `json:"-"` + SizeGib *int64 `json:"sizeGib"` + VolumeType *ClusterControlPlaneRootVolumeVolumeTypeEnum `json:"volumeType"` + Iops *int64 `json:"iops"` + Throughput *int64 `json:"throughput"` + KmsKeyArn *string `json:"kmsKeyArn"` +} + +type jsonClusterControlPlaneRootVolume ClusterControlPlaneRootVolume + +func (r *ClusterControlPlaneRootVolume) UnmarshalJSON(data []byte) error { + var res jsonClusterControlPlaneRootVolume + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterControlPlaneRootVolume + } else { + + r.SizeGib = res.SizeGib + + r.VolumeType = res.VolumeType + + r.Iops = res.Iops + + r.Throughput = res.Throughput + + r.KmsKeyArn = res.KmsKeyArn + + } + return nil +} + +// This object is used to assert a desired state where this ClusterControlPlaneRootVolume is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterControlPlaneRootVolume *ClusterControlPlaneRootVolume = &ClusterControlPlaneRootVolume{empty: true} + +func (r *ClusterControlPlaneRootVolume) Empty() bool { + return r.empty +} + +func (r *ClusterControlPlaneRootVolume) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterControlPlaneRootVolume) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterControlPlaneMainVolume struct { + empty bool `json:"-"` + SizeGib *int64 `json:"sizeGib"` + VolumeType *ClusterControlPlaneMainVolumeVolumeTypeEnum `json:"volumeType"` + Iops *int64 `json:"iops"` + Throughput *int64 `json:"throughput"` + KmsKeyArn *string `json:"kmsKeyArn"` +} + +type jsonClusterControlPlaneMainVolume ClusterControlPlaneMainVolume + +func (r *ClusterControlPlaneMainVolume) UnmarshalJSON(data []byte) error { + var res jsonClusterControlPlaneMainVolume + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterControlPlaneMainVolume + } else { + + r.SizeGib = res.SizeGib + + r.VolumeType = res.VolumeType + + r.Iops = res.Iops + + r.Throughput = res.Throughput + + r.KmsKeyArn = res.KmsKeyArn + + } + return nil +} + +// This object is used to assert a desired state where this ClusterControlPlaneMainVolume is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterControlPlaneMainVolume *ClusterControlPlaneMainVolume = &ClusterControlPlaneMainVolume{empty: true} + +func (r *ClusterControlPlaneMainVolume) Empty() bool { + return r.empty +} + +func (r *ClusterControlPlaneMainVolume) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterControlPlaneMainVolume) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterControlPlaneDatabaseEncryption struct { + empty bool `json:"-"` + KmsKeyArn *string `json:"kmsKeyArn"` +} + +type jsonClusterControlPlaneDatabaseEncryption ClusterControlPlaneDatabaseEncryption + +func (r *ClusterControlPlaneDatabaseEncryption) UnmarshalJSON(data []byte) error { + var res jsonClusterControlPlaneDatabaseEncryption + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterControlPlaneDatabaseEncryption + } else { + + r.KmsKeyArn = res.KmsKeyArn + + } + return nil +} + +// This object is used to assert a desired state where this ClusterControlPlaneDatabaseEncryption is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterControlPlaneDatabaseEncryption *ClusterControlPlaneDatabaseEncryption = &ClusterControlPlaneDatabaseEncryption{empty: true} + +func (r *ClusterControlPlaneDatabaseEncryption) Empty() bool { + return r.empty +} + +func (r *ClusterControlPlaneDatabaseEncryption) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterControlPlaneDatabaseEncryption) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterControlPlaneAwsServicesAuthentication struct { + empty bool `json:"-"` + RoleArn *string `json:"roleArn"` + RoleSessionName *string `json:"roleSessionName"` +} + +type jsonClusterControlPlaneAwsServicesAuthentication ClusterControlPlaneAwsServicesAuthentication + +func (r *ClusterControlPlaneAwsServicesAuthentication) UnmarshalJSON(data []byte) error { + var res jsonClusterControlPlaneAwsServicesAuthentication + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterControlPlaneAwsServicesAuthentication + } else { + + r.RoleArn = res.RoleArn + + r.RoleSessionName = res.RoleSessionName + + } + return nil +} + +// This object is used to assert a desired state where this ClusterControlPlaneAwsServicesAuthentication is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterControlPlaneAwsServicesAuthentication *ClusterControlPlaneAwsServicesAuthentication = &ClusterControlPlaneAwsServicesAuthentication{empty: true} + +func (r *ClusterControlPlaneAwsServicesAuthentication) Empty() bool { + return r.empty +} + +func (r *ClusterControlPlaneAwsServicesAuthentication) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterControlPlaneAwsServicesAuthentication) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterControlPlaneProxyConfig struct { + empty bool `json:"-"` + SecretArn *string `json:"secretArn"` + SecretVersion *string `json:"secretVersion"` +} + +type jsonClusterControlPlaneProxyConfig ClusterControlPlaneProxyConfig + +func (r *ClusterControlPlaneProxyConfig) UnmarshalJSON(data []byte) error { + var res jsonClusterControlPlaneProxyConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterControlPlaneProxyConfig + } else { + + r.SecretArn = res.SecretArn + + r.SecretVersion = res.SecretVersion + + } + return nil +} + +// This object is used to assert a desired state where this ClusterControlPlaneProxyConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterControlPlaneProxyConfig *ClusterControlPlaneProxyConfig = &ClusterControlPlaneProxyConfig{empty: true} + +func (r *ClusterControlPlaneProxyConfig) Empty() bool { + return r.empty +} + +func (r *ClusterControlPlaneProxyConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterControlPlaneProxyConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +{{- if ne $.TargetVersionName "ga" }} +type ClusterControlPlaneInstancePlacement struct { + empty bool `json:"-"` + Tenancy *ClusterControlPlaneInstancePlacementTenancyEnum `json:"tenancy"` +} + +type jsonClusterControlPlaneInstancePlacement ClusterControlPlaneInstancePlacement + +func (r *ClusterControlPlaneInstancePlacement) UnmarshalJSON(data []byte) error { + var res jsonClusterControlPlaneInstancePlacement + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterControlPlaneInstancePlacement + } else { + + r.Tenancy = res.Tenancy + + } + return nil +} + +// This object is used to assert a desired state where this ClusterControlPlaneInstancePlacement is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterControlPlaneInstancePlacement *ClusterControlPlaneInstancePlacement = &ClusterControlPlaneInstancePlacement{empty: true} + +func (r *ClusterControlPlaneInstancePlacement) Empty() bool { + return r.empty +} + +func (r *ClusterControlPlaneInstancePlacement) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterControlPlaneInstancePlacement) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +{{- end }} +type ClusterAuthorization struct { + empty bool `json:"-"` + AdminUsers []ClusterAuthorizationAdminUsers `json:"adminUsers"` + AdminGroups []ClusterAuthorizationAdminGroups `json:"adminGroups"` +} + +type jsonClusterAuthorization ClusterAuthorization + +func (r *ClusterAuthorization) UnmarshalJSON(data []byte) error { + var res jsonClusterAuthorization + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterAuthorization + } else { + + r.AdminUsers = res.AdminUsers + + r.AdminGroups = res.AdminGroups + + } + return nil +} + +// This object is used to assert a desired state where this ClusterAuthorization is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterAuthorization *ClusterAuthorization = &ClusterAuthorization{empty: true} + +func (r *ClusterAuthorization) Empty() bool { + return r.empty +} + +func (r *ClusterAuthorization) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterAuthorization) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterAuthorizationAdminUsers struct { + empty bool `json:"-"` + Username *string `json:"username"` +} + +type jsonClusterAuthorizationAdminUsers ClusterAuthorizationAdminUsers + +func (r *ClusterAuthorizationAdminUsers) UnmarshalJSON(data []byte) error { + var res jsonClusterAuthorizationAdminUsers + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterAuthorizationAdminUsers + } else { + + r.Username = res.Username + + } + return nil +} + +// This object is used to assert a desired state where this ClusterAuthorizationAdminUsers is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterAuthorizationAdminUsers *ClusterAuthorizationAdminUsers = &ClusterAuthorizationAdminUsers{empty: true} + +func (r *ClusterAuthorizationAdminUsers) Empty() bool { + return r.empty +} + +func (r *ClusterAuthorizationAdminUsers) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterAuthorizationAdminUsers) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterAuthorizationAdminGroups struct { + empty bool `json:"-"` + Group *string `json:"group"` +} + +type jsonClusterAuthorizationAdminGroups ClusterAuthorizationAdminGroups + +func (r *ClusterAuthorizationAdminGroups) UnmarshalJSON(data []byte) error { + var res jsonClusterAuthorizationAdminGroups + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterAuthorizationAdminGroups + } else { + + r.Group = res.Group + + } + return nil +} + +// This object is used to assert a desired state where this ClusterAuthorizationAdminGroups is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterAuthorizationAdminGroups *ClusterAuthorizationAdminGroups = &ClusterAuthorizationAdminGroups{empty: true} + +func (r *ClusterAuthorizationAdminGroups) Empty() bool { + return r.empty +} + +func (r *ClusterAuthorizationAdminGroups) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterAuthorizationAdminGroups) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterWorkloadIdentityConfig struct { + empty bool `json:"-"` + IssuerUri *string `json:"issuerUri"` + WorkloadPool *string `json:"workloadPool"` + IdentityProvider *string `json:"identityProvider"` +} + +type jsonClusterWorkloadIdentityConfig ClusterWorkloadIdentityConfig + +func (r *ClusterWorkloadIdentityConfig) UnmarshalJSON(data []byte) error { + var res jsonClusterWorkloadIdentityConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterWorkloadIdentityConfig + } else { + + r.IssuerUri = res.IssuerUri + + r.WorkloadPool = res.WorkloadPool + + r.IdentityProvider = res.IdentityProvider + + } + return nil +} + +// This object is used to assert a desired state where this ClusterWorkloadIdentityConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterWorkloadIdentityConfig *ClusterWorkloadIdentityConfig = &ClusterWorkloadIdentityConfig{empty: true} + +func (r *ClusterWorkloadIdentityConfig) Empty() bool { + return r.empty +} + +func (r *ClusterWorkloadIdentityConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterWorkloadIdentityConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterFleet struct { + empty bool `json:"-"` + Project *string `json:"project"` + Membership *string `json:"membership"` +} + +type jsonClusterFleet ClusterFleet + +func (r *ClusterFleet) UnmarshalJSON(data []byte) error { + var res jsonClusterFleet + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterFleet + } else { + + r.Project = res.Project + + r.Membership = res.Membership + + } + return nil +} + +// This object is used to assert a desired state where this ClusterFleet is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterFleet *ClusterFleet = &ClusterFleet{empty: true} + +func (r *ClusterFleet) Empty() bool { + return r.empty +} + +func (r *ClusterFleet) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterFleet) HashCode() string { +{{- if ne $.TargetVersionName "ga" }} + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterLoggingConfig struct { + empty bool `json:"-"` + ComponentConfig *ClusterLoggingConfigComponentConfig `json:"componentConfig"` +} + +type jsonClusterLoggingConfig ClusterLoggingConfig + +func (r *ClusterLoggingConfig) UnmarshalJSON(data []byte) error { + var res jsonClusterLoggingConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterLoggingConfig + } else { + + r.ComponentConfig = res.ComponentConfig + + } + return nil +} + +// This object is used to assert a desired state where this ClusterLoggingConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterLoggingConfig *ClusterLoggingConfig = &ClusterLoggingConfig{empty: true} + +func (r *ClusterLoggingConfig) Empty() bool { + return r.empty +} + +func (r *ClusterLoggingConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterLoggingConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterLoggingConfigComponentConfig struct { + empty bool `json:"-"` + EnableComponents []ClusterLoggingConfigComponentConfigEnableComponentsEnum `json:"enableComponents"` +} + +type jsonClusterLoggingConfigComponentConfig ClusterLoggingConfigComponentConfig + +func (r *ClusterLoggingConfigComponentConfig) UnmarshalJSON(data []byte) error { + var res jsonClusterLoggingConfigComponentConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterLoggingConfigComponentConfig + } else { + + r.EnableComponents = res.EnableComponents + + } + return nil +} + +// This object is used to assert a desired state where this ClusterLoggingConfigComponentConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterLoggingConfigComponentConfig *ClusterLoggingConfigComponentConfig = &ClusterLoggingConfigComponentConfig{empty: true} + +func (r *ClusterLoggingConfigComponentConfig) Empty() bool { + return r.empty +} + +func (r *ClusterLoggingConfigComponentConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterLoggingConfigComponentConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterMonitoringConfig struct { + empty bool `json:"-"` + ManagedPrometheusConfig *ClusterMonitoringConfigManagedPrometheusConfig `json:"managedPrometheusConfig"` +} + +type jsonClusterMonitoringConfig ClusterMonitoringConfig + +func (r *ClusterMonitoringConfig) UnmarshalJSON(data []byte) error { + var res jsonClusterMonitoringConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterMonitoringConfig + } else { + + r.ManagedPrometheusConfig = res.ManagedPrometheusConfig + + } + return nil +} + +// This object is used to assert a desired state where this ClusterMonitoringConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterMonitoringConfig *ClusterMonitoringConfig = &ClusterMonitoringConfig{empty: true} + +func (r *ClusterMonitoringConfig) Empty() bool { + return r.empty +} + +func (r *ClusterMonitoringConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterMonitoringConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterMonitoringConfigManagedPrometheusConfig struct { + empty bool `json:"-"` + Enabled *bool `json:"enabled"` +} + +type jsonClusterMonitoringConfigManagedPrometheusConfig ClusterMonitoringConfigManagedPrometheusConfig + +func (r *ClusterMonitoringConfigManagedPrometheusConfig) UnmarshalJSON(data []byte) error { + var res jsonClusterMonitoringConfigManagedPrometheusConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterMonitoringConfigManagedPrometheusConfig + } else { + + r.Enabled = res.Enabled + + } + return nil +} + +// This object is used to assert a desired state where this ClusterMonitoringConfigManagedPrometheusConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterMonitoringConfigManagedPrometheusConfig *ClusterMonitoringConfigManagedPrometheusConfig = &ClusterMonitoringConfigManagedPrometheusConfig{empty: true} + +func (r *ClusterMonitoringConfigManagedPrometheusConfig) Empty() bool { + return r.empty +} + +func (r *ClusterMonitoringConfigManagedPrometheusConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterMonitoringConfigManagedPrometheusConfig) HashCode() string { +{{- end }} + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterBinaryAuthorization struct { + empty bool `json:"-"` + EvaluationMode *ClusterBinaryAuthorizationEvaluationModeEnum `json:"evaluationMode"` +} + +type jsonClusterBinaryAuthorization ClusterBinaryAuthorization + +func (r *ClusterBinaryAuthorization) UnmarshalJSON(data []byte) error { + var res jsonClusterBinaryAuthorization + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterBinaryAuthorization + } else { + + r.EvaluationMode = res.EvaluationMode + + } + return nil +} + +// This object is used to assert a desired state where this ClusterBinaryAuthorization is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterBinaryAuthorization *ClusterBinaryAuthorization = &ClusterBinaryAuthorization{empty: true} + +func (r *ClusterBinaryAuthorization) Empty() bool { + return r.empty +} + +func (r *ClusterBinaryAuthorization) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterBinaryAuthorization) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +// Describe returns a simple description of this resource to ensure that automated tools +// can identify it. +func (r *Cluster) Describe() dcl.ServiceTypeVersion { + return dcl.ServiceTypeVersion{ + Service: "container_aws", + Type: "Cluster", +{{- if ne $.TargetVersionName "ga" }} + Version: "beta", +{{- else }} + Version: "containeraws", +{{- end }} + } +} + +func (r *Cluster) ID() (string, error) { + if err := extractClusterFields(r); err != nil { + return "", err + } + nr := r.urlNormalized() + params := map[string]interface{}{ + "name": dcl.ValueOrEmptyString(nr.Name), + "description": dcl.ValueOrEmptyString(nr.Description), + "networking": dcl.ValueOrEmptyString(nr.Networking), + "aws_region": dcl.ValueOrEmptyString(nr.AwsRegion), + "control_plane": dcl.ValueOrEmptyString(nr.ControlPlane), + "authorization": dcl.ValueOrEmptyString(nr.Authorization), + "state": dcl.ValueOrEmptyString(nr.State), + "endpoint": dcl.ValueOrEmptyString(nr.Endpoint), + "uid": dcl.ValueOrEmptyString(nr.Uid), + "reconciling": dcl.ValueOrEmptyString(nr.Reconciling), + "create_time": dcl.ValueOrEmptyString(nr.CreateTime), + "update_time": dcl.ValueOrEmptyString(nr.UpdateTime), + "etag": dcl.ValueOrEmptyString(nr.Etag), + "annotations": dcl.ValueOrEmptyString(nr.Annotations), + "workload_identity_config": dcl.ValueOrEmptyString(nr.WorkloadIdentityConfig), + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), 
+ "fleet": dcl.ValueOrEmptyString(nr.Fleet), +{{- if ne $.TargetVersionName "ga" }} + "logging_config": dcl.ValueOrEmptyString(nr.LoggingConfig), + "monitoring_config": dcl.ValueOrEmptyString(nr.MonitoringConfig), +{{- end }} + "binary_authorization": dcl.ValueOrEmptyString(nr.BinaryAuthorization), + } + return dcl.Nprintf("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/awsClusters/{{ "{{" }}name{{ "}}" }}", params), nil +} + +const ClusterMaxPage = -1 + +type ClusterList struct { + Items []*Cluster + + nextToken string + + pageSize int32 + + resource *Cluster +} + +func (l *ClusterList) HasNext() bool { + return l.nextToken != "" +} + +func (l *ClusterList) Next(ctx context.Context, c *Client) error { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if !l.HasNext() { + return fmt.Errorf("no next page") + } + items, token, err := c.listCluster(ctx, l.resource, l.nextToken, l.pageSize) + if err != nil { + return err + } + l.Items = items + l.nextToken = token + return err +} + +func (c *Client) ListCluster(ctx context.Context, project, location string) (*ClusterList, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + return c.ListClusterWithMaxResults(ctx, project, location, ClusterMaxPage) + +} + +func (c *Client) ListClusterWithMaxResults(ctx context.Context, project, location string, pageSize int32) (*ClusterList, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // Create a resource object so that we can use proper url normalization methods. 
+ r := &Cluster{ + Project: &project, + Location: &location, + } + items, token, err := c.listCluster(ctx, r, "", pageSize) + if err != nil { + return nil, err + } + return &ClusterList{ + Items: items, + nextToken: token, + pageSize: pageSize, + resource: r, + }, nil +} + +func (c *Client) GetCluster(ctx context.Context, r *Cluster) (*Cluster, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // This is *purposefully* supressing errors. + // This function is used with url-normalized values + not URL normalized values. + // URL Normalized values will throw unintentional errors, since those values are not of the proper parent form. + extractClusterFields(r) + + b, err := c.getClusterRaw(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + return nil, &googleapi.Error{ + Code: 404, + Message: err.Error(), + } + } + return nil, err + } + result, err := unmarshalCluster(b, c, r) + if err != nil { + return nil, err + } + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name + + c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) + result, err = canonicalizeClusterNewState(c, result, r) + if err != nil { + return nil, err + } + if err := postReadExtractClusterFields(result); err != nil { + return result, err + } + c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result) + + return result, nil +} + +func (c *Client) DeleteCluster(ctx context.Context, r *Cluster) error { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if r == nil { + return fmt.Errorf("Cluster resource is nil") + } + c.Config.Logger.InfoWithContext(ctx, "Deleting Cluster...") + deleteOp := deleteClusterOperation{} + return deleteOp.do(ctx, r, c) +} + +// 
DeleteAllCluster deletes all resources that the filter functions returns true on. +func (c *Client) DeleteAllCluster(ctx context.Context, project, location string, filter func(*Cluster) bool) error { + listObj, err := c.ListCluster(ctx, project, location) + if err != nil { + return err + } + + err = c.deleteAllCluster(ctx, filter, listObj.Items) + if err != nil { + return err + } + for listObj.HasNext() { + err = listObj.Next(ctx, c) + if err != nil { + return nil + } + err = c.deleteAllCluster(ctx, filter, listObj.Items) + if err != nil { + return err + } + } + return nil +} + +func (c *Client) ApplyCluster(ctx context.Context, rawDesired *Cluster, opts ...dcl.ApplyOption) (*Cluster, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + ctx = dcl.ContextWithRequestID(ctx) + var resultNewState *Cluster + err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + newState, err := applyClusterHelper(c, ctx, rawDesired, opts...) + resultNewState = newState + if err != nil { + // If the error is 409, there is conflict in resource update. + // Here we want to apply changes based on latest state. + if dcl.IsConflictError(err) { + return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} + } + return nil, err + } + return nil, nil + }, c.Config.RetryProvider) + return resultNewState, err +} + +func applyClusterHelper(c *Client, ctx context.Context, rawDesired *Cluster, opts ...dcl.ApplyOption) (*Cluster, error) { + c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyCluster...") + c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) + + // 1.1: Validation of user-specified fields in desired state. + if err := rawDesired.validate(); err != nil { + return nil, err + } + + if err := extractClusterFields(rawDesired); err != nil { + return nil, err + } + + initial, desired, fieldDiffs, err := c.clusterDiffsForRawDesired(ctx, rawDesired, opts...) 
+ if err != nil { + return nil, fmt.Errorf("failed to create a diff: %w", err) + } + + diffs, err := convertFieldDiffsToClusterDiffs(c.Config, fieldDiffs, opts) + if err != nil { + return nil, err + } + + // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). + + // 2.3: Lifecycle Directive Check + var create bool + lp := dcl.FetchLifecycleParams(opts) + if initial == nil { + if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} + } + create = true + } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), + } + } else { + for _, d := range diffs { + if d.RequiresRecreate { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), + } + } + if dcl.HasLifecycleParam(lp, dcl.BlockModification) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} + } + } + } + + // 2.4 Imperative Request Planning + var ops []clusterApiOperation + if create { + ops = append(ops, &createClusterOperation{}) + } else { + for _, d := range diffs { + ops = append(ops, d.UpdateOp) + } + } + c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) + + // 2.5 Request Actuation + for _, op := range ops { + c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) + if err := op.do(ctx, desired, c); err != nil { + c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", op, op, err) + return nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) + } + return applyClusterDiff(c, ctx, desired, rawDesired, ops, opts...) 
+} + +func applyClusterDiff(c *Client, ctx context.Context, desired *Cluster, rawDesired *Cluster, ops []clusterApiOperation, opts ...dcl.ApplyOption) (*Cluster, error) { + // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") + rawNew, err := c.GetCluster(ctx, desired) + if err != nil { + return nil, err + } + // Get additional values from the first response. + // These values should be merged into the newState above. + if len(ops) > 0 { + lastOp := ops[len(ops)-1] + if o, ok := lastOp.(*createClusterOperation); ok { + if r, hasR := o.FirstResponse(); hasR { + + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") + + fullResp, err := unmarshalMapCluster(r, c, rawDesired) + if err != nil { + return nil, err + } + + rawNew, err = canonicalizeClusterNewState(c, rawNew, fullResp) + if err != nil { + return nil, err + } + } + } + } + + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) + // 3.2b Canonicalization of raw new state using raw desired state + newState, err := canonicalizeClusterNewState(c, rawNew, rawDesired) + if err != nil { + return rawNew, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) + // 3.3 Comparison of the new state and raw desired state. + // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE + newDesired, err := canonicalizeClusterDesiredState(rawDesired, newState) + if err != nil { + return newState, err + } + + if err := postReadExtractClusterFields(newState); err != nil { + return newState, err + } + + // Need to ensure any transformations made here match acceptably in differ. 
+ if err := postReadExtractClusterFields(newDesired); err != nil { + return newState, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) + newDiffs, err := diffCluster(c, newDesired, newState) + if err != nil { + return newState, err + } + + if len(newDiffs) == 0 { + c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") + } else { + c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) + diffMessages := make([]string, len(newDiffs)) + for i, d := range newDiffs { + diffMessages[i] = fmt.Sprintf("%v", d) + } + return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} + } + c.Config.Logger.InfoWithContext(ctx, "Done Apply.") + return newState, nil +} diff --git a/mmv1/third_party/terraform/services/containeraws/cluster_internal.go.tmpl b/mmv1/third_party/terraform/services/containeraws/cluster_internal.go.tmpl new file mode 100644 index 000000000000..bfb5df994069 --- /dev/null +++ b/mmv1/third_party/terraform/services/containeraws/cluster_internal.go.tmpl @@ -0,0 +1,7909 @@ +package containeraws + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "strings" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource/operations" +) + +func (r *Cluster) validate() error { + + if err := dcl.Required(r, "name"); err != nil { + return err + } + if err := dcl.Required(r, "networking"); err != nil { + return err + } + if err := dcl.Required(r, "awsRegion"); err != nil { + return err + } + if err := dcl.Required(r, "controlPlane"); err != nil { + return err + } + if err := dcl.Required(r, "authorization"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Project, "Project"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Location, "Location"); err != nil { + return err + } + if err := dcl.Required(r, "fleet"); err != nil { + 
return err + } + if !dcl.IsEmptyValueIndirect(r.Networking) { + if err := r.Networking.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.ControlPlane) { + if err := r.ControlPlane.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.Authorization) { + if err := r.Authorization.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.WorkloadIdentityConfig) { + if err := r.WorkloadIdentityConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.Fleet) { + if err := r.Fleet.validate(); err != nil { + return err + } + } +{{- if ne $.TargetVersionName "ga" }} + if !dcl.IsEmptyValueIndirect(r.LoggingConfig) { + if err := r.LoggingConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.MonitoringConfig) { + if err := r.MonitoringConfig.validate(); err != nil { + return err + } + } +{{- end }} + if !dcl.IsEmptyValueIndirect(r.BinaryAuthorization) { + if err := r.BinaryAuthorization.validate(); err != nil { + return err + } + } + return nil +} +func (r *ClusterNetworking) validate() error { + if err := dcl.Required(r, "vpcId"); err != nil { + return err + } + if err := dcl.Required(r, "podAddressCidrBlocks"); err != nil { + return err + } + if err := dcl.Required(r, "serviceAddressCidrBlocks"); err != nil { + return err + } + return nil +} +func (r *ClusterControlPlane) validate() error { + if err := dcl.Required(r, "version"); err != nil { + return err + } + if err := dcl.Required(r, "subnetIds"); err != nil { + return err + } + if err := dcl.Required(r, "configEncryption"); err != nil { + return err + } + if err := dcl.Required(r, "iamInstanceProfile"); err != nil { + return err + } + if err := dcl.Required(r, "databaseEncryption"); err != nil { + return err + } + if err := dcl.Required(r, "awsServicesAuthentication"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.SshConfig) { + if err := r.SshConfig.validate(); err != 
nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.ConfigEncryption) { + if err := r.ConfigEncryption.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.RootVolume) { + if err := r.RootVolume.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.MainVolume) { + if err := r.MainVolume.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.DatabaseEncryption) { + if err := r.DatabaseEncryption.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.AwsServicesAuthentication) { + if err := r.AwsServicesAuthentication.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.ProxyConfig) { + if err := r.ProxyConfig.validate(); err != nil { + return err + } + } +{{- if ne $.TargetVersionName "ga" }} + if !dcl.IsEmptyValueIndirect(r.InstancePlacement) { + if err := r.InstancePlacement.validate(); err != nil { + return err + } + } +{{- end }} + return nil +} +func (r *ClusterControlPlaneSshConfig) validate() error { + if err := dcl.Required(r, "ec2KeyPair"); err != nil { + return err + } + return nil +} +func (r *ClusterControlPlaneConfigEncryption) validate() error { + if err := dcl.Required(r, "kmsKeyArn"); err != nil { + return err + } + return nil +} +func (r *ClusterControlPlaneRootVolume) validate() error { + return nil +} +func (r *ClusterControlPlaneMainVolume) validate() error { + return nil +} +func (r *ClusterControlPlaneDatabaseEncryption) validate() error { + if err := dcl.Required(r, "kmsKeyArn"); err != nil { + return err + } + return nil +} +func (r *ClusterControlPlaneAwsServicesAuthentication) validate() error { + if err := dcl.Required(r, "roleArn"); err != nil { + return err + } + return nil +} +func (r *ClusterControlPlaneProxyConfig) validate() error { + if err := dcl.Required(r, "secretArn"); err != nil { + return err + } + if err := dcl.Required(r, "secretVersion"); err != nil { + return err + } + return nil +} +{{- if 
ne $.TargetVersionName "ga" }} +func (r *ClusterControlPlaneInstancePlacement) validate() error { + return nil +} +{{- end }} +func (r *ClusterAuthorization) validate() error { + if err := dcl.Required(r, "adminUsers"); err != nil { + return err + } + return nil +} +func (r *ClusterAuthorizationAdminUsers) validate() error { + if err := dcl.Required(r, "username"); err != nil { + return err + } + return nil +} +func (r *ClusterAuthorizationAdminGroups) validate() error { + if err := dcl.Required(r, "group"); err != nil { + return err + } + return nil +} +func (r *ClusterWorkloadIdentityConfig) validate() error { + return nil +} +func (r *ClusterFleet) validate() error { + if err := dcl.Required(r, "project"); err != nil { + return err + } +{{- if ne $.TargetVersionName "ga" }} + return nil +} +func (r *ClusterLoggingConfig) validate() error { + if !dcl.IsEmptyValueIndirect(r.ComponentConfig) { + if err := r.ComponentConfig.validate(); err != nil { + return err + } + } + return nil +} +func (r *ClusterLoggingConfigComponentConfig) validate() error { + return nil +} +func (r *ClusterMonitoringConfig) validate() error { + if !dcl.IsEmptyValueIndirect(r.ManagedPrometheusConfig) { + if err := r.ManagedPrometheusConfig.validate(); err != nil { + return err + } + } + return nil +} +func (r *ClusterMonitoringConfigManagedPrometheusConfig) validate() error { +{{- end }} + return nil +} +func (r *ClusterBinaryAuthorization) validate() error { + return nil +} +func (r *Cluster) basePath() string { + params := map[string]interface{}{ + "location": dcl.ValueOrEmptyString(r.Location), + } + return dcl.Nprintf("https://{{ "{{" }}location{{ "}}" }}-gkemulticloud.googleapis.com/v1", params) +} + +func (r *Cluster) getURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return 
dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/awsClusters/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +func (r *Cluster) listURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/awsClusters", nr.basePath(), userBasePath, params), nil + +} + +func (r *Cluster) createURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/awsClusters?awsClusterId={{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil + +} + +func (r *Cluster) deleteURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/awsClusters/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +// clusterApiOperation represents a mutable operation in the underlying REST +// API such as Create, Update, or Delete. +type clusterApiOperation interface { + do(context.Context, *Cluster, *Client) error +} + +// newUpdateClusterUpdateAwsClusterRequest creates a request for an +// Cluster resource's UpdateAwsCluster update type by filling in the update +// fields based on the intended state of the resource. 
+func newUpdateClusterUpdateAwsClusterRequest(ctx context.Context, f *Cluster, c *Client) (map[string]interface{}, error) { + req := map[string]interface{}{} + res := f + _ = res + + if v := f.Description; !dcl.IsEmptyValueIndirect(v) { + req["description"] = v + } + if v, err := expandClusterNetworking(c, f.Networking, res); err != nil { + return nil, fmt.Errorf("error expanding Networking into networking: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["networking"] = v + } + if v, err := expandClusterControlPlane(c, f.ControlPlane, res); err != nil { + return nil, fmt.Errorf("error expanding ControlPlane into controlPlane: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["controlPlane"] = v + } + if v, err := expandClusterAuthorization(c, f.Authorization, res); err != nil { + return nil, fmt.Errorf("error expanding Authorization into authorization: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["authorization"] = v + } + if v := f.Annotations; !dcl.IsEmptyValueIndirect(v) { + req["annotations"] = v +{{- if ne $.TargetVersionName "ga" }} + } + if v, err := expandClusterLoggingConfig(c, f.LoggingConfig, res); err != nil { + return nil, fmt.Errorf("error expanding LoggingConfig into loggingConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["loggingConfig"] = v + } + if v, err := expandClusterMonitoringConfig(c, f.MonitoringConfig, res); err != nil { + return nil, fmt.Errorf("error expanding MonitoringConfig into monitoringConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["monitoringConfig"] = v +{{- end }} + } + if v, err := expandClusterBinaryAuthorization(c, f.BinaryAuthorization, res); err != nil { + return nil, fmt.Errorf("error expanding BinaryAuthorization into binaryAuthorization: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["binaryAuthorization"] = v + } + b, err := c.getClusterRaw(ctx, f) + if err != nil { + return nil, err + } + var m map[string]interface{} + if err := 
json.Unmarshal(b, &m); err != nil { + return nil, err + } + rawEtag, err := dcl.GetMapEntry( + m, + []string{"etag"}, + ) + if err != nil { + c.Config.Logger.WarningWithContextf(ctx, "Failed to fetch from JSON Path: %v", err) + } else { + req["etag"] = rawEtag.(string) + } + return req, nil +} + +// marshalUpdateClusterUpdateAwsClusterRequest converts the update into +// the final JSON request body. +func marshalUpdateClusterUpdateAwsClusterRequest(c *Client, m map[string]interface{}) ([]byte, error) { + + return json.Marshal(m) +} + +type updateClusterUpdateAwsClusterOperation struct { + // If the update operation has the REQUIRES_APPLY_OPTIONS trait, this will be populated. + // Usually it will be nil - this is to prevent us from accidentally depending on apply + // options, which should usually be unnecessary. + ApplyOptions []dcl.ApplyOption + FieldDiffs []*dcl.FieldDiff +} + +// do creates a request and sends it to the appropriate URL. In most operations, +// do will transcribe a subset of the resource into a request object and send a +// PUT request to a single URL. 
+ +func (op *updateClusterUpdateAwsClusterOperation) do(ctx context.Context, r *Cluster, c *Client) error { + _, err := c.GetCluster(ctx, r) + if err != nil { + return err + } + + u, err := r.updateURL(c.Config.BasePath, "UpdateAwsCluster") + if err != nil { + return err + } + mask := dcl.UpdateMask(op.FieldDiffs) + u, err = dcl.AddQueryParams(u, map[string]string{"updateMask": mask}) + if err != nil { + return err + } + + req, err := newUpdateClusterUpdateAwsClusterRequest(ctx, r, c) + if err != nil { + return err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created update: %#v", req) + body, err := marshalUpdateClusterUpdateAwsClusterRequest(c, req) + if err != nil { + return err + } + resp, err := dcl.SendRequest(ctx, c.Config, "PATCH", u, bytes.NewBuffer(body), c.Config.RetryProvider) + if err != nil { + return err + } + + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + err = o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET") + + if err != nil { + return err + } + + return nil +} + +func (c *Client) listClusterRaw(ctx context.Context, r *Cluster, pageToken string, pageSize int32) ([]byte, error) { + u, err := r.urlNormalized().listURL(c.Config.BasePath) + if err != nil { + return nil, err + } + + m := make(map[string]string) + if pageToken != "" { + m["pageToken"] = pageToken + } + + if pageSize != ClusterMaxPage { + m["pageSize"] = fmt.Sprintf("%v", pageSize) + } + + u, err = dcl.AddQueryParams(u, m) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + return ioutil.ReadAll(resp.Response.Body) +} + +type listClusterOperation struct { + AwsClusters []map[string]interface{} `json:"awsClusters"` + Token string `json:"nextPageToken"` +} + +func (c *Client) listCluster(ctx context.Context, r 
*Cluster, pageToken string, pageSize int32) ([]*Cluster, string, error) { + b, err := c.listClusterRaw(ctx, r, pageToken, pageSize) + if err != nil { + return nil, "", err + } + + var m listClusterOperation + if err := json.Unmarshal(b, &m); err != nil { + return nil, "", err + } + + var l []*Cluster + for _, v := range m.AwsClusters { + res, err := unmarshalMapCluster(v, c, r) + if err != nil { + return nil, m.Token, err + } + res.Project = r.Project + res.Location = r.Location + l = append(l, res) + } + + return l, m.Token, nil +} + +func (c *Client) deleteAllCluster(ctx context.Context, f func(*Cluster) bool, resources []*Cluster) error { + var errors []string + for _, res := range resources { + if f(res) { + // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. + err := c.DeleteCluster(ctx, res) + if err != nil { + errors = append(errors, err.Error()) + } + } + } + if len(errors) > 0 { + return fmt.Errorf("%v", strings.Join(errors, "\n")) + } else { + return nil + } +} + +type deleteClusterOperation struct{} + +func (op *deleteClusterOperation) do(ctx context.Context, r *Cluster, c *Client) error { + r, err := c.GetCluster(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + c.Config.Logger.InfoWithContextf(ctx, "Cluster not found, returning. Original error: %v", err) + return nil + } + c.Config.Logger.WarningWithContextf(ctx, "GetCluster checking for existence. error: %v", err) + return err + } + + u, err := r.deleteURL(c.Config.BasePath) + if err != nil { + return err + } + + // Delete should never have a body + body := &bytes.Buffer{} + resp, err := dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider) + if err != nil { + return err + } + + // wait for object to be deleted. 
+	var o operations.StandardGCPOperation
+	if err := dcl.ParseResponse(resp.Response, &o); err != nil {
+		return err
+	}
+	if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { // Block until the delete long-running operation finishes.
+		return err
+	}
+
+	// We saw a race condition where for some successful delete operation, the Get calls returned resources for a short duration.
+	// This is the reason we are adding retry to handle that case.
+	retriesRemaining := 10 // Bounded number of post-delete GET re-checks for eventual consistency.
+	dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) {
+		_, err := c.GetCluster(ctx, r)
+		if dcl.IsNotFound(err) {
+			return nil, nil // Resource is gone: deletion confirmed.
+		}
+		if retriesRemaining > 0 {
+			retriesRemaining--
+			return &dcl.RetryDetails{}, dcl.OperationNotDone{} // Still visible: ask dcl.Do to retry.
+		}
+		return nil, dcl.NotDeletedError{ExistingResource: r}
+	}, c.Config.RetryProvider) // NOTE(review): dcl.Do's error (incl. NotDeletedError) is discarded here — confirm best-effort is intended.
+	return nil // Reports success once the LRO completed, even if GET still sees the resource after the retries above.
+}
+
+// Create operations are similar to Update operations, although they do not have
+// specific request objects. The Create request object is the json encoding of
+// the resource, which is modified by res.marshal to form the base request body.
+type createClusterOperation struct {
+	response map[string]interface{} // First LRO response body; later merged into the post-create state.
+}
+
+func (op *createClusterOperation) FirstResponse() (map[string]interface{}, bool) {
+	return op.response, len(op.response) > 0
+}
+
+func (op *createClusterOperation) do(ctx context.Context, r *Cluster, c *Client) error {
+	c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r)
+	u, err := r.createURL(c.Config.BasePath)
+	if err != nil {
+		return err
+	}
+
+	req, err := r.marshal(c)
+	if err != nil {
+		return err
+	}
+	resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider)
+	if err != nil {
+		return err
+	}
+	// wait for object to be created.
+ var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { + c.Config.Logger.Warningf("Creation failed after waiting for operation: %v", err) + return err + } + c.Config.Logger.InfoWithContextf(ctx, "Successfully waited for operation") + op.response, _ = o.FirstResponse() + + if _, err := c.GetCluster(ctx, r); err != nil { + c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) + return err + } + + return nil +} + +func (c *Client) getClusterRaw(ctx context.Context, r *Cluster) ([]byte, error) { + + u, err := r.getURL(c.Config.BasePath) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + b, err := ioutil.ReadAll(resp.Response.Body) + if err != nil { + return nil, err + } + + return b, nil +} + +func (c *Client) clusterDiffsForRawDesired(ctx context.Context, rawDesired *Cluster, opts ...dcl.ApplyOption) (initial, desired *Cluster, diffs []*dcl.FieldDiff, err error) { + c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") + // First, let us see if the user provided a state hint. If they did, we will start fetching based on that. 
+ var fetchState *Cluster + if sh := dcl.FetchStateHint(opts); sh != nil { + if r, ok := sh.(*Cluster); !ok { + c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected Cluster, got %T", sh) + } else { + fetchState = r + } + } + if fetchState == nil { + fetchState = rawDesired + } + + // 1.2: Retrieval of raw initial state from API + rawInitial, err := c.GetCluster(ctx, fetchState) + if rawInitial == nil { + if !dcl.IsNotFound(err) { + c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a Cluster resource already exists: %s", err) + return nil, nil, nil, fmt.Errorf("failed to retrieve Cluster resource: %v", err) + } + c.Config.Logger.InfoWithContext(ctx, "Found that Cluster resource did not exist.") + // Perform canonicalization to pick up defaults. + desired, err = canonicalizeClusterDesiredState(rawDesired, rawInitial) + return nil, desired, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Found initial state for Cluster: %v", rawInitial) + c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for Cluster: %v", rawDesired) + + // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. + if err := extractClusterFields(rawInitial); err != nil { + return nil, nil, nil, err + } + + // 1.3: Canonicalize raw initial state into initial state. + initial, err = canonicalizeClusterInitialState(rawInitial, rawDesired) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for Cluster: %v", initial) + + // 1.4: Canonicalize raw desired state into desired state. + desired, err = canonicalizeClusterDesiredState(rawDesired, rawInitial, opts...) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for Cluster: %v", desired) + + // 2.1: Comparison of initial and desired state. + diffs, err = diffCluster(c, desired, initial, opts...) 
+ return initial, desired, diffs, err +} + +func canonicalizeClusterInitialState(rawInitial, rawDesired *Cluster) (*Cluster, error) { + // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. + return rawInitial, nil +} + +/* +* Canonicalizers +* +* These are responsible for converting either a user-specified config or a +* GCP API response to a standard format that can be used for difference checking. +* */ + +func canonicalizeClusterDesiredState(rawDesired, rawInitial *Cluster, opts ...dcl.ApplyOption) (*Cluster, error) { + + if rawInitial == nil { + // Since the initial state is empty, the desired state is all we have. + // We canonicalize the remaining nested objects with nil to pick up defaults. + rawDesired.Networking = canonicalizeClusterNetworking(rawDesired.Networking, nil, opts...) + rawDesired.ControlPlane = canonicalizeClusterControlPlane(rawDesired.ControlPlane, nil, opts...) + rawDesired.Authorization = canonicalizeClusterAuthorization(rawDesired.Authorization, nil, opts...) + rawDesired.WorkloadIdentityConfig = canonicalizeClusterWorkloadIdentityConfig(rawDesired.WorkloadIdentityConfig, nil, opts...) + rawDesired.Fleet = canonicalizeClusterFleet(rawDesired.Fleet, nil, opts...) +{{- if ne $.TargetVersionName "ga" }} + rawDesired.LoggingConfig = canonicalizeClusterLoggingConfig(rawDesired.LoggingConfig, nil, opts...) + rawDesired.MonitoringConfig = canonicalizeClusterMonitoringConfig(rawDesired.MonitoringConfig, nil, opts...) +{{- end }} + rawDesired.BinaryAuthorization = canonicalizeClusterBinaryAuthorization(rawDesired.BinaryAuthorization, nil, opts...) 
+ + return rawDesired, nil + } + canonicalDesired := &Cluster{} + if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawInitial.Name) { + canonicalDesired.Name = rawInitial.Name + } else { + canonicalDesired.Name = rawDesired.Name + } + if dcl.StringCanonicalize(rawDesired.Description, rawInitial.Description) { + canonicalDesired.Description = rawInitial.Description + } else { + canonicalDesired.Description = rawDesired.Description + } + canonicalDesired.Networking = canonicalizeClusterNetworking(rawDesired.Networking, rawInitial.Networking, opts...) + if dcl.StringCanonicalize(rawDesired.AwsRegion, rawInitial.AwsRegion) { + canonicalDesired.AwsRegion = rawInitial.AwsRegion + } else { + canonicalDesired.AwsRegion = rawDesired.AwsRegion + } + canonicalDesired.ControlPlane = canonicalizeClusterControlPlane(rawDesired.ControlPlane, rawInitial.ControlPlane, opts...) + canonicalDesired.Authorization = canonicalizeClusterAuthorization(rawDesired.Authorization, rawInitial.Authorization, opts...) + if dcl.IsZeroValue(rawDesired.Annotations) || (dcl.IsEmptyValueIndirect(rawDesired.Annotations) && dcl.IsEmptyValueIndirect(rawInitial.Annotations)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + canonicalDesired.Annotations = rawInitial.Annotations + } else { + canonicalDesired.Annotations = rawDesired.Annotations + } + if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) { + canonicalDesired.Project = rawInitial.Project + } else { + canonicalDesired.Project = rawDesired.Project + } + if dcl.NameToSelfLink(rawDesired.Location, rawInitial.Location) { + canonicalDesired.Location = rawInitial.Location + } else { + canonicalDesired.Location = rawDesired.Location + } + canonicalDesired.Fleet = canonicalizeClusterFleet(rawDesired.Fleet, rawInitial.Fleet, opts...) 
+{{- if ne $.TargetVersionName "ga" }} + canonicalDesired.LoggingConfig = canonicalizeClusterLoggingConfig(rawDesired.LoggingConfig, rawInitial.LoggingConfig, opts...) + canonicalDesired.MonitoringConfig = canonicalizeClusterMonitoringConfig(rawDesired.MonitoringConfig, rawInitial.MonitoringConfig, opts...) +{{- end }} + canonicalDesired.BinaryAuthorization = canonicalizeClusterBinaryAuthorization(rawDesired.BinaryAuthorization, rawInitial.BinaryAuthorization, opts...) + return canonicalDesired, nil +} + +func canonicalizeClusterNewState(c *Client, rawNew, rawDesired *Cluster) (*Cluster, error) { + + if dcl.IsEmptyValueIndirect(rawNew.Name) && dcl.IsEmptyValueIndirect(rawDesired.Name) { + rawNew.Name = rawDesired.Name + } else { + if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawNew.Name) { + rawNew.Name = rawDesired.Name + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Description) && dcl.IsEmptyValueIndirect(rawDesired.Description) { + rawNew.Description = rawDesired.Description + } else { + if dcl.StringCanonicalize(rawDesired.Description, rawNew.Description) { + rawNew.Description = rawDesired.Description + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Networking) && dcl.IsEmptyValueIndirect(rawDesired.Networking) { + rawNew.Networking = rawDesired.Networking + } else { + rawNew.Networking = canonicalizeNewClusterNetworking(c, rawDesired.Networking, rawNew.Networking) + } + + if dcl.IsEmptyValueIndirect(rawNew.AwsRegion) && dcl.IsEmptyValueIndirect(rawDesired.AwsRegion) { + rawNew.AwsRegion = rawDesired.AwsRegion + } else { + if dcl.StringCanonicalize(rawDesired.AwsRegion, rawNew.AwsRegion) { + rawNew.AwsRegion = rawDesired.AwsRegion + } + } + + if dcl.IsEmptyValueIndirect(rawNew.ControlPlane) && dcl.IsEmptyValueIndirect(rawDesired.ControlPlane) { + rawNew.ControlPlane = rawDesired.ControlPlane + } else { + rawNew.ControlPlane = canonicalizeNewClusterControlPlane(c, rawDesired.ControlPlane, rawNew.ControlPlane) + } + + if 
dcl.IsEmptyValueIndirect(rawNew.Authorization) && dcl.IsEmptyValueIndirect(rawDesired.Authorization) { + rawNew.Authorization = rawDesired.Authorization + } else { + rawNew.Authorization = canonicalizeNewClusterAuthorization(c, rawDesired.Authorization, rawNew.Authorization) + } + + if dcl.IsEmptyValueIndirect(rawNew.State) && dcl.IsEmptyValueIndirect(rawDesired.State) { + rawNew.State = rawDesired.State + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.Endpoint) && dcl.IsEmptyValueIndirect(rawDesired.Endpoint) { + rawNew.Endpoint = rawDesired.Endpoint + } else { + if dcl.StringCanonicalize(rawDesired.Endpoint, rawNew.Endpoint) { + rawNew.Endpoint = rawDesired.Endpoint + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Uid) && dcl.IsEmptyValueIndirect(rawDesired.Uid) { + rawNew.Uid = rawDesired.Uid + } else { + if dcl.StringCanonicalize(rawDesired.Uid, rawNew.Uid) { + rawNew.Uid = rawDesired.Uid + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Reconciling) && dcl.IsEmptyValueIndirect(rawDesired.Reconciling) { + rawNew.Reconciling = rawDesired.Reconciling + } else { + if dcl.BoolCanonicalize(rawDesired.Reconciling, rawNew.Reconciling) { + rawNew.Reconciling = rawDesired.Reconciling + } + } + + if dcl.IsEmptyValueIndirect(rawNew.CreateTime) && dcl.IsEmptyValueIndirect(rawDesired.CreateTime) { + rawNew.CreateTime = rawDesired.CreateTime + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.UpdateTime) && dcl.IsEmptyValueIndirect(rawDesired.UpdateTime) { + rawNew.UpdateTime = rawDesired.UpdateTime + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.Etag) && dcl.IsEmptyValueIndirect(rawDesired.Etag) { + rawNew.Etag = rawDesired.Etag + } else { + if dcl.StringCanonicalize(rawDesired.Etag, rawNew.Etag) { + rawNew.Etag = rawDesired.Etag + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Annotations) && dcl.IsEmptyValueIndirect(rawDesired.Annotations) { + rawNew.Annotations = rawDesired.Annotations + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.WorkloadIdentityConfig) && 
dcl.IsEmptyValueIndirect(rawDesired.WorkloadIdentityConfig) { + rawNew.WorkloadIdentityConfig = rawDesired.WorkloadIdentityConfig + } else { + rawNew.WorkloadIdentityConfig = canonicalizeNewClusterWorkloadIdentityConfig(c, rawDesired.WorkloadIdentityConfig, rawNew.WorkloadIdentityConfig) + } + + rawNew.Project = rawDesired.Project + + rawNew.Location = rawDesired.Location + + if dcl.IsEmptyValueIndirect(rawNew.Fleet) && dcl.IsEmptyValueIndirect(rawDesired.Fleet) { + rawNew.Fleet = rawDesired.Fleet + } else { + rawNew.Fleet = canonicalizeNewClusterFleet(c, rawDesired.Fleet, rawNew.Fleet) +{{- if ne $.TargetVersionName "ga" }} + } + + if dcl.IsEmptyValueIndirect(rawNew.LoggingConfig) && dcl.IsEmptyValueIndirect(rawDesired.LoggingConfig) { + rawNew.LoggingConfig = rawDesired.LoggingConfig + } else { + rawNew.LoggingConfig = canonicalizeNewClusterLoggingConfig(c, rawDesired.LoggingConfig, rawNew.LoggingConfig) + } + + if dcl.IsEmptyValueIndirect(rawNew.MonitoringConfig) && dcl.IsEmptyValueIndirect(rawDesired.MonitoringConfig) { + rawNew.MonitoringConfig = rawDesired.MonitoringConfig + } else { + rawNew.MonitoringConfig = canonicalizeNewClusterMonitoringConfig(c, rawDesired.MonitoringConfig, rawNew.MonitoringConfig) +{{- end }} + } + + if dcl.IsEmptyValueIndirect(rawNew.BinaryAuthorization) && dcl.IsEmptyValueIndirect(rawDesired.BinaryAuthorization) { + rawNew.BinaryAuthorization = rawDesired.BinaryAuthorization + } else { + rawNew.BinaryAuthorization = canonicalizeNewClusterBinaryAuthorization(c, rawDesired.BinaryAuthorization, rawNew.BinaryAuthorization) + } + + return rawNew, nil +} + +func canonicalizeClusterNetworking(des, initial *ClusterNetworking, opts ...dcl.ApplyOption) *ClusterNetworking { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterNetworking{} + + if dcl.StringCanonicalize(des.VPCId, initial.VPCId) || dcl.IsZeroValue(des.VPCId) { + cDes.VPCId = initial.VPCId + } else { 
+ cDes.VPCId = des.VPCId + } + if dcl.StringArrayCanonicalize(des.PodAddressCidrBlocks, initial.PodAddressCidrBlocks) { + cDes.PodAddressCidrBlocks = initial.PodAddressCidrBlocks + } else { + cDes.PodAddressCidrBlocks = des.PodAddressCidrBlocks + } + if dcl.StringArrayCanonicalize(des.ServiceAddressCidrBlocks, initial.ServiceAddressCidrBlocks) { + cDes.ServiceAddressCidrBlocks = initial.ServiceAddressCidrBlocks + } else { + cDes.ServiceAddressCidrBlocks = des.ServiceAddressCidrBlocks + } + if dcl.BoolCanonicalize(des.PerNodePoolSgRulesDisabled, initial.PerNodePoolSgRulesDisabled) || dcl.IsZeroValue(des.PerNodePoolSgRulesDisabled) { + cDes.PerNodePoolSgRulesDisabled = initial.PerNodePoolSgRulesDisabled + } else { + cDes.PerNodePoolSgRulesDisabled = des.PerNodePoolSgRulesDisabled + } + + return cDes +} + +func canonicalizeClusterNetworkingSlice(des, initial []ClusterNetworking, opts ...dcl.ApplyOption) []ClusterNetworking { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterNetworking, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterNetworking(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterNetworking, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterNetworking(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterNetworking(c *Client, des, nw *ClusterNetworking) *ClusterNetworking { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterNetworking while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.VPCId, nw.VPCId) { + nw.VPCId = des.VPCId + } + if dcl.StringArrayCanonicalize(des.PodAddressCidrBlocks, nw.PodAddressCidrBlocks) { + nw.PodAddressCidrBlocks = des.PodAddressCidrBlocks + } + if dcl.StringArrayCanonicalize(des.ServiceAddressCidrBlocks, nw.ServiceAddressCidrBlocks) { + nw.ServiceAddressCidrBlocks = des.ServiceAddressCidrBlocks + } + if dcl.BoolCanonicalize(des.PerNodePoolSgRulesDisabled, nw.PerNodePoolSgRulesDisabled) { + nw.PerNodePoolSgRulesDisabled = des.PerNodePoolSgRulesDisabled + } + + return nw +} + +func canonicalizeNewClusterNetworkingSet(c *Client, des, nw []ClusterNetworking) []ClusterNetworking { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterNetworking + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterNetworkingNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterNetworking(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterNetworkingSlice(c *Client, des, nw []ClusterNetworking) []ClusterNetworking { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterNetworking + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterNetworking(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterControlPlane(des, initial *ClusterControlPlane, opts ...dcl.ApplyOption) *ClusterControlPlane { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterControlPlane{} + + if dcl.StringCanonicalize(des.Version, initial.Version) || dcl.IsZeroValue(des.Version) { + cDes.Version = initial.Version + } else { + cDes.Version = des.Version + } + if dcl.StringCanonicalize(des.InstanceType, initial.InstanceType) || dcl.IsZeroValue(des.InstanceType) { + cDes.InstanceType = initial.InstanceType + } else { + cDes.InstanceType = des.InstanceType + } + cDes.SshConfig = canonicalizeClusterControlPlaneSshConfig(des.SshConfig, initial.SshConfig, opts...) + if dcl.StringArrayCanonicalize(des.SubnetIds, initial.SubnetIds) { + cDes.SubnetIds = initial.SubnetIds + } else { + cDes.SubnetIds = des.SubnetIds + } + cDes.ConfigEncryption = canonicalizeClusterControlPlaneConfigEncryption(des.ConfigEncryption, initial.ConfigEncryption, opts...) + if dcl.StringArrayCanonicalize(des.SecurityGroupIds, initial.SecurityGroupIds) { + cDes.SecurityGroupIds = initial.SecurityGroupIds + } else { + cDes.SecurityGroupIds = des.SecurityGroupIds + } + if dcl.StringCanonicalize(des.IamInstanceProfile, initial.IamInstanceProfile) || dcl.IsZeroValue(des.IamInstanceProfile) { + cDes.IamInstanceProfile = initial.IamInstanceProfile + } else { + cDes.IamInstanceProfile = des.IamInstanceProfile + } + cDes.RootVolume = canonicalizeClusterControlPlaneRootVolume(des.RootVolume, initial.RootVolume, opts...) + cDes.MainVolume = canonicalizeClusterControlPlaneMainVolume(des.MainVolume, initial.MainVolume, opts...) 
+ cDes.DatabaseEncryption = canonicalizeClusterControlPlaneDatabaseEncryption(des.DatabaseEncryption, initial.DatabaseEncryption, opts...) + if dcl.IsZeroValue(des.Tags) || (dcl.IsEmptyValueIndirect(des.Tags) && dcl.IsEmptyValueIndirect(initial.Tags)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Tags = initial.Tags + } else { + cDes.Tags = des.Tags + } + cDes.AwsServicesAuthentication = canonicalizeClusterControlPlaneAwsServicesAuthentication(des.AwsServicesAuthentication, initial.AwsServicesAuthentication, opts...) + cDes.ProxyConfig = canonicalizeClusterControlPlaneProxyConfig(des.ProxyConfig, initial.ProxyConfig, opts...) +{{- if ne $.TargetVersionName "ga" }} + cDes.InstancePlacement = canonicalizeClusterControlPlaneInstancePlacement(des.InstancePlacement, initial.InstancePlacement, opts...) +{{- end }} + + return cDes +} + +func canonicalizeClusterControlPlaneSlice(des, initial []ClusterControlPlane, opts ...dcl.ApplyOption) []ClusterControlPlane { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterControlPlane, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterControlPlane(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterControlPlane, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterControlPlane(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterControlPlane(c *Client, des, nw *ClusterControlPlane) *ClusterControlPlane { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterControlPlane while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Version, nw.Version) { + nw.Version = des.Version + } + if dcl.StringCanonicalize(des.InstanceType, nw.InstanceType) { + nw.InstanceType = des.InstanceType + } + nw.SshConfig = canonicalizeNewClusterControlPlaneSshConfig(c, des.SshConfig, nw.SshConfig) + if dcl.StringArrayCanonicalize(des.SubnetIds, nw.SubnetIds) { + nw.SubnetIds = des.SubnetIds + } + nw.ConfigEncryption = canonicalizeNewClusterControlPlaneConfigEncryption(c, des.ConfigEncryption, nw.ConfigEncryption) + if dcl.StringArrayCanonicalize(des.SecurityGroupIds, nw.SecurityGroupIds) { + nw.SecurityGroupIds = des.SecurityGroupIds + } + if dcl.StringCanonicalize(des.IamInstanceProfile, nw.IamInstanceProfile) { + nw.IamInstanceProfile = des.IamInstanceProfile + } + nw.RootVolume = canonicalizeNewClusterControlPlaneRootVolume(c, des.RootVolume, nw.RootVolume) + nw.MainVolume = canonicalizeNewClusterControlPlaneMainVolume(c, des.MainVolume, nw.MainVolume) + nw.DatabaseEncryption = canonicalizeNewClusterControlPlaneDatabaseEncryption(c, des.DatabaseEncryption, nw.DatabaseEncryption) + nw.AwsServicesAuthentication = canonicalizeNewClusterControlPlaneAwsServicesAuthentication(c, des.AwsServicesAuthentication, nw.AwsServicesAuthentication) + nw.ProxyConfig = canonicalizeNewClusterControlPlaneProxyConfig(c, des.ProxyConfig, nw.ProxyConfig) +{{- if ne $.TargetVersionName "ga" }} + nw.InstancePlacement = canonicalizeNewClusterControlPlaneInstancePlacement(c, des.InstancePlacement, nw.InstancePlacement) +{{- end }} + + return nw +} + +func canonicalizeNewClusterControlPlaneSet(c *Client, des, nw []ClusterControlPlane) []ClusterControlPlane { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []ClusterControlPlane + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterControlPlaneNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterControlPlane(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterControlPlaneSlice(c *Client, des, nw []ClusterControlPlane) []ClusterControlPlane { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []ClusterControlPlane + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterControlPlane(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterControlPlaneSshConfig(des, initial *ClusterControlPlaneSshConfig, opts ...dcl.ApplyOption) *ClusterControlPlaneSshConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterControlPlaneSshConfig{} + + if dcl.StringCanonicalize(des.Ec2KeyPair, initial.Ec2KeyPair) || dcl.IsZeroValue(des.Ec2KeyPair) { + cDes.Ec2KeyPair = initial.Ec2KeyPair + } else { + cDes.Ec2KeyPair = des.Ec2KeyPair + } + + return cDes +} + +func canonicalizeClusterControlPlaneSshConfigSlice(des, initial []ClusterControlPlaneSshConfig, opts ...dcl.ApplyOption) []ClusterControlPlaneSshConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterControlPlaneSshConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterControlPlaneSshConfig(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterControlPlaneSshConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterControlPlaneSshConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterControlPlaneSshConfig(c *Client, des, nw *ClusterControlPlaneSshConfig) *ClusterControlPlaneSshConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterControlPlaneSshConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Ec2KeyPair, nw.Ec2KeyPair) { + nw.Ec2KeyPair = des.Ec2KeyPair + } + + return nw +} + +func canonicalizeNewClusterControlPlaneSshConfigSet(c *Client, des, nw []ClusterControlPlaneSshConfig) []ClusterControlPlaneSshConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterControlPlaneSshConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterControlPlaneSshConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterControlPlaneSshConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterControlPlaneSshConfigSlice(c *Client, des, nw []ClusterControlPlaneSshConfig) []ClusterControlPlaneSshConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterControlPlaneSshConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterControlPlaneSshConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterControlPlaneConfigEncryption(des, initial *ClusterControlPlaneConfigEncryption, opts ...dcl.ApplyOption) *ClusterControlPlaneConfigEncryption { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterControlPlaneConfigEncryption{} + + if dcl.StringCanonicalize(des.KmsKeyArn, initial.KmsKeyArn) || dcl.IsZeroValue(des.KmsKeyArn) { + cDes.KmsKeyArn = initial.KmsKeyArn + } else { + cDes.KmsKeyArn = des.KmsKeyArn + } + + return cDes +} + +func canonicalizeClusterControlPlaneConfigEncryptionSlice(des, initial []ClusterControlPlaneConfigEncryption, opts ...dcl.ApplyOption) []ClusterControlPlaneConfigEncryption { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterControlPlaneConfigEncryption, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterControlPlaneConfigEncryption(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterControlPlaneConfigEncryption, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterControlPlaneConfigEncryption(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterControlPlaneConfigEncryption(c *Client, des, nw *ClusterControlPlaneConfigEncryption) *ClusterControlPlaneConfigEncryption { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterControlPlaneConfigEncryption while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.KmsKeyArn, nw.KmsKeyArn) { + nw.KmsKeyArn = des.KmsKeyArn + } + + return nw +} + +func canonicalizeNewClusterControlPlaneConfigEncryptionSet(c *Client, des, nw []ClusterControlPlaneConfigEncryption) []ClusterControlPlaneConfigEncryption { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterControlPlaneConfigEncryption + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterControlPlaneConfigEncryptionNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterControlPlaneConfigEncryption(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterControlPlaneConfigEncryptionSlice(c *Client, des, nw []ClusterControlPlaneConfigEncryption) []ClusterControlPlaneConfigEncryption { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterControlPlaneConfigEncryption + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterControlPlaneConfigEncryption(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterControlPlaneRootVolume(des, initial *ClusterControlPlaneRootVolume, opts ...dcl.ApplyOption) *ClusterControlPlaneRootVolume { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterControlPlaneRootVolume{} + + if dcl.IsZeroValue(des.SizeGib) || (dcl.IsEmptyValueIndirect(des.SizeGib) && dcl.IsEmptyValueIndirect(initial.SizeGib)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.SizeGib = initial.SizeGib + } else { + cDes.SizeGib = des.SizeGib + } + if dcl.IsZeroValue(des.VolumeType) || (dcl.IsEmptyValueIndirect(des.VolumeType) && dcl.IsEmptyValueIndirect(initial.VolumeType)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.VolumeType = initial.VolumeType + } else { + cDes.VolumeType = des.VolumeType + } + if dcl.IsZeroValue(des.Iops) || (dcl.IsEmptyValueIndirect(des.Iops) && dcl.IsEmptyValueIndirect(initial.Iops)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Iops = initial.Iops + } else { + cDes.Iops = des.Iops + } + if dcl.IsZeroValue(des.Throughput) || (dcl.IsEmptyValueIndirect(des.Throughput) && dcl.IsEmptyValueIndirect(initial.Throughput)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.Throughput = initial.Throughput + } else { + cDes.Throughput = des.Throughput + } + if dcl.StringCanonicalize(des.KmsKeyArn, initial.KmsKeyArn) || dcl.IsZeroValue(des.KmsKeyArn) { + cDes.KmsKeyArn = initial.KmsKeyArn + } else { + cDes.KmsKeyArn = des.KmsKeyArn + } + + return cDes +} + +func canonicalizeClusterControlPlaneRootVolumeSlice(des, initial []ClusterControlPlaneRootVolume, opts ...dcl.ApplyOption) []ClusterControlPlaneRootVolume { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterControlPlaneRootVolume, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterControlPlaneRootVolume(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterControlPlaneRootVolume, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterControlPlaneRootVolume(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterControlPlaneRootVolume(c *Client, des, nw *ClusterControlPlaneRootVolume) *ClusterControlPlaneRootVolume { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterControlPlaneRootVolume while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.KmsKeyArn, nw.KmsKeyArn) { + nw.KmsKeyArn = des.KmsKeyArn + } + + return nw +} + +func canonicalizeNewClusterControlPlaneRootVolumeSet(c *Client, des, nw []ClusterControlPlaneRootVolume) []ClusterControlPlaneRootVolume { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []ClusterControlPlaneRootVolume + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterControlPlaneRootVolumeNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterControlPlaneRootVolume(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterControlPlaneRootVolumeSlice(c *Client, des, nw []ClusterControlPlaneRootVolume) []ClusterControlPlaneRootVolume { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []ClusterControlPlaneRootVolume + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterControlPlaneRootVolume(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterControlPlaneMainVolume(des, initial *ClusterControlPlaneMainVolume, opts ...dcl.ApplyOption) *ClusterControlPlaneMainVolume { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterControlPlaneMainVolume{} + + if dcl.IsZeroValue(des.SizeGib) || (dcl.IsEmptyValueIndirect(des.SizeGib) && dcl.IsEmptyValueIndirect(initial.SizeGib)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.SizeGib = initial.SizeGib + } else { + cDes.SizeGib = des.SizeGib + } + if dcl.IsZeroValue(des.VolumeType) || (dcl.IsEmptyValueIndirect(des.VolumeType) && dcl.IsEmptyValueIndirect(initial.VolumeType)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.VolumeType = initial.VolumeType + } else { + cDes.VolumeType = des.VolumeType + } + if dcl.IsZeroValue(des.Iops) || (dcl.IsEmptyValueIndirect(des.Iops) && dcl.IsEmptyValueIndirect(initial.Iops)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Iops = initial.Iops + } else { + cDes.Iops = des.Iops + } + if dcl.IsZeroValue(des.Throughput) || (dcl.IsEmptyValueIndirect(des.Throughput) && dcl.IsEmptyValueIndirect(initial.Throughput)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Throughput = initial.Throughput + } else { + cDes.Throughput = des.Throughput + } + if dcl.StringCanonicalize(des.KmsKeyArn, initial.KmsKeyArn) || dcl.IsZeroValue(des.KmsKeyArn) { + cDes.KmsKeyArn = initial.KmsKeyArn + } else { + cDes.KmsKeyArn = des.KmsKeyArn + } + + return cDes +} + +func canonicalizeClusterControlPlaneMainVolumeSlice(des, initial []ClusterControlPlaneMainVolume, opts ...dcl.ApplyOption) []ClusterControlPlaneMainVolume { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterControlPlaneMainVolume, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterControlPlaneMainVolume(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterControlPlaneMainVolume, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterControlPlaneMainVolume(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterControlPlaneMainVolume(c *Client, des, nw *ClusterControlPlaneMainVolume) *ClusterControlPlaneMainVolume { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterControlPlaneMainVolume while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.KmsKeyArn, nw.KmsKeyArn) { + nw.KmsKeyArn = des.KmsKeyArn + } + + return nw +} + +func canonicalizeNewClusterControlPlaneMainVolumeSet(c *Client, des, nw []ClusterControlPlaneMainVolume) []ClusterControlPlaneMainVolume { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterControlPlaneMainVolume + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterControlPlaneMainVolumeNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterControlPlaneMainVolume(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterControlPlaneMainVolumeSlice(c *Client, des, nw []ClusterControlPlaneMainVolume) []ClusterControlPlaneMainVolume { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterControlPlaneMainVolume + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterControlPlaneMainVolume(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterControlPlaneDatabaseEncryption(des, initial *ClusterControlPlaneDatabaseEncryption, opts ...dcl.ApplyOption) *ClusterControlPlaneDatabaseEncryption { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterControlPlaneDatabaseEncryption{} + + if dcl.StringCanonicalize(des.KmsKeyArn, initial.KmsKeyArn) || dcl.IsZeroValue(des.KmsKeyArn) { + cDes.KmsKeyArn = initial.KmsKeyArn + } else { + cDes.KmsKeyArn = des.KmsKeyArn + } + + return cDes +} + +func canonicalizeClusterControlPlaneDatabaseEncryptionSlice(des, initial []ClusterControlPlaneDatabaseEncryption, opts ...dcl.ApplyOption) []ClusterControlPlaneDatabaseEncryption { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterControlPlaneDatabaseEncryption, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterControlPlaneDatabaseEncryption(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterControlPlaneDatabaseEncryption, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterControlPlaneDatabaseEncryption(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterControlPlaneDatabaseEncryption(c *Client, des, nw *ClusterControlPlaneDatabaseEncryption) *ClusterControlPlaneDatabaseEncryption { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterControlPlaneDatabaseEncryption while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.KmsKeyArn, nw.KmsKeyArn) { + nw.KmsKeyArn = des.KmsKeyArn + } + + return nw +} + +func canonicalizeNewClusterControlPlaneDatabaseEncryptionSet(c *Client, des, nw []ClusterControlPlaneDatabaseEncryption) []ClusterControlPlaneDatabaseEncryption { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterControlPlaneDatabaseEncryption + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterControlPlaneDatabaseEncryptionNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterControlPlaneDatabaseEncryption(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterControlPlaneDatabaseEncryptionSlice(c *Client, des, nw []ClusterControlPlaneDatabaseEncryption) []ClusterControlPlaneDatabaseEncryption { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterControlPlaneDatabaseEncryption + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterControlPlaneDatabaseEncryption(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterControlPlaneAwsServicesAuthentication(des, initial *ClusterControlPlaneAwsServicesAuthentication, opts ...dcl.ApplyOption) *ClusterControlPlaneAwsServicesAuthentication { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterControlPlaneAwsServicesAuthentication{} + + if dcl.StringCanonicalize(des.RoleArn, initial.RoleArn) || dcl.IsZeroValue(des.RoleArn) { + cDes.RoleArn = initial.RoleArn + } else { + cDes.RoleArn = des.RoleArn + } + if dcl.StringCanonicalize(des.RoleSessionName, initial.RoleSessionName) || dcl.IsZeroValue(des.RoleSessionName) { + cDes.RoleSessionName = initial.RoleSessionName + } else { + cDes.RoleSessionName = des.RoleSessionName + } + + return cDes +} + +func canonicalizeClusterControlPlaneAwsServicesAuthenticationSlice(des, initial []ClusterControlPlaneAwsServicesAuthentication, opts ...dcl.ApplyOption) []ClusterControlPlaneAwsServicesAuthentication { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterControlPlaneAwsServicesAuthentication, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterControlPlaneAwsServicesAuthentication(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterControlPlaneAwsServicesAuthentication, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterControlPlaneAwsServicesAuthentication(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterControlPlaneAwsServicesAuthentication(c *Client, des, nw *ClusterControlPlaneAwsServicesAuthentication) *ClusterControlPlaneAwsServicesAuthentication { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterControlPlaneAwsServicesAuthentication while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.RoleArn, nw.RoleArn) { + nw.RoleArn = des.RoleArn + } + if dcl.StringCanonicalize(des.RoleSessionName, nw.RoleSessionName) { + nw.RoleSessionName = des.RoleSessionName + } + + return nw +} + +func canonicalizeNewClusterControlPlaneAwsServicesAuthenticationSet(c *Client, des, nw []ClusterControlPlaneAwsServicesAuthentication) []ClusterControlPlaneAwsServicesAuthentication { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterControlPlaneAwsServicesAuthentication + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterControlPlaneAwsServicesAuthenticationNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterControlPlaneAwsServicesAuthentication(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterControlPlaneAwsServicesAuthenticationSlice(c *Client, des, nw []ClusterControlPlaneAwsServicesAuthentication) []ClusterControlPlaneAwsServicesAuthentication { + if des == nil { + return nw + } + + // Lengths are unequal. 
A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []ClusterControlPlaneAwsServicesAuthentication + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterControlPlaneAwsServicesAuthentication(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterControlPlaneProxyConfig(des, initial *ClusterControlPlaneProxyConfig, opts ...dcl.ApplyOption) *ClusterControlPlaneProxyConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterControlPlaneProxyConfig{} + + if dcl.StringCanonicalize(des.SecretArn, initial.SecretArn) || dcl.IsZeroValue(des.SecretArn) { + cDes.SecretArn = initial.SecretArn + } else { + cDes.SecretArn = des.SecretArn + } + if dcl.StringCanonicalize(des.SecretVersion, initial.SecretVersion) || dcl.IsZeroValue(des.SecretVersion) { + cDes.SecretVersion = initial.SecretVersion + } else { + cDes.SecretVersion = des.SecretVersion + } + + return cDes +} + +func canonicalizeClusterControlPlaneProxyConfigSlice(des, initial []ClusterControlPlaneProxyConfig, opts ...dcl.ApplyOption) []ClusterControlPlaneProxyConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterControlPlaneProxyConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterControlPlaneProxyConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterControlPlaneProxyConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterControlPlaneProxyConfig(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterControlPlaneProxyConfig(c *Client, des, nw *ClusterControlPlaneProxyConfig) *ClusterControlPlaneProxyConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterControlPlaneProxyConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.SecretArn, nw.SecretArn) { + nw.SecretArn = des.SecretArn + } + if dcl.StringCanonicalize(des.SecretVersion, nw.SecretVersion) { + nw.SecretVersion = des.SecretVersion + } + + return nw +} + +func canonicalizeNewClusterControlPlaneProxyConfigSet(c *Client, des, nw []ClusterControlPlaneProxyConfig) []ClusterControlPlaneProxyConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterControlPlaneProxyConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterControlPlaneProxyConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterControlPlaneProxyConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterControlPlaneProxyConfigSlice(c *Client, des, nw []ClusterControlPlaneProxyConfig) []ClusterControlPlaneProxyConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterControlPlaneProxyConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterControlPlaneProxyConfig(c, &d, &n)) + } + + return items +} + +{{- if ne $.TargetVersionName "ga" }} +func canonicalizeClusterControlPlaneInstancePlacement(des, initial *ClusterControlPlaneInstancePlacement, opts ...dcl.ApplyOption) *ClusterControlPlaneInstancePlacement { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterControlPlaneInstancePlacement{} + + if dcl.IsZeroValue(des.Tenancy) || (dcl.IsEmptyValueIndirect(des.Tenancy) && dcl.IsEmptyValueIndirect(initial.Tenancy)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Tenancy = initial.Tenancy + } else { + cDes.Tenancy = des.Tenancy + } + + return cDes +} + +func canonicalizeClusterControlPlaneInstancePlacementSlice(des, initial []ClusterControlPlaneInstancePlacement, opts ...dcl.ApplyOption) []ClusterControlPlaneInstancePlacement { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterControlPlaneInstancePlacement, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterControlPlaneInstancePlacement(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterControlPlaneInstancePlacement, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterControlPlaneInstancePlacement(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterControlPlaneInstancePlacement(c *Client, des, nw *ClusterControlPlaneInstancePlacement) *ClusterControlPlaneInstancePlacement { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterControlPlaneInstancePlacement while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewClusterControlPlaneInstancePlacementSet(c *Client, des, nw []ClusterControlPlaneInstancePlacement) []ClusterControlPlaneInstancePlacement { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterControlPlaneInstancePlacement + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterControlPlaneInstancePlacementNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterControlPlaneInstancePlacement(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterControlPlaneInstancePlacementSlice(c *Client, des, nw []ClusterControlPlaneInstancePlacement) []ClusterControlPlaneInstancePlacement { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterControlPlaneInstancePlacement + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterControlPlaneInstancePlacement(c, &d, &n)) + } + + return items +} + +{{- end }} +func canonicalizeClusterAuthorization(des, initial *ClusterAuthorization, opts ...dcl.ApplyOption) *ClusterAuthorization { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterAuthorization{} + + cDes.AdminUsers = canonicalizeClusterAuthorizationAdminUsersSlice(des.AdminUsers, initial.AdminUsers, opts...) + cDes.AdminGroups = canonicalizeClusterAuthorizationAdminGroupsSlice(des.AdminGroups, initial.AdminGroups, opts...) + + return cDes +} + +func canonicalizeClusterAuthorizationSlice(des, initial []ClusterAuthorization, opts ...dcl.ApplyOption) []ClusterAuthorization { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterAuthorization, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterAuthorization(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterAuthorization, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterAuthorization(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterAuthorization(c *Client, des, nw *ClusterAuthorization) *ClusterAuthorization { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterAuthorization while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + nw.AdminUsers = canonicalizeNewClusterAuthorizationAdminUsersSlice(c, des.AdminUsers, nw.AdminUsers) + nw.AdminGroups = canonicalizeNewClusterAuthorizationAdminGroupsSlice(c, des.AdminGroups, nw.AdminGroups) + + return nw +} + +func canonicalizeNewClusterAuthorizationSet(c *Client, des, nw []ClusterAuthorization) []ClusterAuthorization { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterAuthorization + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterAuthorizationNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterAuthorization(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterAuthorizationSlice(c *Client, des, nw []ClusterAuthorization) []ClusterAuthorization { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterAuthorization + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterAuthorization(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterAuthorizationAdminUsers(des, initial *ClusterAuthorizationAdminUsers, opts ...dcl.ApplyOption) *ClusterAuthorizationAdminUsers { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterAuthorizationAdminUsers{} + + if dcl.StringCanonicalize(des.Username, initial.Username) || dcl.IsZeroValue(des.Username) { + cDes.Username = initial.Username + } else { + cDes.Username = des.Username + } + + return cDes +} + +func canonicalizeClusterAuthorizationAdminUsersSlice(des, initial []ClusterAuthorizationAdminUsers, opts ...dcl.ApplyOption) []ClusterAuthorizationAdminUsers { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterAuthorizationAdminUsers, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterAuthorizationAdminUsers(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterAuthorizationAdminUsers, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterAuthorizationAdminUsers(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterAuthorizationAdminUsers(c *Client, des, nw *ClusterAuthorizationAdminUsers) *ClusterAuthorizationAdminUsers { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterAuthorizationAdminUsers while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Username, nw.Username) { + nw.Username = des.Username + } + + return nw +} + +func canonicalizeNewClusterAuthorizationAdminUsersSet(c *Client, des, nw []ClusterAuthorizationAdminUsers) []ClusterAuthorizationAdminUsers { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterAuthorizationAdminUsers + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterAuthorizationAdminUsersNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterAuthorizationAdminUsers(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterAuthorizationAdminUsersSlice(c *Client, des, nw []ClusterAuthorizationAdminUsers) []ClusterAuthorizationAdminUsers { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterAuthorizationAdminUsers + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterAuthorizationAdminUsers(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterAuthorizationAdminGroups(des, initial *ClusterAuthorizationAdminGroups, opts ...dcl.ApplyOption) *ClusterAuthorizationAdminGroups { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterAuthorizationAdminGroups{} + + if dcl.StringCanonicalize(des.Group, initial.Group) || dcl.IsZeroValue(des.Group) { + cDes.Group = initial.Group + } else { + cDes.Group = des.Group + } + + return cDes +} + +func canonicalizeClusterAuthorizationAdminGroupsSlice(des, initial []ClusterAuthorizationAdminGroups, opts ...dcl.ApplyOption) []ClusterAuthorizationAdminGroups { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterAuthorizationAdminGroups, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterAuthorizationAdminGroups(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterAuthorizationAdminGroups, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterAuthorizationAdminGroups(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterAuthorizationAdminGroups(c *Client, des, nw *ClusterAuthorizationAdminGroups) *ClusterAuthorizationAdminGroups { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterAuthorizationAdminGroups while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Group, nw.Group) { + nw.Group = des.Group + } + + return nw +} + +func canonicalizeNewClusterAuthorizationAdminGroupsSet(c *Client, des, nw []ClusterAuthorizationAdminGroups) []ClusterAuthorizationAdminGroups { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterAuthorizationAdminGroups + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterAuthorizationAdminGroupsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterAuthorizationAdminGroups(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterAuthorizationAdminGroupsSlice(c *Client, des, nw []ClusterAuthorizationAdminGroups) []ClusterAuthorizationAdminGroups { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterAuthorizationAdminGroups + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterAuthorizationAdminGroups(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterWorkloadIdentityConfig(des, initial *ClusterWorkloadIdentityConfig, opts ...dcl.ApplyOption) *ClusterWorkloadIdentityConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterWorkloadIdentityConfig{} + + if dcl.StringCanonicalize(des.IssuerUri, initial.IssuerUri) || dcl.IsZeroValue(des.IssuerUri) { + cDes.IssuerUri = initial.IssuerUri + } else { + cDes.IssuerUri = des.IssuerUri + } + if dcl.StringCanonicalize(des.WorkloadPool, initial.WorkloadPool) || dcl.IsZeroValue(des.WorkloadPool) { + cDes.WorkloadPool = initial.WorkloadPool + } else { + cDes.WorkloadPool = des.WorkloadPool + } + if dcl.StringCanonicalize(des.IdentityProvider, initial.IdentityProvider) || dcl.IsZeroValue(des.IdentityProvider) { + cDes.IdentityProvider = initial.IdentityProvider + } else { + cDes.IdentityProvider = des.IdentityProvider + } + + return cDes +} + +func canonicalizeClusterWorkloadIdentityConfigSlice(des, initial []ClusterWorkloadIdentityConfig, opts ...dcl.ApplyOption) []ClusterWorkloadIdentityConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterWorkloadIdentityConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterWorkloadIdentityConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterWorkloadIdentityConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterWorkloadIdentityConfig(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterWorkloadIdentityConfig(c *Client, des, nw *ClusterWorkloadIdentityConfig) *ClusterWorkloadIdentityConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterWorkloadIdentityConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.IssuerUri, nw.IssuerUri) { + nw.IssuerUri = des.IssuerUri + } + if dcl.StringCanonicalize(des.WorkloadPool, nw.WorkloadPool) { + nw.WorkloadPool = des.WorkloadPool + } + if dcl.StringCanonicalize(des.IdentityProvider, nw.IdentityProvider) { + nw.IdentityProvider = des.IdentityProvider + } + + return nw +} + +func canonicalizeNewClusterWorkloadIdentityConfigSet(c *Client, des, nw []ClusterWorkloadIdentityConfig) []ClusterWorkloadIdentityConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterWorkloadIdentityConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterWorkloadIdentityConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterWorkloadIdentityConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterWorkloadIdentityConfigSlice(c *Client, des, nw []ClusterWorkloadIdentityConfig) []ClusterWorkloadIdentityConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterWorkloadIdentityConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterWorkloadIdentityConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterFleet(des, initial *ClusterFleet, opts ...dcl.ApplyOption) *ClusterFleet { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterFleet{} + + if dcl.PartialSelfLinkToSelfLink(des.Project, initial.Project) || dcl.IsZeroValue(des.Project) { + cDes.Project = initial.Project + } else { + cDes.Project = des.Project + } + + return cDes +} + +func canonicalizeClusterFleetSlice(des, initial []ClusterFleet, opts ...dcl.ApplyOption) []ClusterFleet { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterFleet, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterFleet(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterFleet, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterFleet(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterFleet(c *Client, des, nw *ClusterFleet) *ClusterFleet { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterFleet while comparing non-nil desired to nil actual. 
Returning desired object.")
+			return des
+		}
+		return nil
+	}
+
+	if dcl.PartialSelfLinkToSelfLink(des.Project, nw.Project) {
+		nw.Project = des.Project
+	}
+	if dcl.StringCanonicalize(des.Membership, nw.Membership) {
+		nw.Membership = des.Membership
+	}
+
+	return nw
+}
+
+func canonicalizeNewClusterFleetSet(c *Client, des, nw []ClusterFleet) []ClusterFleet {
+	if des == nil {
+		return nw
+	}
+
+	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
+	var items []ClusterFleet
+	for _, d := range des {
+		matchedIndex := -1
+		for i, n := range nw {
+			if diffs, _ := compareClusterFleetNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
+				matchedIndex = i
+				break
+			}
+		}
+		if matchedIndex != -1 {
+			items = append(items, *canonicalizeNewClusterFleet(c, &d, &nw[matchedIndex]))
+			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
+		}
+	}
+	// Also include elements in nw that are not matched in des.
+	items = append(items, nw...)
+
+	return items
+}
+
+func canonicalizeNewClusterFleetSlice(c *Client, des, nw []ClusterFleet) []ClusterFleet {
+	if des == nil {
+		return nw
+	}
+
+	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
+	// Return the original array.
+	if len(des) != len(nw) {
+		return nw
+	}
+
+	var items []ClusterFleet
+	for i, d := range des {
+		n := nw[i]
+		items = append(items, *canonicalizeNewClusterFleet(c, &d, &n))
+	}
+
+	return items
+}
+
+{{- if ne $.TargetVersionName "ga" }}
+func canonicalizeClusterLoggingConfig(des, initial *ClusterLoggingConfig, opts ...dcl.ApplyOption) *ClusterLoggingConfig {
+	if des == nil {
+		return initial
+	}
+	if des.empty {
+		return des
+	}
+
+	if initial == nil {
+		return des
+	}
+
+	cDes := &ClusterLoggingConfig{}
+
+	cDes.ComponentConfig = canonicalizeClusterLoggingConfigComponentConfig(des.ComponentConfig, initial.ComponentConfig, opts...)
+ + return cDes +} + +func canonicalizeClusterLoggingConfigSlice(des, initial []ClusterLoggingConfig, opts ...dcl.ApplyOption) []ClusterLoggingConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterLoggingConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterLoggingConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterLoggingConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterLoggingConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterLoggingConfig(c *Client, des, nw *ClusterLoggingConfig) *ClusterLoggingConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterLoggingConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + nw.ComponentConfig = canonicalizeNewClusterLoggingConfigComponentConfig(c, des.ComponentConfig, nw.ComponentConfig) + + return nw +} + +func canonicalizeNewClusterLoggingConfigSet(c *Client, des, nw []ClusterLoggingConfig) []ClusterLoggingConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterLoggingConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterLoggingConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterLoggingConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) 
+ + return items +} + +func canonicalizeNewClusterLoggingConfigSlice(c *Client, des, nw []ClusterLoggingConfig) []ClusterLoggingConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []ClusterLoggingConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterLoggingConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterLoggingConfigComponentConfig(des, initial *ClusterLoggingConfigComponentConfig, opts ...dcl.ApplyOption) *ClusterLoggingConfigComponentConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterLoggingConfigComponentConfig{} + + if dcl.IsZeroValue(des.EnableComponents) || (dcl.IsEmptyValueIndirect(des.EnableComponents) && dcl.IsEmptyValueIndirect(initial.EnableComponents)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.EnableComponents = initial.EnableComponents + } else { + cDes.EnableComponents = des.EnableComponents + } + + return cDes +} + +func canonicalizeClusterLoggingConfigComponentConfigSlice(des, initial []ClusterLoggingConfigComponentConfig, opts ...dcl.ApplyOption) []ClusterLoggingConfigComponentConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterLoggingConfigComponentConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterLoggingConfigComponentConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterLoggingConfigComponentConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterLoggingConfigComponentConfig(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterLoggingConfigComponentConfig(c *Client, des, nw *ClusterLoggingConfigComponentConfig) *ClusterLoggingConfigComponentConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterLoggingConfigComponentConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewClusterLoggingConfigComponentConfigSet(c *Client, des, nw []ClusterLoggingConfigComponentConfig) []ClusterLoggingConfigComponentConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterLoggingConfigComponentConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterLoggingConfigComponentConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterLoggingConfigComponentConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterLoggingConfigComponentConfigSlice(c *Client, des, nw []ClusterLoggingConfigComponentConfig) []ClusterLoggingConfigComponentConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+	if len(des) != len(nw) {
+		return nw
+	}
+
+	var items []ClusterLoggingConfigComponentConfig
+	for i, d := range des {
+		n := nw[i]
+		items = append(items, *canonicalizeNewClusterLoggingConfigComponentConfig(c, &d, &n))
+	}
+
+	return items
+}
+
+// canonicalizeClusterMonitoringConfig merges the desired monitoring config with the
+// initial state: nil desired keeps initial, explicitly-empty desired (or nil initial)
+// is returned unchanged, otherwise nested fields are canonicalized recursively.
+func canonicalizeClusterMonitoringConfig(des, initial *ClusterMonitoringConfig, opts ...dcl.ApplyOption) *ClusterMonitoringConfig {
+	if des == nil {
+		return initial
+	}
+	if des.empty {
+		return des
+	}
+
+	if initial == nil {
+		return des
+	}
+
+	cDes := &ClusterMonitoringConfig{}
+
+	cDes.ManagedPrometheusConfig = canonicalizeClusterMonitoringConfigManagedPrometheusConfig(des.ManagedPrometheusConfig, initial.ManagedPrometheusConfig, opts...)
+
+	return cDes
+}
+
+// canonicalizeClusterMonitoringConfigSlice canonicalizes each element; when lengths
+// differ, elements are canonicalized against a nil initial value.
+func canonicalizeClusterMonitoringConfigSlice(des, initial []ClusterMonitoringConfig, opts ...dcl.ApplyOption) []ClusterMonitoringConfig {
+	if dcl.IsEmptyValueIndirect(des) {
+		return initial
+	}
+
+	if len(des) != len(initial) {
+
+		items := make([]ClusterMonitoringConfig, 0, len(des))
+		for _, d := range des {
+			cd := canonicalizeClusterMonitoringConfig(&d, nil, opts...)
+			if cd != nil {
+				items = append(items, *cd)
+			}
+		}
+		return items
+	}
+
+	items := make([]ClusterMonitoringConfig, 0, len(des))
+	for i, d := range des {
+		cd := canonicalizeClusterMonitoringConfig(&d, &initial[i], opts...)
+		if cd != nil {
+			items = append(items, *cd)
+		}
+	}
+	return items
+
+}
+
+// canonicalizeNewClusterMonitoringConfig reconciles the desired value (des) with the
+// value returned by the API (nw) after an apply.
+func canonicalizeNewClusterMonitoringConfig(c *Client, des, nw *ClusterMonitoringConfig) *ClusterMonitoringConfig {
+
+	if des == nil {
+		return nw
+	}
+
+	if nw == nil {
+		if dcl.IsEmptyValueIndirect(des) {
+			c.Config.Logger.Info("Found explicitly empty value for ClusterMonitoringConfig while comparing non-nil desired to nil actual. Returning desired object.")
+			return des
+		}
+		return nil
+	}
+
+	nw.ManagedPrometheusConfig = canonicalizeNewClusterMonitoringConfigManagedPrometheusConfig(c, des.ManagedPrometheusConfig, nw.ManagedPrometheusConfig)
+
+	return nw
+}
+
+// canonicalizeNewClusterMonitoringConfigSet treats des/nw as sets, matching elements
+// by zero-diff comparison rather than by index.
+func canonicalizeNewClusterMonitoringConfigSet(c *Client, des, nw []ClusterMonitoringConfig) []ClusterMonitoringConfig {
+	if des == nil {
+		return nw
+	}
+
+	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
+	var items []ClusterMonitoringConfig
+	for _, d := range des {
+		matchedIndex := -1
+		for i, n := range nw {
+			if diffs, _ := compareClusterMonitoringConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
+				matchedIndex = i
+				break
+			}
+		}
+		if matchedIndex != -1 {
+			items = append(items, *canonicalizeNewClusterMonitoringConfig(c, &d, &nw[matchedIndex]))
+			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
+		}
+	}
+	// Also include elements in nw that are not matched in des.
+	items = append(items, nw...)
+
+	return items
+}
+
+func canonicalizeNewClusterMonitoringConfigSlice(c *Client, des, nw []ClusterMonitoringConfig) []ClusterMonitoringConfig {
+	if des == nil {
+		return nw
+	}
+
+	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
+	// Return the original array.
+	if len(des) != len(nw) {
+		return nw
+	}
+
+	var items []ClusterMonitoringConfig
+	for i, d := range des {
+		n := nw[i]
+		items = append(items, *canonicalizeNewClusterMonitoringConfig(c, &d, &n))
+	}
+
+	return items
+}
+
+func canonicalizeClusterMonitoringConfigManagedPrometheusConfig(des, initial *ClusterMonitoringConfigManagedPrometheusConfig, opts ...dcl.ApplyOption) *ClusterMonitoringConfigManagedPrometheusConfig {
+	if des == nil {
+		return initial
+	}
+	if des.empty {
+		return des
+	}
+
+	if initial == nil {
+		return des
+	}
+
+	cDes := &ClusterMonitoringConfigManagedPrometheusConfig{}
+
+	if dcl.BoolCanonicalize(des.Enabled, initial.Enabled) || dcl.IsZeroValue(des.Enabled) {
+		cDes.Enabled = initial.Enabled
+	} else {
+		cDes.Enabled = des.Enabled
+	}
+
+	return cDes
+}
+
+func canonicalizeClusterMonitoringConfigManagedPrometheusConfigSlice(des, initial []ClusterMonitoringConfigManagedPrometheusConfig, opts ...dcl.ApplyOption) []ClusterMonitoringConfigManagedPrometheusConfig {
+	if dcl.IsEmptyValueIndirect(des) {
+		return initial
+	}
+
+	if len(des) != len(initial) {
+
+		items := make([]ClusterMonitoringConfigManagedPrometheusConfig, 0, len(des))
+		for _, d := range des {
+			cd := canonicalizeClusterMonitoringConfigManagedPrometheusConfig(&d, nil, opts...)
+			if cd != nil {
+				items = append(items, *cd)
+			}
+		}
+		return items
+	}
+
+	items := make([]ClusterMonitoringConfigManagedPrometheusConfig, 0, len(des))
+	for i, d := range des {
+		cd := canonicalizeClusterMonitoringConfigManagedPrometheusConfig(&d, &initial[i], opts...)
+		if cd != nil {
+			items = append(items, *cd)
+		}
+	}
+	return items
+
+}
+
+func canonicalizeNewClusterMonitoringConfigManagedPrometheusConfig(c *Client, des, nw *ClusterMonitoringConfigManagedPrometheusConfig) *ClusterMonitoringConfigManagedPrometheusConfig {
+
+	if des == nil {
+		return nw
+	}
+
+	if nw == nil {
+		if dcl.IsEmptyValueIndirect(des) {
+			c.Config.Logger.Info("Found explicitly empty value for ClusterMonitoringConfigManagedPrometheusConfig while comparing non-nil desired to nil actual. Returning desired object.")
+			return des
+		}
+		return nil
+	}
+
+	if dcl.BoolCanonicalize(des.Enabled, nw.Enabled) {
+		nw.Enabled = des.Enabled
+	}
+
+	return nw
+}
+
+func canonicalizeNewClusterMonitoringConfigManagedPrometheusConfigSet(c *Client, des, nw []ClusterMonitoringConfigManagedPrometheusConfig) []ClusterMonitoringConfigManagedPrometheusConfig {
+	if des == nil {
+		return nw
+	}
+
+	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
+	var items []ClusterMonitoringConfigManagedPrometheusConfig
+	for _, d := range des {
+		matchedIndex := -1
+		for i, n := range nw {
+			if diffs, _ := compareClusterMonitoringConfigManagedPrometheusConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
+				matchedIndex = i
+				break
+			}
+		}
+		if matchedIndex != -1 {
+			items = append(items, *canonicalizeNewClusterMonitoringConfigManagedPrometheusConfig(c, &d, &nw[matchedIndex]))
+			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
+		}
+	}
+	// Also include elements in nw that are not matched in des.
+	items = append(items, nw...)
+
+	return items
+}
+
+func canonicalizeNewClusterMonitoringConfigManagedPrometheusConfigSlice(c *Client, des, nw []ClusterMonitoringConfigManagedPrometheusConfig) []ClusterMonitoringConfigManagedPrometheusConfig {
+	if des == nil {
+		return nw
+	}
+
+	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
+	// Return the original array.
+	if len(des) != len(nw) {
+		return nw
+	}
+
+	var items []ClusterMonitoringConfigManagedPrometheusConfig
+	for i, d := range des {
+		n := nw[i]
+		items = append(items, *canonicalizeNewClusterMonitoringConfigManagedPrometheusConfig(c, &d, &n))
+// NOTE(review): the template end-marker below closes a version guard opened in an
+// earlier hunk (monitoring config is beta-only per the guarded diffCluster section).
+// Its position inside this loop looks like a merge artifact — confirm that the GA
+// rendering of this template still produces valid Go.
+{{- end }}
+	}
+
+	return items
+}
+
+// canonicalizeClusterBinaryAuthorization merges the desired binary-authorization
+// config with the initial state, preferring explicit desired values.
+func canonicalizeClusterBinaryAuthorization(des, initial *ClusterBinaryAuthorization, opts ...dcl.ApplyOption) *ClusterBinaryAuthorization {
+	if des == nil {
+		return initial
+	}
+	if des.empty {
+		return des
+	}
+
+	if initial == nil {
+		return des
+	}
+
+	cDes := &ClusterBinaryAuthorization{}
+
+	if dcl.IsZeroValue(des.EvaluationMode) || (dcl.IsEmptyValueIndirect(des.EvaluationMode) && dcl.IsEmptyValueIndirect(initial.EvaluationMode)) {
+		// Desired and initial values are equivalent, so set canonical desired value to initial value.
+		cDes.EvaluationMode = initial.EvaluationMode
+	} else {
+		cDes.EvaluationMode = des.EvaluationMode
+	}
+
+	return cDes
+}
+
+func canonicalizeClusterBinaryAuthorizationSlice(des, initial []ClusterBinaryAuthorization, opts ...dcl.ApplyOption) []ClusterBinaryAuthorization {
+	if dcl.IsEmptyValueIndirect(des) {
+		return initial
+	}
+
+	if len(des) != len(initial) {
+
+		items := make([]ClusterBinaryAuthorization, 0, len(des))
+		for _, d := range des {
+			cd := canonicalizeClusterBinaryAuthorization(&d, nil, opts...)
+			if cd != nil {
+				items = append(items, *cd)
+			}
+		}
+		return items
+	}
+
+	items := make([]ClusterBinaryAuthorization, 0, len(des))
+	for i, d := range des {
+		cd := canonicalizeClusterBinaryAuthorization(&d, &initial[i], opts...)
+		if cd != nil {
+			items = append(items, *cd)
+		}
+	}
+	return items
+
+}
+
+// canonicalizeNewClusterBinaryAuthorization reconciles the desired value (des)
+// with the value returned by the API (nw) after an apply.
+func canonicalizeNewClusterBinaryAuthorization(c *Client, des, nw *ClusterBinaryAuthorization) *ClusterBinaryAuthorization {
+
+	if des == nil {
+		return nw
+	}
+
+	if nw == nil {
+		if dcl.IsEmptyValueIndirect(des) {
+			c.Config.Logger.Info("Found explicitly empty value for ClusterBinaryAuthorization while comparing non-nil desired to nil actual. Returning desired object.")
+			return des
+		}
+		return nil
+	}
+
+	return nw
+}
+
+// canonicalizeNewClusterBinaryAuthorizationSet treats des/nw as sets, matching
+// elements by zero-diff comparison rather than by index.
+func canonicalizeNewClusterBinaryAuthorizationSet(c *Client, des, nw []ClusterBinaryAuthorization) []ClusterBinaryAuthorization {
+	if des == nil {
+		return nw
+	}
+
+	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
+	var items []ClusterBinaryAuthorization
+	for _, d := range des {
+		matchedIndex := -1
+		for i, n := range nw {
+			if diffs, _ := compareClusterBinaryAuthorizationNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
+				matchedIndex = i
+				break
+			}
+		}
+		if matchedIndex != -1 {
+			items = append(items, *canonicalizeNewClusterBinaryAuthorization(c, &d, &nw[matchedIndex]))
+			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
+		}
+	}
+	// Also include elements in nw that are not matched in des.
+	items = append(items, nw...)
+
+	return items
+}
+
+func canonicalizeNewClusterBinaryAuthorizationSlice(c *Client, des, nw []ClusterBinaryAuthorization) []ClusterBinaryAuthorization {
+	if des == nil {
+		return nw
+	}
+
+	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
+	// Return the original array.
+	if len(des) != len(nw) {
+		return nw
+	}
+
+	var items []ClusterBinaryAuthorization
+	for i, d := range des {
+		n := nw[i]
+		items = append(items, *canonicalizeNewClusterBinaryAuthorization(c, &d, &n))
+	}
+
+	return items
+}
+
+// The differ returns a list of diffs, along with a list of operations that should be taken
+// to remedy them. Right now, it does not attempt to consolidate operations - if several
+// fields can be fixed with a patch update, it will perform the patch several times.
+// Diffs on some fields will be ignored if the `desired` state has an empty (nil)
+// value. This empty value indicates that the user does not care about the state for
+// the field. Empty fields on the actual object will cause diffs.
+// TODO(magic-modules-eng): for efficiency in some resources, add batching.
+func diffCluster(c *Client, desired, actual *Cluster, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) {
+	if desired == nil || actual == nil {
+		return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual)
+	}
+
+	c.Config.Logger.Infof("Diff function called with desired state: %v", desired)
+	c.Config.Logger.Infof("Diff function called with actual state: %v", actual)
+
+	var fn dcl.FieldName
+	var newDiffs []*dcl.FieldDiff
+	// New style diffs.
+	if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.Description, actual.Description, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("Description")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.Networking, actual.Networking, dcl.DiffInfo{ObjectFunction: compareClusterNetworkingNewStyle, EmptyObject: EmptyClusterNetworking, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Networking")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.AwsRegion, actual.AwsRegion, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AwsRegion")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.ControlPlane, actual.ControlPlane, dcl.DiffInfo{ObjectFunction: compareClusterControlPlaneNewStyle, EmptyObject: EmptyClusterControlPlane, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ControlPlane")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.Authorization, actual.Authorization, dcl.DiffInfo{ObjectFunction: compareClusterAuthorizationNewStyle, EmptyObject: EmptyClusterAuthorization, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Authorization")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.State, actual.State, dcl.DiffInfo{OutputOnly: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("State")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.Endpoint, actual.Endpoint, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Endpoint")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.Uid, actual.Uid, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Uid")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.Reconciling, actual.Reconciling, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Reconciling")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.CreateTime, actual.CreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreateTime")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.Etag, actual.Etag, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Etag")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.Annotations, actual.Annotations, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("Annotations")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.WorkloadIdentityConfig, actual.WorkloadIdentityConfig, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareClusterWorkloadIdentityConfigNewStyle, EmptyObject: EmptyClusterWorkloadIdentityConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("WorkloadIdentityConfig")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.Location, actual.Location, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Location")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.Fleet, actual.Fleet, dcl.DiffInfo{ObjectFunction: compareClusterFleetNewStyle, EmptyObject: EmptyClusterFleet, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Fleet")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+{{- if ne $.TargetVersionName "ga" }}
+	if ds, err := dcl.Diff(desired.LoggingConfig, actual.LoggingConfig, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareClusterLoggingConfigNewStyle, EmptyObject: EmptyClusterLoggingConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("LoggingConfig")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.MonitoringConfig, actual.MonitoringConfig, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareClusterMonitoringConfigNewStyle, EmptyObject: EmptyClusterMonitoringConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MonitoringConfig")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+{{- end }}
+	if ds, err := dcl.Diff(desired.BinaryAuthorization, actual.BinaryAuthorization, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareClusterBinaryAuthorizationNewStyle, EmptyObject: EmptyClusterBinaryAuthorization, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("BinaryAuthorization")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if len(newDiffs) > 0 {
+		c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs)
+	}
+	return newDiffs, nil
+}
+// compareClusterNetworkingNewStyle diffs two ClusterNetworking values field by field.
+func compareClusterNetworkingNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
+	var diffs []*dcl.FieldDiff
+
+	desired, ok := d.(*ClusterNetworking)
+	if !ok {
+		desiredNotPointer, ok := d.(ClusterNetworking)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a ClusterNetworking or *ClusterNetworking", d)
+		}
+		desired = &desiredNotPointer
+	}
+	actual, ok := a.(*ClusterNetworking)
+	if !ok {
+		actualNotPointer, ok := a.(ClusterNetworking)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a ClusterNetworking", a)
+		}
+		actual = &actualNotPointer
+	}
+
+	if ds, err := dcl.Diff(desired.VPCId, actual.VPCId, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("VpcId")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.PodAddressCidrBlocks, actual.PodAddressCidrBlocks, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PodAddressCidrBlocks")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.ServiceAddressCidrBlocks, actual.ServiceAddressCidrBlocks, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ServiceAddressCidrBlocks")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.PerNodePoolSgRulesDisabled, actual.PerNodePoolSgRulesDisabled, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("PerNodePoolSgRulesDisabled")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+	return diffs, nil
+}
+
+// compareClusterControlPlaneNewStyle diffs two ClusterControlPlane values field by
+// field; nested message fields delegate to their own compare functions.
+func compareClusterControlPlaneNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
+	var diffs []*dcl.FieldDiff
+
+	desired, ok := d.(*ClusterControlPlane)
+	if !ok {
+		desiredNotPointer, ok := d.(ClusterControlPlane)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a ClusterControlPlane or *ClusterControlPlane", d)
+		}
+		desired = &desiredNotPointer
+	}
+	actual, ok := a.(*ClusterControlPlane)
+	if !ok {
+		actualNotPointer, ok := a.(ClusterControlPlane)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a ClusterControlPlane", a)
+		}
+		actual = &actualNotPointer
+	}
+
+	if ds, err := dcl.Diff(desired.Version, actual.Version, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("Version")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.InstanceType, actual.InstanceType, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("InstanceType")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.SshConfig, actual.SshConfig, dcl.DiffInfo{ObjectFunction: compareClusterControlPlaneSshConfigNewStyle, EmptyObject: EmptyClusterControlPlaneSshConfig, OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("SshConfig")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.SubnetIds, actual.SubnetIds, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SubnetIds")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.ConfigEncryption, actual.ConfigEncryption, dcl.DiffInfo{ObjectFunction: compareClusterControlPlaneConfigEncryptionNewStyle, EmptyObject: EmptyClusterControlPlaneConfigEncryption, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ConfigEncryption")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.SecurityGroupIds, actual.SecurityGroupIds, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("SecurityGroupIds")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.IamInstanceProfile, actual.IamInstanceProfile, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("IamInstanceProfile")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.RootVolume, actual.RootVolume, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareClusterControlPlaneRootVolumeNewStyle, EmptyObject: EmptyClusterControlPlaneRootVolume, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("RootVolume")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.MainVolume, actual.MainVolume, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareClusterControlPlaneMainVolumeNewStyle, EmptyObject: EmptyClusterControlPlaneMainVolume, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MainVolume")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.DatabaseEncryption, actual.DatabaseEncryption, dcl.DiffInfo{ObjectFunction: compareClusterControlPlaneDatabaseEncryptionNewStyle, EmptyObject: EmptyClusterControlPlaneDatabaseEncryption, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DatabaseEncryption")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.Tags, actual.Tags, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("Tags")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.AwsServicesAuthentication, actual.AwsServicesAuthentication, dcl.DiffInfo{ObjectFunction: compareClusterControlPlaneAwsServicesAuthenticationNewStyle, EmptyObject: EmptyClusterControlPlaneAwsServicesAuthentication, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AwsServicesAuthentication")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.ProxyConfig, actual.ProxyConfig, dcl.DiffInfo{ObjectFunction: compareClusterControlPlaneProxyConfigNewStyle, EmptyObject: EmptyClusterControlPlaneProxyConfig, OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("ProxyConfig")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+{{- if ne $.TargetVersionName "ga" }}
+
+	if ds, err := dcl.Diff(desired.InstancePlacement, actual.InstancePlacement, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareClusterControlPlaneInstancePlacementNewStyle, EmptyObject: EmptyClusterControlPlaneInstancePlacement, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstancePlacement")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+{{- end }}
+	return diffs, nil
+}
+
+// compareClusterControlPlaneSshConfigNewStyle diffs two ClusterControlPlaneSshConfig values.
+func compareClusterControlPlaneSshConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
+	var diffs []*dcl.FieldDiff
+
+	desired, ok := d.(*ClusterControlPlaneSshConfig)
+	if !ok {
+		desiredNotPointer, ok := d.(ClusterControlPlaneSshConfig)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneSshConfig or *ClusterControlPlaneSshConfig", d)
+		}
+		desired = &desiredNotPointer
+	}
+	actual, ok := a.(*ClusterControlPlaneSshConfig)
+	if !ok {
+		actualNotPointer, ok := a.(ClusterControlPlaneSshConfig)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneSshConfig", a)
+		}
+		actual = &actualNotPointer
+	}
+
+	if ds, err := dcl.Diff(desired.Ec2KeyPair, actual.Ec2KeyPair, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("Ec2KeyPair")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+	return diffs, nil
+}
+
+// compareClusterControlPlaneConfigEncryptionNewStyle diffs two
+// ClusterControlPlaneConfigEncryption values.
+func compareClusterControlPlaneConfigEncryptionNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
+	var diffs []*dcl.FieldDiff
+
+	desired, ok := d.(*ClusterControlPlaneConfigEncryption)
+	if !ok {
+		desiredNotPointer, ok := d.(ClusterControlPlaneConfigEncryption)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneConfigEncryption or *ClusterControlPlaneConfigEncryption", d)
+		}
+		desired = &desiredNotPointer
+	}
+	actual, ok := a.(*ClusterControlPlaneConfigEncryption)
+	if !ok {
+		actualNotPointer, ok := a.(ClusterControlPlaneConfigEncryption)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneConfigEncryption", a)
+		}
+		actual = &actualNotPointer
+	}
+
+	if ds, err := dcl.Diff(desired.KmsKeyArn, actual.KmsKeyArn, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("KmsKeyArn")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+	return diffs, nil
+}
+
+// compareClusterControlPlaneRootVolumeNewStyle diffs two ClusterControlPlaneRootVolume
+// values; root-volume changes trigger an in-place cluster update.
+func compareClusterControlPlaneRootVolumeNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
+	var diffs []*dcl.FieldDiff
+
+	desired, ok := d.(*ClusterControlPlaneRootVolume)
+	if !ok {
+		desiredNotPointer, ok := d.(ClusterControlPlaneRootVolume)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneRootVolume or *ClusterControlPlaneRootVolume", d)
+		}
+		desired = &desiredNotPointer
+	}
+	actual, ok := a.(*ClusterControlPlaneRootVolume)
+	if !ok {
+		actualNotPointer, ok := a.(ClusterControlPlaneRootVolume)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneRootVolume", a)
+		}
+		actual = &actualNotPointer
+	}
+
+	if ds, err := dcl.Diff(desired.SizeGib, actual.SizeGib, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("SizeGib")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.VolumeType, actual.VolumeType, dcl.DiffInfo{ServerDefault: true, Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("VolumeType")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.Iops, actual.Iops, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("Iops")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.Throughput, actual.Throughput, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("Throughput")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.KmsKeyArn, actual.KmsKeyArn, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("KmsKeyArn")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+	return diffs, nil
+}
+
+// compareClusterControlPlaneMainVolumeNewStyle diffs two ClusterControlPlaneMainVolume
+// values; main-volume changes require recreating the cluster.
+func compareClusterControlPlaneMainVolumeNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
+	var diffs []*dcl.FieldDiff
+
+	desired, ok := d.(*ClusterControlPlaneMainVolume)
+	if !ok {
+		desiredNotPointer, ok := d.(ClusterControlPlaneMainVolume)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneMainVolume or *ClusterControlPlaneMainVolume", d)
+		}
+		desired = &desiredNotPointer
+	}
+	actual, ok := a.(*ClusterControlPlaneMainVolume)
+	if !ok {
+		actualNotPointer, ok := a.(ClusterControlPlaneMainVolume)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneMainVolume", a)
+		}
+		actual = &actualNotPointer
+	}
+
+	if ds, err := dcl.Diff(desired.SizeGib, actual.SizeGib, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SizeGib")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.VolumeType, actual.VolumeType, dcl.DiffInfo{ServerDefault: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("VolumeType")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.Iops, actual.Iops, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Iops")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.Throughput, actual.Throughput, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Throughput")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.KmsKeyArn, actual.KmsKeyArn, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KmsKeyArn")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+	return diffs, nil
+}
+
+// compareClusterControlPlaneDatabaseEncryptionNewStyle diffs two
+// ClusterControlPlaneDatabaseEncryption values.
+func compareClusterControlPlaneDatabaseEncryptionNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
+	var diffs []*dcl.FieldDiff
+
+	desired, ok := d.(*ClusterControlPlaneDatabaseEncryption)
+	if !ok {
+		desiredNotPointer, ok := d.(ClusterControlPlaneDatabaseEncryption)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneDatabaseEncryption or *ClusterControlPlaneDatabaseEncryption", d)
+		}
+		desired = &desiredNotPointer
+	}
+	actual, ok := a.(*ClusterControlPlaneDatabaseEncryption)
+	if !ok {
+		actualNotPointer, ok := a.(ClusterControlPlaneDatabaseEncryption)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneDatabaseEncryption", a)
+		}
+		actual = &actualNotPointer
+	}
+
+	if ds, err := dcl.Diff(desired.KmsKeyArn, actual.KmsKeyArn, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KmsKeyArn")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+	return diffs, nil
+}
+
+// compareClusterControlPlaneAwsServicesAuthenticationNewStyle diffs two
+// ClusterControlPlaneAwsServicesAuthentication values.
+func compareClusterControlPlaneAwsServicesAuthenticationNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
+	var diffs []*dcl.FieldDiff
+
+	desired, ok := d.(*ClusterControlPlaneAwsServicesAuthentication)
+	if !ok {
+		desiredNotPointer, ok := d.(ClusterControlPlaneAwsServicesAuthentication)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneAwsServicesAuthentication or *ClusterControlPlaneAwsServicesAuthentication", d)
+		}
+		desired = &desiredNotPointer
+	}
+	actual, ok := a.(*ClusterControlPlaneAwsServicesAuthentication)
+	if !ok {
+		actualNotPointer, ok := a.(ClusterControlPlaneAwsServicesAuthentication)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneAwsServicesAuthentication", a)
+		}
+		actual = &actualNotPointer
+	}
+
+	if ds, err := dcl.Diff(desired.RoleArn, actual.RoleArn, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("RoleArn")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.RoleSessionName, actual.RoleSessionName, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("RoleSessionName")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+	return diffs, nil
+}
+
+// compareClusterControlPlaneProxyConfigNewStyle diffs two ClusterControlPlaneProxyConfig values.
+func compareClusterControlPlaneProxyConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
+	var diffs []*dcl.FieldDiff
+
+	desired, ok := d.(*ClusterControlPlaneProxyConfig)
+	if !ok {
+		desiredNotPointer, ok := d.(ClusterControlPlaneProxyConfig)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneProxyConfig or *ClusterControlPlaneProxyConfig", d)
+		}
+		desired = &desiredNotPointer
+	}
+	actual, ok := a.(*ClusterControlPlaneProxyConfig)
+	if !ok {
+		actualNotPointer, ok := a.(ClusterControlPlaneProxyConfig)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneProxyConfig", a)
+		}
+		actual = &actualNotPointer
+	}
+
+	if ds, err := dcl.Diff(desired.SecretArn, actual.SecretArn, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("SecretArn")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.SecretVersion, actual.SecretVersion, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("SecretVersion")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+	return diffs, nil
+}
+
+{{- if ne $.TargetVersionName "ga" }}
+// compareClusterControlPlaneInstancePlacementNewStyle diffs two
+// ClusterControlPlaneInstancePlacement values (beta-only field).
+func compareClusterControlPlaneInstancePlacementNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
+	var diffs []*dcl.FieldDiff
+
+	desired, ok := d.(*ClusterControlPlaneInstancePlacement)
+	if !ok {
+		desiredNotPointer, ok := d.(ClusterControlPlaneInstancePlacement)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneInstancePlacement or *ClusterControlPlaneInstancePlacement", d)
+		}
+		desired = &desiredNotPointer
+	}
+	actual, ok := a.(*ClusterControlPlaneInstancePlacement)
+	if !ok {
+		actualNotPointer, ok := a.(ClusterControlPlaneInstancePlacement)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneInstancePlacement", a)
+		}
+		actual = &actualNotPointer
+	}
+
+	if ds, err := dcl.Diff(desired.Tenancy, actual.Tenancy, dcl.DiffInfo{ServerDefault: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Tenancy")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+	return diffs, nil
+}
+
+{{- end }}
+// compareClusterAuthorizationNewStyle diffs two ClusterAuthorization values.
+func compareClusterAuthorizationNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
+	var diffs []*dcl.FieldDiff
+
+	desired, ok := d.(*ClusterAuthorization)
+	if !ok {
+		desiredNotPointer, ok := d.(ClusterAuthorization)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a ClusterAuthorization or *ClusterAuthorization", d)
+		}
+		desired = &desiredNotPointer
+	}
+	actual, ok := a.(*ClusterAuthorization)
+	if !ok {
+		actualNotPointer, ok := a.(ClusterAuthorization)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a ClusterAuthorization", a)
+		}
+		actual = &actualNotPointer
+	}
+
+	if ds, err := dcl.Diff(desired.AdminUsers, actual.AdminUsers, dcl.DiffInfo{ObjectFunction: compareClusterAuthorizationAdminUsersNewStyle, EmptyObject: EmptyClusterAuthorizationAdminUsers, OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("AdminUsers")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.AdminGroups, actual.AdminGroups, dcl.DiffInfo{ObjectFunction: compareClusterAuthorizationAdminGroupsNewStyle, EmptyObject: EmptyClusterAuthorizationAdminGroups, OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("AdminGroups")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+ } + return diffs, nil +} + +func compareClusterAuthorizationAdminUsersNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterAuthorizationAdminUsers) + if !ok { + desiredNotPointer, ok := d.(ClusterAuthorizationAdminUsers) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterAuthorizationAdminUsers or *ClusterAuthorizationAdminUsers", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterAuthorizationAdminUsers) + if !ok { + actualNotPointer, ok := a.(ClusterAuthorizationAdminUsers) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterAuthorizationAdminUsers", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Username, actual.Username, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("Username")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareClusterAuthorizationAdminGroupsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterAuthorizationAdminGroups) + if !ok { + desiredNotPointer, ok := d.(ClusterAuthorizationAdminGroups) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterAuthorizationAdminGroups or *ClusterAuthorizationAdminGroups", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterAuthorizationAdminGroups) + if !ok { + actualNotPointer, ok := a.(ClusterAuthorizationAdminGroups) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterAuthorizationAdminGroups", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Group, actual.Group, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("Group")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterWorkloadIdentityConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterWorkloadIdentityConfig) + if !ok { + desiredNotPointer, ok := d.(ClusterWorkloadIdentityConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterWorkloadIdentityConfig or *ClusterWorkloadIdentityConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterWorkloadIdentityConfig) + if !ok { + actualNotPointer, ok := a.(ClusterWorkloadIdentityConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterWorkloadIdentityConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.IssuerUri, actual.IssuerUri, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("IssuerUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.WorkloadPool, actual.WorkloadPool, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("WorkloadPool")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.IdentityProvider, actual.IdentityProvider, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("IdentityProvider")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterFleetNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterFleet) + if !ok { + desiredNotPointer, ok := d.(ClusterFleet) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterFleet or *ClusterFleet", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterFleet) + if !ok { + actualNotPointer, ok := a.(ClusterFleet) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterFleet", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Membership, actual.Membership, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Membership")); len(ds) != 0 || err != nil { +{{- if ne $.TargetVersionName "ga" }} + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterLoggingConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterLoggingConfig) + if !ok { + desiredNotPointer, ok := d.(ClusterLoggingConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterLoggingConfig or *ClusterLoggingConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterLoggingConfig) + if !ok { + actualNotPointer, ok := a.(ClusterLoggingConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterLoggingConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.ComponentConfig, actual.ComponentConfig, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareClusterLoggingConfigComponentConfigNewStyle, EmptyObject: EmptyClusterLoggingConfigComponentConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ComponentConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterLoggingConfigComponentConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterLoggingConfigComponentConfig) + if !ok { + desiredNotPointer, ok := d.(ClusterLoggingConfigComponentConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterLoggingConfigComponentConfig or *ClusterLoggingConfigComponentConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterLoggingConfigComponentConfig) + if !ok { + actualNotPointer, ok := a.(ClusterLoggingConfigComponentConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterLoggingConfigComponentConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.EnableComponents, actual.EnableComponents, dcl.DiffInfo{ServerDefault: true, Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("EnableComponents")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterMonitoringConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterMonitoringConfig) + if !ok { + desiredNotPointer, ok := d.(ClusterMonitoringConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterMonitoringConfig or *ClusterMonitoringConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterMonitoringConfig) + if !ok { + actualNotPointer, ok := a.(ClusterMonitoringConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterMonitoringConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.ManagedPrometheusConfig, actual.ManagedPrometheusConfig, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareClusterMonitoringConfigManagedPrometheusConfigNewStyle, EmptyObject: EmptyClusterMonitoringConfigManagedPrometheusConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ManagedPrometheusConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterMonitoringConfigManagedPrometheusConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterMonitoringConfigManagedPrometheusConfig) + if !ok { + desiredNotPointer, ok := d.(ClusterMonitoringConfigManagedPrometheusConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterMonitoringConfigManagedPrometheusConfig or *ClusterMonitoringConfigManagedPrometheusConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterMonitoringConfigManagedPrometheusConfig) + if !ok { + actualNotPointer, ok := a.(ClusterMonitoringConfigManagedPrometheusConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterMonitoringConfigManagedPrometheusConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Enabled, actual.Enabled, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("Enabled")); len(ds) != 0 || err != nil { +{{- end }} + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterBinaryAuthorizationNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterBinaryAuthorization) + if !ok { + desiredNotPointer, ok := d.(ClusterBinaryAuthorization) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterBinaryAuthorization or *ClusterBinaryAuthorization", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterBinaryAuthorization) + if !ok { + actualNotPointer, ok := a.(ClusterBinaryAuthorization) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterBinaryAuthorization", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.EvaluationMode, actual.EvaluationMode, dcl.DiffInfo{ServerDefault: true, Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("EvaluationMode")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +// urlNormalized returns a copy of the resource struct with values normalized +// for URL substitutions. For instance, it converts long-form self-links to +// short-form so they can be substituted in. 
+func (r *Cluster) urlNormalized() *Cluster { + normalized := dcl.Copy(*r).(Cluster) + normalized.Name = dcl.SelfLinkToName(r.Name) + normalized.Description = dcl.SelfLinkToName(r.Description) + normalized.AwsRegion = dcl.SelfLinkToName(r.AwsRegion) + normalized.Endpoint = dcl.SelfLinkToName(r.Endpoint) + normalized.Uid = dcl.SelfLinkToName(r.Uid) + normalized.Etag = dcl.SelfLinkToName(r.Etag) + normalized.Project = dcl.SelfLinkToName(r.Project) + normalized.Location = dcl.SelfLinkToName(r.Location) + return &normalized +} + +func (r *Cluster) updateURL(userBasePath, updateName string) (string, error) { + nr := r.urlNormalized() + if updateName == "UpdateAwsCluster" { + fields := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/awsClusters/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, fields), nil + + } + + return "", fmt.Errorf("unknown update name: %s", updateName) +} + +// marshal encodes the Cluster resource into JSON for a Create request, and +// performs transformations from the resource schema to the API schema if +// necessary. +func (r *Cluster) marshal(c *Client) ([]byte, error) { + m, err := expandCluster(c, r) + if err != nil { + return nil, fmt.Errorf("error marshalling Cluster: %w", err) + } + + return json.Marshal(m) +} + +// unmarshalCluster decodes JSON responses into the Cluster resource schema. 
+func unmarshalCluster(b []byte, c *Client, res *Cluster) (*Cluster, error) { + var m map[string]interface{} + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + return unmarshalMapCluster(m, c, res) +} + +func unmarshalMapCluster(m map[string]interface{}, c *Client, res *Cluster) (*Cluster, error) { + + flattened := flattenCluster(c, m, res) + if flattened == nil { + return nil, fmt.Errorf("attempted to flatten empty json object") + } + return flattened, nil +} + +// expandCluster expands Cluster into a JSON request object. +func expandCluster(c *Client, f *Cluster) (map[string]interface{}, error) { + m := make(map[string]interface{}) + res := f + _ = res + if v, err := dcl.DeriveField("projects/%s/locations/%s/awsClusters/%s", f.Name, dcl.SelfLinkToName(f.Project), dcl.SelfLinkToName(f.Location), dcl.SelfLinkToName(f.Name)); err != nil { + return nil, fmt.Errorf("error expanding Name into name: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["name"] = v + } + if v := f.Description; dcl.ValueShouldBeSent(v) { + m["description"] = v + } + if v, err := expandClusterNetworking(c, f.Networking, res); err != nil { + return nil, fmt.Errorf("error expanding Networking into networking: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["networking"] = v + } + if v := f.AwsRegion; dcl.ValueShouldBeSent(v) { + m["awsRegion"] = v + } + if v, err := expandClusterControlPlane(c, f.ControlPlane, res); err != nil { + return nil, fmt.Errorf("error expanding ControlPlane into controlPlane: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["controlPlane"] = v + } + if v, err := expandClusterAuthorization(c, f.Authorization, res); err != nil { + return nil, fmt.Errorf("error expanding Authorization into authorization: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["authorization"] = v + } + if v := f.Annotations; dcl.ValueShouldBeSent(v) { + m["annotations"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, 
fmt.Errorf("error expanding Project into project: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["project"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Location into location: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["location"] = v + } + if v, err := expandClusterFleet(c, f.Fleet, res); err != nil { + return nil, fmt.Errorf("error expanding Fleet into fleet: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["fleet"] = v +{{- if ne $.TargetVersionName "ga" }} + } + if v, err := expandClusterLoggingConfig(c, f.LoggingConfig, res); err != nil { + return nil, fmt.Errorf("error expanding LoggingConfig into loggingConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["loggingConfig"] = v + } + if v, err := expandClusterMonitoringConfig(c, f.MonitoringConfig, res); err != nil { + return nil, fmt.Errorf("error expanding MonitoringConfig into monitoringConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["monitoringConfig"] = v +{{- end }} + } + if v, err := expandClusterBinaryAuthorization(c, f.BinaryAuthorization, res); err != nil { + return nil, fmt.Errorf("error expanding BinaryAuthorization into binaryAuthorization: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["binaryAuthorization"] = v + } + + return m, nil +} + +// flattenCluster flattens Cluster from a JSON request object into the +// Cluster type. 
+func flattenCluster(c *Client, i interface{}, res *Cluster) *Cluster { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + if len(m) == 0 { + return nil + } + + resultRes := &Cluster{} + resultRes.Name = dcl.FlattenString(m["name"]) + resultRes.Description = dcl.FlattenString(m["description"]) + resultRes.Networking = flattenClusterNetworking(c, m["networking"], res) + resultRes.AwsRegion = dcl.FlattenString(m["awsRegion"]) + resultRes.ControlPlane = flattenClusterControlPlane(c, m["controlPlane"], res) + resultRes.Authorization = flattenClusterAuthorization(c, m["authorization"], res) + resultRes.State = flattenClusterStateEnum(m["state"]) + resultRes.Endpoint = dcl.FlattenString(m["endpoint"]) + resultRes.Uid = dcl.FlattenString(m["uid"]) + resultRes.Reconciling = dcl.FlattenBool(m["reconciling"]) + resultRes.CreateTime = dcl.FlattenString(m["createTime"]) + resultRes.UpdateTime = dcl.FlattenString(m["updateTime"]) + resultRes.Etag = dcl.FlattenString(m["etag"]) + resultRes.Annotations = dcl.FlattenKeyValuePairs(m["annotations"]) + resultRes.WorkloadIdentityConfig = flattenClusterWorkloadIdentityConfig(c, m["workloadIdentityConfig"], res) + resultRes.Project = dcl.FlattenString(m["project"]) + resultRes.Location = dcl.FlattenString(m["location"]) + resultRes.Fleet = flattenClusterFleet(c, m["fleet"], res) +{{- if ne $.TargetVersionName "ga" }} + resultRes.LoggingConfig = flattenClusterLoggingConfig(c, m["loggingConfig"], res) + resultRes.MonitoringConfig = flattenClusterMonitoringConfig(c, m["monitoringConfig"], res) +{{- end }} + resultRes.BinaryAuthorization = flattenClusterBinaryAuthorization(c, m["binaryAuthorization"], res) + + return resultRes +} + +// expandClusterNetworkingMap expands the contents of ClusterNetworking into a JSON +// request object. 
+func expandClusterNetworkingMap(c *Client, f map[string]ClusterNetworking, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterNetworking(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterNetworkingSlice expands the contents of ClusterNetworking into a JSON +// request object. +func expandClusterNetworkingSlice(c *Client, f []ClusterNetworking, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterNetworking(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterNetworkingMap flattens the contents of ClusterNetworking from a JSON +// response object. +func flattenClusterNetworkingMap(c *Client, i interface{}, res *Cluster) map[string]ClusterNetworking { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterNetworking{} + } + + if len(a) == 0 { + return map[string]ClusterNetworking{} + } + + items := make(map[string]ClusterNetworking) + for k, item := range a { + items[k] = *flattenClusterNetworking(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterNetworkingSlice flattens the contents of ClusterNetworking from a JSON +// response object. 
+func flattenClusterNetworkingSlice(c *Client, i interface{}, res *Cluster) []ClusterNetworking { + a, ok := i.([]interface{}) + if !ok { + return []ClusterNetworking{} + } + + if len(a) == 0 { + return []ClusterNetworking{} + } + + items := make([]ClusterNetworking, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterNetworking(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterNetworking expands an instance of ClusterNetworking into a JSON +// request object. +func expandClusterNetworking(c *Client, f *ClusterNetworking, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.VPCId; !dcl.IsEmptyValueIndirect(v) { + m["vpcId"] = v + } + if v := f.PodAddressCidrBlocks; v != nil { + m["podAddressCidrBlocks"] = v + } + if v := f.ServiceAddressCidrBlocks; v != nil { + m["serviceAddressCidrBlocks"] = v + } + if v := f.PerNodePoolSgRulesDisabled; !dcl.IsEmptyValueIndirect(v) { + m["perNodePoolSgRulesDisabled"] = v + } + + return m, nil +} + +// flattenClusterNetworking flattens an instance of ClusterNetworking from a JSON +// response object. +func flattenClusterNetworking(c *Client, i interface{}, res *Cluster) *ClusterNetworking { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterNetworking{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterNetworking + } + r.VPCId = dcl.FlattenString(m["vpcId"]) + r.PodAddressCidrBlocks = dcl.FlattenStringSlice(m["podAddressCidrBlocks"]) + r.ServiceAddressCidrBlocks = dcl.FlattenStringSlice(m["serviceAddressCidrBlocks"]) + r.PerNodePoolSgRulesDisabled = dcl.FlattenBool(m["perNodePoolSgRulesDisabled"]) + + return r +} + +// expandClusterControlPlaneMap expands the contents of ClusterControlPlane into a JSON +// request object. 
+func expandClusterControlPlaneMap(c *Client, f map[string]ClusterControlPlane, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterControlPlane(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterControlPlaneSlice expands the contents of ClusterControlPlane into a JSON +// request object. +func expandClusterControlPlaneSlice(c *Client, f []ClusterControlPlane, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterControlPlane(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterControlPlaneMap flattens the contents of ClusterControlPlane from a JSON +// response object. +func flattenClusterControlPlaneMap(c *Client, i interface{}, res *Cluster) map[string]ClusterControlPlane { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterControlPlane{} + } + + if len(a) == 0 { + return map[string]ClusterControlPlane{} + } + + items := make(map[string]ClusterControlPlane) + for k, item := range a { + items[k] = *flattenClusterControlPlane(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterControlPlaneSlice flattens the contents of ClusterControlPlane from a JSON +// response object. 
+func flattenClusterControlPlaneSlice(c *Client, i interface{}, res *Cluster) []ClusterControlPlane { + a, ok := i.([]interface{}) + if !ok { + return []ClusterControlPlane{} + } + + if len(a) == 0 { + return []ClusterControlPlane{} + } + + items := make([]ClusterControlPlane, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterControlPlane(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterControlPlane expands an instance of ClusterControlPlane into a JSON +// request object. +func expandClusterControlPlane(c *Client, f *ClusterControlPlane, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Version; !dcl.IsEmptyValueIndirect(v) { + m["version"] = v + } + if v := f.InstanceType; !dcl.IsEmptyValueIndirect(v) { + m["instanceType"] = v + } + if v, err := expandClusterControlPlaneSshConfig(c, f.SshConfig, res); err != nil { + return nil, fmt.Errorf("error expanding SshConfig into sshConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["sshConfig"] = v + } + if v := f.SubnetIds; v != nil { + m["subnetIds"] = v + } + if v, err := expandClusterControlPlaneConfigEncryption(c, f.ConfigEncryption, res); err != nil { + return nil, fmt.Errorf("error expanding ConfigEncryption into configEncryption: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["configEncryption"] = v + } + if v := f.SecurityGroupIds; v != nil { + m["securityGroupIds"] = v + } + if v := f.IamInstanceProfile; !dcl.IsEmptyValueIndirect(v) { + m["iamInstanceProfile"] = v + } + if v, err := expandClusterControlPlaneRootVolume(c, f.RootVolume, res); err != nil { + return nil, fmt.Errorf("error expanding RootVolume into rootVolume: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["rootVolume"] = v + } + if v, err := expandClusterControlPlaneMainVolume(c, f.MainVolume, res); err != nil { + return nil, fmt.Errorf("error expanding 
MainVolume into mainVolume: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["mainVolume"] = v + } + if v, err := expandClusterControlPlaneDatabaseEncryption(c, f.DatabaseEncryption, res); err != nil { + return nil, fmt.Errorf("error expanding DatabaseEncryption into databaseEncryption: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["databaseEncryption"] = v + } + if v := f.Tags; !dcl.IsEmptyValueIndirect(v) { + m["tags"] = v + } + if v, err := expandClusterControlPlaneAwsServicesAuthentication(c, f.AwsServicesAuthentication, res); err != nil { + return nil, fmt.Errorf("error expanding AwsServicesAuthentication into awsServicesAuthentication: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["awsServicesAuthentication"] = v + } + if v, err := expandClusterControlPlaneProxyConfig(c, f.ProxyConfig, res); err != nil { + return nil, fmt.Errorf("error expanding ProxyConfig into proxyConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["proxyConfig"] = v +{{- if ne $.TargetVersionName "ga" }} + } + if v, err := expandClusterControlPlaneInstancePlacement(c, f.InstancePlacement, res); err != nil { + return nil, fmt.Errorf("error expanding InstancePlacement into instancePlacement: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["instancePlacement"] = v +{{- end }} + } + + return m, nil +} + +// flattenClusterControlPlane flattens an instance of ClusterControlPlane from a JSON +// response object. 
+func flattenClusterControlPlane(c *Client, i interface{}, res *Cluster) *ClusterControlPlane { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterControlPlane{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterControlPlane + } + r.Version = dcl.FlattenString(m["version"]) + r.InstanceType = dcl.FlattenString(m["instanceType"]) + r.SshConfig = flattenClusterControlPlaneSshConfig(c, m["sshConfig"], res) + r.SubnetIds = dcl.FlattenStringSlice(m["subnetIds"]) + r.ConfigEncryption = flattenClusterControlPlaneConfigEncryption(c, m["configEncryption"], res) + r.SecurityGroupIds = dcl.FlattenStringSlice(m["securityGroupIds"]) + r.IamInstanceProfile = dcl.FlattenString(m["iamInstanceProfile"]) + r.RootVolume = flattenClusterControlPlaneRootVolume(c, m["rootVolume"], res) + r.MainVolume = flattenClusterControlPlaneMainVolume(c, m["mainVolume"], res) + r.DatabaseEncryption = flattenClusterControlPlaneDatabaseEncryption(c, m["databaseEncryption"], res) + r.Tags = dcl.FlattenKeyValuePairs(m["tags"]) + r.AwsServicesAuthentication = flattenClusterControlPlaneAwsServicesAuthentication(c, m["awsServicesAuthentication"], res) + r.ProxyConfig = flattenClusterControlPlaneProxyConfig(c, m["proxyConfig"], res) +{{- if ne $.TargetVersionName "ga" }} + r.InstancePlacement = flattenClusterControlPlaneInstancePlacement(c, m["instancePlacement"], res) +{{- end }} + + return r +} + +// expandClusterControlPlaneSshConfigMap expands the contents of ClusterControlPlaneSshConfig into a JSON +// request object. 
+func expandClusterControlPlaneSshConfigMap(c *Client, f map[string]ClusterControlPlaneSshConfig, res *Cluster) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandClusterControlPlaneSshConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandClusterControlPlaneSshConfigSlice expands a slice of ClusterControlPlaneSshConfig values into a JSON
+// request object.
+func expandClusterControlPlaneSshConfigSlice(c *Client, f []ClusterControlPlaneSshConfig, res *Cluster) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandClusterControlPlaneSshConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenClusterControlPlaneSshConfigMap flattens a map of ClusterControlPlaneSshConfig values from a JSON
+// response object.
+func flattenClusterControlPlaneSshConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterControlPlaneSshConfig {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]ClusterControlPlaneSshConfig{}
+	}
+
+	if len(a) == 0 {
+		return map[string]ClusterControlPlaneSshConfig{}
+	}
+
+	items := make(map[string]ClusterControlPlaneSshConfig)
+	for k, item := range a {
+		items[k] = *flattenClusterControlPlaneSshConfig(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenClusterControlPlaneSshConfigSlice flattens a slice of ClusterControlPlaneSshConfig values from a JSON
+// response object.
+func flattenClusterControlPlaneSshConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterControlPlaneSshConfig {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []ClusterControlPlaneSshConfig{}
+	}
+
+	if len(a) == 0 {
+		return []ClusterControlPlaneSshConfig{}
+	}
+
+	items := make([]ClusterControlPlaneSshConfig, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenClusterControlPlaneSshConfig(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandClusterControlPlaneSshConfig expands an instance of ClusterControlPlaneSshConfig into a JSON
+// request object.
+func expandClusterControlPlaneSshConfig(c *Client, f *ClusterControlPlaneSshConfig, res *Cluster) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.Ec2KeyPair; !dcl.IsEmptyValueIndirect(v) {
+		m["ec2KeyPair"] = v
+	}
+
+	return m, nil
+}
+
+// flattenClusterControlPlaneSshConfig flattens an instance of ClusterControlPlaneSshConfig from a JSON
+// response object.
+func flattenClusterControlPlaneSshConfig(c *Client, i interface{}, res *Cluster) *ClusterControlPlaneSshConfig {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &ClusterControlPlaneSshConfig{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyClusterControlPlaneSshConfig
+	}
+	r.Ec2KeyPair = dcl.FlattenString(m["ec2KeyPair"])
+
+	return r
+}
+
+// expandClusterControlPlaneConfigEncryptionMap expands a map of ClusterControlPlaneConfigEncryption values into a JSON
+// request object.
+func expandClusterControlPlaneConfigEncryptionMap(c *Client, f map[string]ClusterControlPlaneConfigEncryption, res *Cluster) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandClusterControlPlaneConfigEncryption(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandClusterControlPlaneConfigEncryptionSlice expands a slice of ClusterControlPlaneConfigEncryption values into a JSON
+// request object.
+func expandClusterControlPlaneConfigEncryptionSlice(c *Client, f []ClusterControlPlaneConfigEncryption, res *Cluster) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandClusterControlPlaneConfigEncryption(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenClusterControlPlaneConfigEncryptionMap flattens a map of ClusterControlPlaneConfigEncryption values from a JSON
+// response object.
+func flattenClusterControlPlaneConfigEncryptionMap(c *Client, i interface{}, res *Cluster) map[string]ClusterControlPlaneConfigEncryption {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]ClusterControlPlaneConfigEncryption{}
+	}
+
+	if len(a) == 0 {
+		return map[string]ClusterControlPlaneConfigEncryption{}
+	}
+
+	items := make(map[string]ClusterControlPlaneConfigEncryption)
+	for k, item := range a {
+		items[k] = *flattenClusterControlPlaneConfigEncryption(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenClusterControlPlaneConfigEncryptionSlice flattens a slice of ClusterControlPlaneConfigEncryption values from a JSON
+// response object.
+func flattenClusterControlPlaneConfigEncryptionSlice(c *Client, i interface{}, res *Cluster) []ClusterControlPlaneConfigEncryption {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []ClusterControlPlaneConfigEncryption{}
+	}
+
+	if len(a) == 0 {
+		return []ClusterControlPlaneConfigEncryption{}
+	}
+
+	items := make([]ClusterControlPlaneConfigEncryption, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenClusterControlPlaneConfigEncryption(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandClusterControlPlaneConfigEncryption expands an instance of ClusterControlPlaneConfigEncryption into a JSON
+// request object.
+func expandClusterControlPlaneConfigEncryption(c *Client, f *ClusterControlPlaneConfigEncryption, res *Cluster) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.KmsKeyArn; !dcl.IsEmptyValueIndirect(v) {
+		m["kmsKeyArn"] = v
+	}
+
+	return m, nil
+}
+
+// flattenClusterControlPlaneConfigEncryption flattens an instance of ClusterControlPlaneConfigEncryption from a JSON
+// response object.
+func flattenClusterControlPlaneConfigEncryption(c *Client, i interface{}, res *Cluster) *ClusterControlPlaneConfigEncryption {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &ClusterControlPlaneConfigEncryption{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyClusterControlPlaneConfigEncryption
+	}
+	r.KmsKeyArn = dcl.FlattenString(m["kmsKeyArn"])
+
+	return r
+}
+
+// expandClusterControlPlaneRootVolumeMap expands a map of ClusterControlPlaneRootVolume values into a JSON
+// request object.
+func expandClusterControlPlaneRootVolumeMap(c *Client, f map[string]ClusterControlPlaneRootVolume, res *Cluster) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandClusterControlPlaneRootVolume(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandClusterControlPlaneRootVolumeSlice expands a slice of ClusterControlPlaneRootVolume values into a JSON
+// request object.
+func expandClusterControlPlaneRootVolumeSlice(c *Client, f []ClusterControlPlaneRootVolume, res *Cluster) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandClusterControlPlaneRootVolume(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenClusterControlPlaneRootVolumeMap flattens a map of ClusterControlPlaneRootVolume values from a JSON
+// response object.
+func flattenClusterControlPlaneRootVolumeMap(c *Client, i interface{}, res *Cluster) map[string]ClusterControlPlaneRootVolume {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]ClusterControlPlaneRootVolume{}
+	}
+
+	if len(a) == 0 {
+		return map[string]ClusterControlPlaneRootVolume{}
+	}
+
+	items := make(map[string]ClusterControlPlaneRootVolume)
+	for k, item := range a {
+		items[k] = *flattenClusterControlPlaneRootVolume(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenClusterControlPlaneRootVolumeSlice flattens a slice of ClusterControlPlaneRootVolume values from a JSON
+// response object.
+func flattenClusterControlPlaneRootVolumeSlice(c *Client, i interface{}, res *Cluster) []ClusterControlPlaneRootVolume {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []ClusterControlPlaneRootVolume{}
+	}
+
+	if len(a) == 0 {
+		return []ClusterControlPlaneRootVolume{}
+	}
+
+	items := make([]ClusterControlPlaneRootVolume, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenClusterControlPlaneRootVolume(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandClusterControlPlaneRootVolume expands an instance of ClusterControlPlaneRootVolume into a JSON
+// request object.
+func expandClusterControlPlaneRootVolume(c *Client, f *ClusterControlPlaneRootVolume, res *Cluster) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.SizeGib; !dcl.IsEmptyValueIndirect(v) {
+		m["sizeGib"] = v
+	}
+	if v := f.VolumeType; !dcl.IsEmptyValueIndirect(v) {
+		m["volumeType"] = v
+	}
+	if v := f.Iops; !dcl.IsEmptyValueIndirect(v) {
+		m["iops"] = v
+	}
+	if v := f.Throughput; !dcl.IsEmptyValueIndirect(v) {
+		m["throughput"] = v
+	}
+	if v := f.KmsKeyArn; !dcl.IsEmptyValueIndirect(v) {
+		m["kmsKeyArn"] = v
+	}
+
+	return m, nil
+}
+
+// flattenClusterControlPlaneRootVolume flattens an instance of ClusterControlPlaneRootVolume from a JSON
+// response object.
+func flattenClusterControlPlaneRootVolume(c *Client, i interface{}, res *Cluster) *ClusterControlPlaneRootVolume {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &ClusterControlPlaneRootVolume{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyClusterControlPlaneRootVolume
+	}
+	r.SizeGib = dcl.FlattenInteger(m["sizeGib"])
+	r.VolumeType = flattenClusterControlPlaneRootVolumeVolumeTypeEnum(m["volumeType"])
+	r.Iops = dcl.FlattenInteger(m["iops"])
+	r.Throughput = dcl.FlattenInteger(m["throughput"])
+	r.KmsKeyArn = dcl.FlattenString(m["kmsKeyArn"])
+
+	return r
+}
+
+// expandClusterControlPlaneMainVolumeMap expands a map of ClusterControlPlaneMainVolume values into a JSON
+// request object.
+func expandClusterControlPlaneMainVolumeMap(c *Client, f map[string]ClusterControlPlaneMainVolume, res *Cluster) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandClusterControlPlaneMainVolume(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandClusterControlPlaneMainVolumeSlice expands a slice of ClusterControlPlaneMainVolume values into a JSON
+// request object.
+func expandClusterControlPlaneMainVolumeSlice(c *Client, f []ClusterControlPlaneMainVolume, res *Cluster) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandClusterControlPlaneMainVolume(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenClusterControlPlaneMainVolumeMap flattens a map of ClusterControlPlaneMainVolume values from a JSON
+// response object.
+func flattenClusterControlPlaneMainVolumeMap(c *Client, i interface{}, res *Cluster) map[string]ClusterControlPlaneMainVolume {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]ClusterControlPlaneMainVolume{}
+	}
+
+	if len(a) == 0 {
+		return map[string]ClusterControlPlaneMainVolume{}
+	}
+
+	items := make(map[string]ClusterControlPlaneMainVolume)
+	for k, item := range a {
+		items[k] = *flattenClusterControlPlaneMainVolume(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenClusterControlPlaneMainVolumeSlice flattens a slice of ClusterControlPlaneMainVolume values from a JSON
+// response object.
+func flattenClusterControlPlaneMainVolumeSlice(c *Client, i interface{}, res *Cluster) []ClusterControlPlaneMainVolume {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []ClusterControlPlaneMainVolume{}
+	}
+
+	if len(a) == 0 {
+		return []ClusterControlPlaneMainVolume{}
+	}
+
+	items := make([]ClusterControlPlaneMainVolume, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenClusterControlPlaneMainVolume(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandClusterControlPlaneMainVolume expands an instance of ClusterControlPlaneMainVolume into a JSON
+// request object.
+func expandClusterControlPlaneMainVolume(c *Client, f *ClusterControlPlaneMainVolume, res *Cluster) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.SizeGib; !dcl.IsEmptyValueIndirect(v) {
+		m["sizeGib"] = v
+	}
+	if v := f.VolumeType; !dcl.IsEmptyValueIndirect(v) {
+		m["volumeType"] = v
+	}
+	if v := f.Iops; !dcl.IsEmptyValueIndirect(v) {
+		m["iops"] = v
+	}
+	if v := f.Throughput; !dcl.IsEmptyValueIndirect(v) {
+		m["throughput"] = v
+	}
+	if v := f.KmsKeyArn; !dcl.IsEmptyValueIndirect(v) {
+		m["kmsKeyArn"] = v
+	}
+
+	return m, nil
+}
+
+// flattenClusterControlPlaneMainVolume flattens an instance of ClusterControlPlaneMainVolume from a JSON
+// response object.
+func flattenClusterControlPlaneMainVolume(c *Client, i interface{}, res *Cluster) *ClusterControlPlaneMainVolume {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &ClusterControlPlaneMainVolume{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyClusterControlPlaneMainVolume
+	}
+	r.SizeGib = dcl.FlattenInteger(m["sizeGib"])
+	r.VolumeType = flattenClusterControlPlaneMainVolumeVolumeTypeEnum(m["volumeType"])
+	r.Iops = dcl.FlattenInteger(m["iops"])
+	r.Throughput = dcl.FlattenInteger(m["throughput"])
+	r.KmsKeyArn = dcl.FlattenString(m["kmsKeyArn"])
+
+	return r
+}
+
+// expandClusterControlPlaneDatabaseEncryptionMap expands a map of ClusterControlPlaneDatabaseEncryption values into a JSON
+// request object.
+func expandClusterControlPlaneDatabaseEncryptionMap(c *Client, f map[string]ClusterControlPlaneDatabaseEncryption, res *Cluster) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandClusterControlPlaneDatabaseEncryption(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandClusterControlPlaneDatabaseEncryptionSlice expands a slice of ClusterControlPlaneDatabaseEncryption values into a JSON
+// request object.
+func expandClusterControlPlaneDatabaseEncryptionSlice(c *Client, f []ClusterControlPlaneDatabaseEncryption, res *Cluster) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandClusterControlPlaneDatabaseEncryption(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenClusterControlPlaneDatabaseEncryptionMap flattens a map of ClusterControlPlaneDatabaseEncryption values from a JSON
+// response object.
+func flattenClusterControlPlaneDatabaseEncryptionMap(c *Client, i interface{}, res *Cluster) map[string]ClusterControlPlaneDatabaseEncryption {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]ClusterControlPlaneDatabaseEncryption{}
+	}
+
+	if len(a) == 0 {
+		return map[string]ClusterControlPlaneDatabaseEncryption{}
+	}
+
+	items := make(map[string]ClusterControlPlaneDatabaseEncryption)
+	for k, item := range a {
+		items[k] = *flattenClusterControlPlaneDatabaseEncryption(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenClusterControlPlaneDatabaseEncryptionSlice flattens a slice of ClusterControlPlaneDatabaseEncryption values from a JSON
+// response object.
+func flattenClusterControlPlaneDatabaseEncryptionSlice(c *Client, i interface{}, res *Cluster) []ClusterControlPlaneDatabaseEncryption {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []ClusterControlPlaneDatabaseEncryption{}
+	}
+
+	if len(a) == 0 {
+		return []ClusterControlPlaneDatabaseEncryption{}
+	}
+
+	items := make([]ClusterControlPlaneDatabaseEncryption, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenClusterControlPlaneDatabaseEncryption(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandClusterControlPlaneDatabaseEncryption expands an instance of ClusterControlPlaneDatabaseEncryption into a JSON
+// request object.
+func expandClusterControlPlaneDatabaseEncryption(c *Client, f *ClusterControlPlaneDatabaseEncryption, res *Cluster) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.KmsKeyArn; !dcl.IsEmptyValueIndirect(v) {
+		m["kmsKeyArn"] = v
+	}
+
+	return m, nil
+}
+
+// flattenClusterControlPlaneDatabaseEncryption flattens an instance of ClusterControlPlaneDatabaseEncryption from a JSON
+// response object.
+func flattenClusterControlPlaneDatabaseEncryption(c *Client, i interface{}, res *Cluster) *ClusterControlPlaneDatabaseEncryption {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &ClusterControlPlaneDatabaseEncryption{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyClusterControlPlaneDatabaseEncryption
+	}
+	r.KmsKeyArn = dcl.FlattenString(m["kmsKeyArn"])
+
+	return r
+}
+
+// expandClusterControlPlaneAwsServicesAuthenticationMap expands a map of ClusterControlPlaneAwsServicesAuthentication values into a JSON
+// request object.
+func expandClusterControlPlaneAwsServicesAuthenticationMap(c *Client, f map[string]ClusterControlPlaneAwsServicesAuthentication, res *Cluster) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandClusterControlPlaneAwsServicesAuthentication(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandClusterControlPlaneAwsServicesAuthenticationSlice expands a slice of ClusterControlPlaneAwsServicesAuthentication values into a JSON
+// request object.
+func expandClusterControlPlaneAwsServicesAuthenticationSlice(c *Client, f []ClusterControlPlaneAwsServicesAuthentication, res *Cluster) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandClusterControlPlaneAwsServicesAuthentication(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenClusterControlPlaneAwsServicesAuthenticationMap flattens a map of ClusterControlPlaneAwsServicesAuthentication values from a JSON
+// response object.
+func flattenClusterControlPlaneAwsServicesAuthenticationMap(c *Client, i interface{}, res *Cluster) map[string]ClusterControlPlaneAwsServicesAuthentication {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]ClusterControlPlaneAwsServicesAuthentication{}
+	}
+
+	if len(a) == 0 {
+		return map[string]ClusterControlPlaneAwsServicesAuthentication{}
+	}
+
+	items := make(map[string]ClusterControlPlaneAwsServicesAuthentication)
+	for k, item := range a {
+		items[k] = *flattenClusterControlPlaneAwsServicesAuthentication(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenClusterControlPlaneAwsServicesAuthenticationSlice flattens a slice of ClusterControlPlaneAwsServicesAuthentication values from a JSON
+// response object.
+func flattenClusterControlPlaneAwsServicesAuthenticationSlice(c *Client, i interface{}, res *Cluster) []ClusterControlPlaneAwsServicesAuthentication {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []ClusterControlPlaneAwsServicesAuthentication{}
+	}
+
+	if len(a) == 0 {
+		return []ClusterControlPlaneAwsServicesAuthentication{}
+	}
+
+	items := make([]ClusterControlPlaneAwsServicesAuthentication, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenClusterControlPlaneAwsServicesAuthentication(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandClusterControlPlaneAwsServicesAuthentication expands an instance of ClusterControlPlaneAwsServicesAuthentication into a JSON
+// request object.
+func expandClusterControlPlaneAwsServicesAuthentication(c *Client, f *ClusterControlPlaneAwsServicesAuthentication, res *Cluster) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.RoleArn; !dcl.IsEmptyValueIndirect(v) {
+		m["roleArn"] = v
+	}
+	if v := f.RoleSessionName; !dcl.IsEmptyValueIndirect(v) {
+		m["roleSessionName"] = v
+	}
+
+	return m, nil
+}
+
+// flattenClusterControlPlaneAwsServicesAuthentication flattens an instance of ClusterControlPlaneAwsServicesAuthentication from a JSON
+// response object.
+func flattenClusterControlPlaneAwsServicesAuthentication(c *Client, i interface{}, res *Cluster) *ClusterControlPlaneAwsServicesAuthentication {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &ClusterControlPlaneAwsServicesAuthentication{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyClusterControlPlaneAwsServicesAuthentication
+	}
+	r.RoleArn = dcl.FlattenString(m["roleArn"])
+	r.RoleSessionName = dcl.FlattenString(m["roleSessionName"])
+
+	return r
+}
+
+// expandClusterControlPlaneProxyConfigMap expands a map of ClusterControlPlaneProxyConfig values into a JSON
+// request object.
+func expandClusterControlPlaneProxyConfigMap(c *Client, f map[string]ClusterControlPlaneProxyConfig, res *Cluster) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandClusterControlPlaneProxyConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandClusterControlPlaneProxyConfigSlice expands a slice of ClusterControlPlaneProxyConfig values into a JSON
+// request object.
+func expandClusterControlPlaneProxyConfigSlice(c *Client, f []ClusterControlPlaneProxyConfig, res *Cluster) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandClusterControlPlaneProxyConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenClusterControlPlaneProxyConfigMap flattens a map of ClusterControlPlaneProxyConfig values from a JSON
+// response object.
+func flattenClusterControlPlaneProxyConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterControlPlaneProxyConfig {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]ClusterControlPlaneProxyConfig{}
+	}
+
+	if len(a) == 0 {
+		return map[string]ClusterControlPlaneProxyConfig{}
+	}
+
+	items := make(map[string]ClusterControlPlaneProxyConfig)
+	for k, item := range a {
+		items[k] = *flattenClusterControlPlaneProxyConfig(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenClusterControlPlaneProxyConfigSlice flattens a slice of ClusterControlPlaneProxyConfig values from a JSON
+// response object.
+func flattenClusterControlPlaneProxyConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterControlPlaneProxyConfig {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []ClusterControlPlaneProxyConfig{}
+	}
+
+	if len(a) == 0 {
+		return []ClusterControlPlaneProxyConfig{}
+	}
+
+	items := make([]ClusterControlPlaneProxyConfig, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenClusterControlPlaneProxyConfig(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandClusterControlPlaneProxyConfig expands an instance of ClusterControlPlaneProxyConfig into a JSON
+// request object.
+func expandClusterControlPlaneProxyConfig(c *Client, f *ClusterControlPlaneProxyConfig, res *Cluster) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.SecretArn; !dcl.IsEmptyValueIndirect(v) {
+		m["secretArn"] = v
+	}
+	if v := f.SecretVersion; !dcl.IsEmptyValueIndirect(v) {
+		m["secretVersion"] = v
+	}
+
+	return m, nil
+}
+
+// flattenClusterControlPlaneProxyConfig flattens an instance of ClusterControlPlaneProxyConfig from a JSON
+// response object.
+func flattenClusterControlPlaneProxyConfig(c *Client, i interface{}, res *Cluster) *ClusterControlPlaneProxyConfig {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &ClusterControlPlaneProxyConfig{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyClusterControlPlaneProxyConfig
+	}
+	r.SecretArn = dcl.FlattenString(m["secretArn"])
+	r.SecretVersion = dcl.FlattenString(m["secretVersion"])
+
+	return r
+}
+
+{{- if ne $.TargetVersionName "ga" }}
+// expandClusterControlPlaneInstancePlacementMap expands a map of ClusterControlPlaneInstancePlacement values into a JSON
+// request object.
+func expandClusterControlPlaneInstancePlacementMap(c *Client, f map[string]ClusterControlPlaneInstancePlacement, res *Cluster) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandClusterControlPlaneInstancePlacement(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandClusterControlPlaneInstancePlacementSlice expands a slice of ClusterControlPlaneInstancePlacement values into a JSON
+// request object.
+func expandClusterControlPlaneInstancePlacementSlice(c *Client, f []ClusterControlPlaneInstancePlacement, res *Cluster) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandClusterControlPlaneInstancePlacement(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenClusterControlPlaneInstancePlacementMap flattens a map of ClusterControlPlaneInstancePlacement values from a JSON
+// response object.
+func flattenClusterControlPlaneInstancePlacementMap(c *Client, i interface{}, res *Cluster) map[string]ClusterControlPlaneInstancePlacement {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]ClusterControlPlaneInstancePlacement{}
+	}
+
+	if len(a) == 0 {
+		return map[string]ClusterControlPlaneInstancePlacement{}
+	}
+
+	items := make(map[string]ClusterControlPlaneInstancePlacement)
+	for k, item := range a {
+		items[k] = *flattenClusterControlPlaneInstancePlacement(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenClusterControlPlaneInstancePlacementSlice flattens a slice of ClusterControlPlaneInstancePlacement values from a JSON
+// response object.
+func flattenClusterControlPlaneInstancePlacementSlice(c *Client, i interface{}, res *Cluster) []ClusterControlPlaneInstancePlacement {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []ClusterControlPlaneInstancePlacement{}
+	}
+
+	if len(a) == 0 {
+		return []ClusterControlPlaneInstancePlacement{}
+	}
+
+	items := make([]ClusterControlPlaneInstancePlacement, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenClusterControlPlaneInstancePlacement(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandClusterControlPlaneInstancePlacement expands an instance of ClusterControlPlaneInstancePlacement into a JSON
+// request object.
+func expandClusterControlPlaneInstancePlacement(c *Client, f *ClusterControlPlaneInstancePlacement, res *Cluster) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.Tenancy; !dcl.IsEmptyValueIndirect(v) {
+		m["tenancy"] = v
+	}
+
+	return m, nil
+}
+
+// flattenClusterControlPlaneInstancePlacement flattens an instance of ClusterControlPlaneInstancePlacement from a JSON
+// response object.
+func flattenClusterControlPlaneInstancePlacement(c *Client, i interface{}, res *Cluster) *ClusterControlPlaneInstancePlacement {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &ClusterControlPlaneInstancePlacement{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyClusterControlPlaneInstancePlacement
+	}
+	r.Tenancy = flattenClusterControlPlaneInstancePlacementTenancyEnum(m["tenancy"])
+
+	return r
+}
+
+{{- end }}
+// expandClusterAuthorizationMap expands a map of ClusterAuthorization values into a JSON
+// request object.
+func expandClusterAuthorizationMap(c *Client, f map[string]ClusterAuthorization, res *Cluster) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandClusterAuthorization(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandClusterAuthorizationSlice expands a slice of ClusterAuthorization values into a JSON
+// request object.
+func expandClusterAuthorizationSlice(c *Client, f []ClusterAuthorization, res *Cluster) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandClusterAuthorization(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenClusterAuthorizationMap flattens a map of ClusterAuthorization values from a JSON
+// response object.
+func flattenClusterAuthorizationMap(c *Client, i interface{}, res *Cluster) map[string]ClusterAuthorization {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]ClusterAuthorization{}
+	}
+
+	if len(a) == 0 {
+		return map[string]ClusterAuthorization{}
+	}
+
+	items := make(map[string]ClusterAuthorization)
+	for k, item := range a {
+		items[k] = *flattenClusterAuthorization(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenClusterAuthorizationSlice flattens a slice of ClusterAuthorization values from a JSON
+// response object.
+func flattenClusterAuthorizationSlice(c *Client, i interface{}, res *Cluster) []ClusterAuthorization {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []ClusterAuthorization{}
+	}
+
+	if len(a) == 0 {
+		return []ClusterAuthorization{}
+	}
+
+	items := make([]ClusterAuthorization, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenClusterAuthorization(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandClusterAuthorization expands an instance of ClusterAuthorization into a JSON
+// request object.
+func expandClusterAuthorization(c *Client, f *ClusterAuthorization, res *Cluster) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v, err := expandClusterAuthorizationAdminUsersSlice(c, f.AdminUsers, res); err != nil {
+		return nil, fmt.Errorf("error expanding AdminUsers into adminUsers: %w", err)
+	} else if v != nil {
+		m["adminUsers"] = v
+	}
+	if v, err := expandClusterAuthorizationAdminGroupsSlice(c, f.AdminGroups, res); err != nil {
+		return nil, fmt.Errorf("error expanding AdminGroups into adminGroups: %w", err)
+	} else if v != nil {
+		m["adminGroups"] = v
+	}
+
+	return m, nil
+}
+
+// flattenClusterAuthorization flattens an instance of ClusterAuthorization from a JSON
+// response object.
+func flattenClusterAuthorization(c *Client, i interface{}, res *Cluster) *ClusterAuthorization {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &ClusterAuthorization{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyClusterAuthorization
+	}
+	r.AdminUsers = flattenClusterAuthorizationAdminUsersSlice(c, m["adminUsers"], res)
+	r.AdminGroups = flattenClusterAuthorizationAdminGroupsSlice(c, m["adminGroups"], res)
+
+	return r
+}
+
+// expandClusterAuthorizationAdminUsersMap expands a map of ClusterAuthorizationAdminUsers values into a JSON
+// request object.
+func expandClusterAuthorizationAdminUsersMap(c *Client, f map[string]ClusterAuthorizationAdminUsers, res *Cluster) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandClusterAuthorizationAdminUsers(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandClusterAuthorizationAdminUsersSlice expands a slice of ClusterAuthorizationAdminUsers values into a JSON
+// request object.
+func expandClusterAuthorizationAdminUsersSlice(c *Client, f []ClusterAuthorizationAdminUsers, res *Cluster) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandClusterAuthorizationAdminUsers(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenClusterAuthorizationAdminUsersMap flattens a map of ClusterAuthorizationAdminUsers values from a JSON
+// response object.
+func flattenClusterAuthorizationAdminUsersMap(c *Client, i interface{}, res *Cluster) map[string]ClusterAuthorizationAdminUsers {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]ClusterAuthorizationAdminUsers{}
+	}
+
+	if len(a) == 0 {
+		return map[string]ClusterAuthorizationAdminUsers{}
+	}
+
+	items := make(map[string]ClusterAuthorizationAdminUsers)
+	for k, item := range a {
+		items[k] = *flattenClusterAuthorizationAdminUsers(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenClusterAuthorizationAdminUsersSlice flattens a slice of ClusterAuthorizationAdminUsers values from a JSON
+// response object.
+func flattenClusterAuthorizationAdminUsersSlice(c *Client, i interface{}, res *Cluster) []ClusterAuthorizationAdminUsers {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []ClusterAuthorizationAdminUsers{}
+	}
+
+	if len(a) == 0 {
+		return []ClusterAuthorizationAdminUsers{}
+	}
+
+	items := make([]ClusterAuthorizationAdminUsers, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenClusterAuthorizationAdminUsers(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandClusterAuthorizationAdminUsers expands an instance of ClusterAuthorizationAdminUsers into a JSON
+// request object.
+func expandClusterAuthorizationAdminUsers(c *Client, f *ClusterAuthorizationAdminUsers, res *Cluster) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.Username; !dcl.IsEmptyValueIndirect(v) {
+		m["username"] = v
+	}
+
+	return m, nil
+}
+
+// flattenClusterAuthorizationAdminUsers flattens an instance of ClusterAuthorizationAdminUsers from a JSON
+// response object.
+func flattenClusterAuthorizationAdminUsers(c *Client, i interface{}, res *Cluster) *ClusterAuthorizationAdminUsers {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &ClusterAuthorizationAdminUsers{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyClusterAuthorizationAdminUsers
+	}
+	r.Username = dcl.FlattenString(m["username"])
+
+	return r
+}
+
+// expandClusterAuthorizationAdminGroupsMap expands a map of ClusterAuthorizationAdminGroups values into a JSON
+// request object.
+func expandClusterAuthorizationAdminGroupsMap(c *Client, f map[string]ClusterAuthorizationAdminGroups, res *Cluster) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandClusterAuthorizationAdminGroups(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandClusterAuthorizationAdminGroupsSlice expands a slice of ClusterAuthorizationAdminGroups values into a JSON
+// request object.
+func expandClusterAuthorizationAdminGroupsSlice(c *Client, f []ClusterAuthorizationAdminGroups, res *Cluster) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandClusterAuthorizationAdminGroups(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenClusterAuthorizationAdminGroupsMap flattens a map of ClusterAuthorizationAdminGroups values from a JSON
+// response object.
+func flattenClusterAuthorizationAdminGroupsMap(c *Client, i interface{}, res *Cluster) map[string]ClusterAuthorizationAdminGroups {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]ClusterAuthorizationAdminGroups{}
+	}
+
+	if len(a) == 0 {
+		return map[string]ClusterAuthorizationAdminGroups{}
+	}
+
+	items := make(map[string]ClusterAuthorizationAdminGroups)
+	for k, item := range a {
+		items[k] = *flattenClusterAuthorizationAdminGroups(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenClusterAuthorizationAdminGroupsSlice flattens a slice of ClusterAuthorizationAdminGroups values from a JSON
+// response object.
+func flattenClusterAuthorizationAdminGroupsSlice(c *Client, i interface{}, res *Cluster) []ClusterAuthorizationAdminGroups {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []ClusterAuthorizationAdminGroups{}
+	}
+
+	if len(a) == 0 {
+		return []ClusterAuthorizationAdminGroups{}
+	}
+
+	items := make([]ClusterAuthorizationAdminGroups, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenClusterAuthorizationAdminGroups(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandClusterAuthorizationAdminGroups expands an instance of ClusterAuthorizationAdminGroups into a JSON
+// request object.
+func expandClusterAuthorizationAdminGroups(c *Client, f *ClusterAuthorizationAdminGroups, res *Cluster) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.Group; !dcl.IsEmptyValueIndirect(v) {
+		m["group"] = v
+	}
+
+	return m, nil
+}
+
+// flattenClusterAuthorizationAdminGroups flattens an instance of ClusterAuthorizationAdminGroups from a JSON
+// response object.
+func flattenClusterAuthorizationAdminGroups(c *Client, i interface{}, res *Cluster) *ClusterAuthorizationAdminGroups {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &ClusterAuthorizationAdminGroups{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyClusterAuthorizationAdminGroups
+	}
+	r.Group = dcl.FlattenString(m["group"])
+
+	return r
+}
+
+// expandClusterWorkloadIdentityConfigMap expands a map of ClusterWorkloadIdentityConfig values into a JSON
+// request object.
+func expandClusterWorkloadIdentityConfigMap(c *Client, f map[string]ClusterWorkloadIdentityConfig, res *Cluster) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandClusterWorkloadIdentityConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandClusterWorkloadIdentityConfigSlice expands a slice of ClusterWorkloadIdentityConfig values into a JSON
+// request object.
+// NOTE(review): DCL-generated WorkloadIdentityConfig / Fleet converters; do not
+// hand-edit — presumably regenerated by tpgtools (confirm).
+func expandClusterWorkloadIdentityConfigSlice(c *Client, f []ClusterWorkloadIdentityConfig, res *Cluster) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandClusterWorkloadIdentityConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenClusterWorkloadIdentityConfigMap flattens the contents of ClusterWorkloadIdentityConfig from a JSON
+// response object.
+func flattenClusterWorkloadIdentityConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterWorkloadIdentityConfig {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]ClusterWorkloadIdentityConfig{}
+	}
+
+	if len(a) == 0 {
+		return map[string]ClusterWorkloadIdentityConfig{}
+	}
+
+	items := make(map[string]ClusterWorkloadIdentityConfig)
+	for k, item := range a {
+		items[k] = *flattenClusterWorkloadIdentityConfig(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenClusterWorkloadIdentityConfigSlice flattens the contents of ClusterWorkloadIdentityConfig from a JSON
+// response object.
+func flattenClusterWorkloadIdentityConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterWorkloadIdentityConfig {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []ClusterWorkloadIdentityConfig{}
+	}
+
+	if len(a) == 0 {
+		return []ClusterWorkloadIdentityConfig{}
+	}
+
+	items := make([]ClusterWorkloadIdentityConfig, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenClusterWorkloadIdentityConfig(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandClusterWorkloadIdentityConfig expands an instance of ClusterWorkloadIdentityConfig into a JSON
+// request object.
+func expandClusterWorkloadIdentityConfig(c *Client, f *ClusterWorkloadIdentityConfig, res *Cluster) (map[string]interface{}, error) {
+	// Unlike the admin-user/group expanders above, empty structs are elided entirely.
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.IssuerUri; !dcl.IsEmptyValueIndirect(v) {
+		m["issuerUri"] = v
+	}
+	if v := f.WorkloadPool; !dcl.IsEmptyValueIndirect(v) {
+		m["workloadPool"] = v
+	}
+	if v := f.IdentityProvider; !dcl.IsEmptyValueIndirect(v) {
+		m["identityProvider"] = v
+	}
+
+	return m, nil
+}
+
+// flattenClusterWorkloadIdentityConfig flattens an instance of ClusterWorkloadIdentityConfig from a JSON
+// response object.
+func flattenClusterWorkloadIdentityConfig(c *Client, i interface{}, res *Cluster) *ClusterWorkloadIdentityConfig {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &ClusterWorkloadIdentityConfig{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyClusterWorkloadIdentityConfig
+	}
+	r.IssuerUri = dcl.FlattenString(m["issuerUri"])
+	r.WorkloadPool = dcl.FlattenString(m["workloadPool"])
+	r.IdentityProvider = dcl.FlattenString(m["identityProvider"])
+
+	return r
+}
+
+// expandClusterFleetMap expands the contents of ClusterFleet into a JSON
+// request object.
+func expandClusterFleetMap(c *Client, f map[string]ClusterFleet, res *Cluster) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandClusterFleet(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandClusterFleetSlice expands the contents of ClusterFleet into a JSON
+// request object.
+func expandClusterFleetSlice(c *Client, f []ClusterFleet, res *Cluster) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandClusterFleet(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenClusterFleetMap flattens the contents of ClusterFleet from a JSON
+// response object.
+func flattenClusterFleetMap(c *Client, i interface{}, res *Cluster) map[string]ClusterFleet {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]ClusterFleet{}
+	}
+
+	if len(a) == 0 {
+		return map[string]ClusterFleet{}
+	}
+
+	items := make(map[string]ClusterFleet)
+	for k, item := range a {
+		items[k] = *flattenClusterFleet(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenClusterFleetSlice flattens the contents of ClusterFleet from a JSON
+// response object.
+func flattenClusterFleetSlice(c *Client, i interface{}, res *Cluster) []ClusterFleet {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []ClusterFleet{}
+	}
+
+	if len(a) == 0 {
+		return []ClusterFleet{}
+	}
+
+	items := make([]ClusterFleet, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenClusterFleet(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandClusterFleet expands an instance of ClusterFleet into a JSON
+// request object.
+func expandClusterFleet(c *Client, f *ClusterFleet, res *Cluster) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	// Project is normalized to the "projects/<id>" form regardless of how the user supplied it.
+	if v, err := dcl.DeriveField("projects/%s", f.Project, dcl.SelfLinkToName(f.Project)); err != nil {
+		return nil, fmt.Errorf("error expanding Project into project: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["project"] = v
+	}
+
+	return m, nil
+}
+
+// flattenClusterFleet flattens an instance of ClusterFleet from a JSON
+// response object.
+func flattenClusterFleet(c *Client, i interface{}, res *Cluster) *ClusterFleet {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &ClusterFleet{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyClusterFleet
+	}
+	r.Project = dcl.FlattenString(m["project"])
+	// Membership is read back but never expanded above — presumably an output-only field; confirm in the resource schema.
+	r.Membership = dcl.FlattenString(m["membership"])
+
+	return r
+}
+
+{{- if ne $.TargetVersionName "ga" }}
+// expandClusterLoggingConfigMap expands the contents of ClusterLoggingConfig into a JSON
+// request object.
+func expandClusterLoggingConfigMap(c *Client, f map[string]ClusterLoggingConfig, res *Cluster) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandClusterLoggingConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandClusterLoggingConfigSlice expands the contents of ClusterLoggingConfig into a JSON
+// request object.
+// NOTE(review): non-GA-only (inside the {{"{{- if ne $.TargetVersionName \"ga\" }}"}} guard
+// opened above) DCL-generated LoggingConfig converters — regenerate rather than hand-edit.
+func expandClusterLoggingConfigSlice(c *Client, f []ClusterLoggingConfig, res *Cluster) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandClusterLoggingConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenClusterLoggingConfigMap flattens the contents of ClusterLoggingConfig from a JSON
+// response object.
+func flattenClusterLoggingConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterLoggingConfig {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]ClusterLoggingConfig{}
+	}
+
+	if len(a) == 0 {
+		return map[string]ClusterLoggingConfig{}
+	}
+
+	items := make(map[string]ClusterLoggingConfig)
+	for k, item := range a {
+		items[k] = *flattenClusterLoggingConfig(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenClusterLoggingConfigSlice flattens the contents of ClusterLoggingConfig from a JSON
+// response object.
+func flattenClusterLoggingConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterLoggingConfig {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []ClusterLoggingConfig{}
+	}
+
+	if len(a) == 0 {
+		return []ClusterLoggingConfig{}
+	}
+
+	items := make([]ClusterLoggingConfig, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenClusterLoggingConfig(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandClusterLoggingConfig expands an instance of ClusterLoggingConfig into a JSON
+// request object.
+func expandClusterLoggingConfig(c *Client, f *ClusterLoggingConfig, res *Cluster) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v, err := expandClusterLoggingConfigComponentConfig(c, f.ComponentConfig, res); err != nil {
+		return nil, fmt.Errorf("error expanding ComponentConfig into componentConfig: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["componentConfig"] = v
+	}
+
+	return m, nil
+}
+
+// flattenClusterLoggingConfig flattens an instance of ClusterLoggingConfig from a JSON
+// response object.
+func flattenClusterLoggingConfig(c *Client, i interface{}, res *Cluster) *ClusterLoggingConfig {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &ClusterLoggingConfig{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyClusterLoggingConfig
+	}
+	r.ComponentConfig = flattenClusterLoggingConfigComponentConfig(c, m["componentConfig"], res)
+
+	return r
+}
+
+// expandClusterLoggingConfigComponentConfigMap expands the contents of ClusterLoggingConfigComponentConfig into a JSON
+// request object.
+func expandClusterLoggingConfigComponentConfigMap(c *Client, f map[string]ClusterLoggingConfigComponentConfig, res *Cluster) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandClusterLoggingConfigComponentConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandClusterLoggingConfigComponentConfigSlice expands the contents of ClusterLoggingConfigComponentConfig into a JSON
+// request object.
+func expandClusterLoggingConfigComponentConfigSlice(c *Client, f []ClusterLoggingConfigComponentConfig, res *Cluster) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandClusterLoggingConfigComponentConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenClusterLoggingConfigComponentConfigMap flattens the contents of ClusterLoggingConfigComponentConfig from a JSON
+// response object.
+func flattenClusterLoggingConfigComponentConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterLoggingConfigComponentConfig {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]ClusterLoggingConfigComponentConfig{}
+	}
+
+	if len(a) == 0 {
+		return map[string]ClusterLoggingConfigComponentConfig{}
+	}
+
+	items := make(map[string]ClusterLoggingConfigComponentConfig)
+	for k, item := range a {
+		items[k] = *flattenClusterLoggingConfigComponentConfig(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenClusterLoggingConfigComponentConfigSlice flattens the contents of ClusterLoggingConfigComponentConfig from a JSON
+// response object.
+func flattenClusterLoggingConfigComponentConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterLoggingConfigComponentConfig {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []ClusterLoggingConfigComponentConfig{}
+	}
+
+	if len(a) == 0 {
+		return []ClusterLoggingConfigComponentConfig{}
+	}
+
+	items := make([]ClusterLoggingConfigComponentConfig, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenClusterLoggingConfigComponentConfig(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandClusterLoggingConfigComponentConfig expands an instance of ClusterLoggingConfigComponentConfig into a JSON
+// request object.
+func expandClusterLoggingConfigComponentConfig(c *Client, f *ClusterLoggingConfigComponentConfig, res *Cluster) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	// Slice field: checked against nil directly so an explicitly-set empty list is still sent.
+	if v := f.EnableComponents; v != nil {
+		m["enableComponents"] = v
+	}
+
+	return m, nil
+}
+
+// flattenClusterLoggingConfigComponentConfig flattens an instance of ClusterLoggingConfigComponentConfig from a JSON
+// response object.
+func flattenClusterLoggingConfigComponentConfig(c *Client, i interface{}, res *Cluster) *ClusterLoggingConfigComponentConfig {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &ClusterLoggingConfigComponentConfig{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyClusterLoggingConfigComponentConfig
+	}
+	r.EnableComponents = flattenClusterLoggingConfigComponentConfigEnableComponentsEnumSlice(c, m["enableComponents"], res)
+
+	return r
+}
+
+// expandClusterMonitoringConfigMap expands the contents of ClusterMonitoringConfig into a JSON
+// request object.
+func expandClusterMonitoringConfigMap(c *Client, f map[string]ClusterMonitoringConfig, res *Cluster) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandClusterMonitoringConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandClusterMonitoringConfigSlice expands the contents of ClusterMonitoringConfig into a JSON
+// request object.
+// NOTE(review): non-GA-only DCL-generated MonitoringConfig / ManagedPrometheusConfig
+// converters; the template guard opened earlier closes at the {{"{{- end }}"}} below.
+func expandClusterMonitoringConfigSlice(c *Client, f []ClusterMonitoringConfig, res *Cluster) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandClusterMonitoringConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenClusterMonitoringConfigMap flattens the contents of ClusterMonitoringConfig from a JSON
+// response object.
+func flattenClusterMonitoringConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterMonitoringConfig {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]ClusterMonitoringConfig{}
+	}
+
+	if len(a) == 0 {
+		return map[string]ClusterMonitoringConfig{}
+	}
+
+	items := make(map[string]ClusterMonitoringConfig)
+	for k, item := range a {
+		items[k] = *flattenClusterMonitoringConfig(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenClusterMonitoringConfigSlice flattens the contents of ClusterMonitoringConfig from a JSON
+// response object.
+func flattenClusterMonitoringConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterMonitoringConfig {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []ClusterMonitoringConfig{}
+	}
+
+	if len(a) == 0 {
+		return []ClusterMonitoringConfig{}
+	}
+
+	items := make([]ClusterMonitoringConfig, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenClusterMonitoringConfig(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandClusterMonitoringConfig expands an instance of ClusterMonitoringConfig into a JSON
+// request object.
+func expandClusterMonitoringConfig(c *Client, f *ClusterMonitoringConfig, res *Cluster) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v, err := expandClusterMonitoringConfigManagedPrometheusConfig(c, f.ManagedPrometheusConfig, res); err != nil {
+		return nil, fmt.Errorf("error expanding ManagedPrometheusConfig into managedPrometheusConfig: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["managedPrometheusConfig"] = v
+	}
+
+	return m, nil
+}
+
+// flattenClusterMonitoringConfig flattens an instance of ClusterMonitoringConfig from a JSON
+// response object.
+func flattenClusterMonitoringConfig(c *Client, i interface{}, res *Cluster) *ClusterMonitoringConfig {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &ClusterMonitoringConfig{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyClusterMonitoringConfig
+	}
+	r.ManagedPrometheusConfig = flattenClusterMonitoringConfigManagedPrometheusConfig(c, m["managedPrometheusConfig"], res)
+
+	return r
+}
+
+// expandClusterMonitoringConfigManagedPrometheusConfigMap expands the contents of ClusterMonitoringConfigManagedPrometheusConfig into a JSON
+// request object.
+func expandClusterMonitoringConfigManagedPrometheusConfigMap(c *Client, f map[string]ClusterMonitoringConfigManagedPrometheusConfig, res *Cluster) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandClusterMonitoringConfigManagedPrometheusConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandClusterMonitoringConfigManagedPrometheusConfigSlice expands the contents of ClusterMonitoringConfigManagedPrometheusConfig into a JSON
+// request object.
+func expandClusterMonitoringConfigManagedPrometheusConfigSlice(c *Client, f []ClusterMonitoringConfigManagedPrometheusConfig, res *Cluster) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandClusterMonitoringConfigManagedPrometheusConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenClusterMonitoringConfigManagedPrometheusConfigMap flattens the contents of ClusterMonitoringConfigManagedPrometheusConfig from a JSON
+// response object.
+func flattenClusterMonitoringConfigManagedPrometheusConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterMonitoringConfigManagedPrometheusConfig {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]ClusterMonitoringConfigManagedPrometheusConfig{}
+	}
+
+	if len(a) == 0 {
+		return map[string]ClusterMonitoringConfigManagedPrometheusConfig{}
+	}
+
+	items := make(map[string]ClusterMonitoringConfigManagedPrometheusConfig)
+	for k, item := range a {
+		items[k] = *flattenClusterMonitoringConfigManagedPrometheusConfig(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenClusterMonitoringConfigManagedPrometheusConfigSlice flattens the contents of ClusterMonitoringConfigManagedPrometheusConfig from a JSON
+// response object.
+func flattenClusterMonitoringConfigManagedPrometheusConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterMonitoringConfigManagedPrometheusConfig {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []ClusterMonitoringConfigManagedPrometheusConfig{}
+	}
+
+	if len(a) == 0 {
+		return []ClusterMonitoringConfigManagedPrometheusConfig{}
+	}
+
+	items := make([]ClusterMonitoringConfigManagedPrometheusConfig, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenClusterMonitoringConfigManagedPrometheusConfig(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandClusterMonitoringConfigManagedPrometheusConfig expands an instance of ClusterMonitoringConfigManagedPrometheusConfig into a JSON
+// request object.
+func expandClusterMonitoringConfigManagedPrometheusConfig(c *Client, f *ClusterMonitoringConfigManagedPrometheusConfig, res *Cluster) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.Enabled; !dcl.IsEmptyValueIndirect(v) {
+		m["enabled"] = v
+	}
+
+	return m, nil
+}
+
+// flattenClusterMonitoringConfigManagedPrometheusConfig flattens an instance of ClusterMonitoringConfigManagedPrometheusConfig from a JSON
+// response object.
+func flattenClusterMonitoringConfigManagedPrometheusConfig(c *Client, i interface{}, res *Cluster) *ClusterMonitoringConfigManagedPrometheusConfig {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &ClusterMonitoringConfigManagedPrometheusConfig{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyClusterMonitoringConfigManagedPrometheusConfig
+	}
+	r.Enabled = dcl.FlattenBool(m["enabled"])
+
+	return r
+}
+
+{{- end }}
+// expandClusterBinaryAuthorizationMap expands the contents of ClusterBinaryAuthorization into a JSON
+// request object.
+// NOTE(review): DCL-generated BinaryAuthorization converters and volume-type enum
+// flatteners; do not hand-edit — regenerate via the DCL toolchain.
+func expandClusterBinaryAuthorizationMap(c *Client, f map[string]ClusterBinaryAuthorization, res *Cluster) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandClusterBinaryAuthorization(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandClusterBinaryAuthorizationSlice expands the contents of ClusterBinaryAuthorization into a JSON
+// request object.
+func expandClusterBinaryAuthorizationSlice(c *Client, f []ClusterBinaryAuthorization, res *Cluster) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandClusterBinaryAuthorization(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenClusterBinaryAuthorizationMap flattens the contents of ClusterBinaryAuthorization from a JSON
+// response object.
+func flattenClusterBinaryAuthorizationMap(c *Client, i interface{}, res *Cluster) map[string]ClusterBinaryAuthorization {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]ClusterBinaryAuthorization{}
+	}
+
+	if len(a) == 0 {
+		return map[string]ClusterBinaryAuthorization{}
+	}
+
+	items := make(map[string]ClusterBinaryAuthorization)
+	for k, item := range a {
+		items[k] = *flattenClusterBinaryAuthorization(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenClusterBinaryAuthorizationSlice flattens the contents of ClusterBinaryAuthorization from a JSON
+// response object.
+func flattenClusterBinaryAuthorizationSlice(c *Client, i interface{}, res *Cluster) []ClusterBinaryAuthorization {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []ClusterBinaryAuthorization{}
+	}
+
+	if len(a) == 0 {
+		return []ClusterBinaryAuthorization{}
+	}
+
+	items := make([]ClusterBinaryAuthorization, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenClusterBinaryAuthorization(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandClusterBinaryAuthorization expands an instance of ClusterBinaryAuthorization into a JSON
+// request object.
+func expandClusterBinaryAuthorization(c *Client, f *ClusterBinaryAuthorization, res *Cluster) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.EvaluationMode; !dcl.IsEmptyValueIndirect(v) {
+		m["evaluationMode"] = v
+	}
+
+	return m, nil
+}
+
+// flattenClusterBinaryAuthorization flattens an instance of ClusterBinaryAuthorization from a JSON
+// response object.
+func flattenClusterBinaryAuthorization(c *Client, i interface{}, res *Cluster) *ClusterBinaryAuthorization {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &ClusterBinaryAuthorization{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyClusterBinaryAuthorization
+	}
+	r.EvaluationMode = flattenClusterBinaryAuthorizationEvaluationModeEnum(m["evaluationMode"])
+
+	return r
+}
+
+// flattenClusterControlPlaneRootVolumeVolumeTypeEnumMap flattens the contents of ClusterControlPlaneRootVolumeVolumeTypeEnum from a JSON
+// response object.
+func flattenClusterControlPlaneRootVolumeVolumeTypeEnumMap(c *Client, i interface{}, res *Cluster) map[string]ClusterControlPlaneRootVolumeVolumeTypeEnum {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]ClusterControlPlaneRootVolumeVolumeTypeEnum{}
+	}
+
+	if len(a) == 0 {
+		return map[string]ClusterControlPlaneRootVolumeVolumeTypeEnum{}
+	}
+
+	items := make(map[string]ClusterControlPlaneRootVolumeVolumeTypeEnum)
+	for k, item := range a {
+		items[k] = *flattenClusterControlPlaneRootVolumeVolumeTypeEnum(item.(interface{}))
+	}
+
+	return items
+}
+
+// flattenClusterControlPlaneRootVolumeVolumeTypeEnumSlice flattens the contents of ClusterControlPlaneRootVolumeVolumeTypeEnum from a JSON
+// response object.
+func flattenClusterControlPlaneRootVolumeVolumeTypeEnumSlice(c *Client, i interface{}, res *Cluster) []ClusterControlPlaneRootVolumeVolumeTypeEnum {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []ClusterControlPlaneRootVolumeVolumeTypeEnum{}
+	}
+
+	if len(a) == 0 {
+		return []ClusterControlPlaneRootVolumeVolumeTypeEnum{}
+	}
+
+	items := make([]ClusterControlPlaneRootVolumeVolumeTypeEnum, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenClusterControlPlaneRootVolumeVolumeTypeEnum(item.(interface{})))
+	}
+
+	return items
+}
+
+// flattenClusterControlPlaneRootVolumeVolumeTypeEnum asserts that an interface is a string, and returns a
+// pointer to a *ClusterControlPlaneRootVolumeVolumeTypeEnum with the same value as that string.
+func flattenClusterControlPlaneRootVolumeVolumeTypeEnum(i interface{}) *ClusterControlPlaneRootVolumeVolumeTypeEnum {
+	s, ok := i.(string)
+	if !ok {
+		return nil
+	}
+
+	return ClusterControlPlaneRootVolumeVolumeTypeEnumRef(s)
+}
+
+// flattenClusterControlPlaneMainVolumeVolumeTypeEnumMap flattens the contents of ClusterControlPlaneMainVolumeVolumeTypeEnum from a JSON
+// response object.
+func flattenClusterControlPlaneMainVolumeVolumeTypeEnumMap(c *Client, i interface{}, res *Cluster) map[string]ClusterControlPlaneMainVolumeVolumeTypeEnum {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]ClusterControlPlaneMainVolumeVolumeTypeEnum{}
+	}
+
+	if len(a) == 0 {
+		return map[string]ClusterControlPlaneMainVolumeVolumeTypeEnum{}
+	}
+
+	items := make(map[string]ClusterControlPlaneMainVolumeVolumeTypeEnum)
+	for k, item := range a {
+		items[k] = *flattenClusterControlPlaneMainVolumeVolumeTypeEnum(item.(interface{}))
+	}
+
+	return items
+}
+
+// flattenClusterControlPlaneMainVolumeVolumeTypeEnumSlice flattens the contents of ClusterControlPlaneMainVolumeVolumeTypeEnum from a JSON
+// response object.
+func flattenClusterControlPlaneMainVolumeVolumeTypeEnumSlice(c *Client, i interface{}, res *Cluster) []ClusterControlPlaneMainVolumeVolumeTypeEnum {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []ClusterControlPlaneMainVolumeVolumeTypeEnum{}
+	}
+
+	if len(a) == 0 {
+		return []ClusterControlPlaneMainVolumeVolumeTypeEnum{}
+	}
+
+	items := make([]ClusterControlPlaneMainVolumeVolumeTypeEnum, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenClusterControlPlaneMainVolumeVolumeTypeEnum(item.(interface{})))
+	}
+
+	return items
+}
+
+// flattenClusterControlPlaneMainVolumeVolumeTypeEnum asserts that an interface is a string, and returns a
+// pointer to a *ClusterControlPlaneMainVolumeVolumeTypeEnum with the same value as that string.
+func flattenClusterControlPlaneMainVolumeVolumeTypeEnum(i interface{}) *ClusterControlPlaneMainVolumeVolumeTypeEnum {
+	s, ok := i.(string)
+	if !ok {
+		return nil
+	}
+
+	return ClusterControlPlaneMainVolumeVolumeTypeEnumRef(s)
+}
+
+{{- if ne $.TargetVersionName "ga" }}
+// flattenClusterControlPlaneInstancePlacementTenancyEnumMap flattens the contents of ClusterControlPlaneInstancePlacementTenancyEnum from a JSON
+// response object.
+// NOTE(review): non-GA-only tenancy enum flatteners followed by the always-present
+// ClusterStateEnum flatteners.
+func flattenClusterControlPlaneInstancePlacementTenancyEnumMap(c *Client, i interface{}, res *Cluster) map[string]ClusterControlPlaneInstancePlacementTenancyEnum {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]ClusterControlPlaneInstancePlacementTenancyEnum{}
+	}
+
+	if len(a) == 0 {
+		return map[string]ClusterControlPlaneInstancePlacementTenancyEnum{}
+	}
+
+	items := make(map[string]ClusterControlPlaneInstancePlacementTenancyEnum)
+	for k, item := range a {
+		items[k] = *flattenClusterControlPlaneInstancePlacementTenancyEnum(item.(interface{}))
+	}
+
+	return items
+}
+
+// flattenClusterControlPlaneInstancePlacementTenancyEnumSlice flattens the contents of ClusterControlPlaneInstancePlacementTenancyEnum from a JSON
+// response object.
+func flattenClusterControlPlaneInstancePlacementTenancyEnumSlice(c *Client, i interface{}, res *Cluster) []ClusterControlPlaneInstancePlacementTenancyEnum {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []ClusterControlPlaneInstancePlacementTenancyEnum{}
+	}
+
+	if len(a) == 0 {
+		return []ClusterControlPlaneInstancePlacementTenancyEnum{}
+	}
+
+	items := make([]ClusterControlPlaneInstancePlacementTenancyEnum, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenClusterControlPlaneInstancePlacementTenancyEnum(item.(interface{})))
+	}
+
+	return items
+}
+
+// flattenClusterControlPlaneInstancePlacementTenancyEnum asserts that an interface is a string, and returns a
+// pointer to a *ClusterControlPlaneInstancePlacementTenancyEnum with the same value as that string.
+func flattenClusterControlPlaneInstancePlacementTenancyEnum(i interface{}) *ClusterControlPlaneInstancePlacementTenancyEnum {
+	s, ok := i.(string)
+	if !ok {
+		return nil
+	}
+
+	return ClusterControlPlaneInstancePlacementTenancyEnumRef(s)
+}
+
+{{- end }}
+// flattenClusterStateEnumMap flattens the contents of ClusterStateEnum from a JSON
+// response object.
+func flattenClusterStateEnumMap(c *Client, i interface{}, res *Cluster) map[string]ClusterStateEnum {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]ClusterStateEnum{}
+	}
+
+	if len(a) == 0 {
+		return map[string]ClusterStateEnum{}
+	}
+
+	items := make(map[string]ClusterStateEnum)
+	for k, item := range a {
+		items[k] = *flattenClusterStateEnum(item.(interface{}))
+	}
+
+	return items
+}
+
+// flattenClusterStateEnumSlice flattens the contents of ClusterStateEnum from a JSON
+// response object.
+func flattenClusterStateEnumSlice(c *Client, i interface{}, res *Cluster) []ClusterStateEnum {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []ClusterStateEnum{}
+	}
+
+	if len(a) == 0 {
+		return []ClusterStateEnum{}
+	}
+
+	items := make([]ClusterStateEnum, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenClusterStateEnum(item.(interface{})))
+	}
+
+	return items
+}
+
+// flattenClusterStateEnum asserts that an interface is a string, and returns a
+// pointer to a *ClusterStateEnum with the same value as that string.
+func flattenClusterStateEnum(i interface{}) *ClusterStateEnum {
+	s, ok := i.(string)
+	if !ok {
+		return nil
+	}
+
+	return ClusterStateEnumRef(s)
+	// NOTE(review): the template guard below deliberately moves this function's closing
+	// brace inside the non-GA branch; presumably a later `}` after the matching
+	// {{"{{- end }}"}} closes it in GA builds — confirm that the rendered output of BOTH
+	// branches is balanced before changing anything here.
+{{- if ne $.TargetVersionName "ga" }}
+}
+
+// flattenClusterLoggingConfigComponentConfigEnableComponentsEnumMap flattens the contents of ClusterLoggingConfigComponentConfigEnableComponentsEnum from a JSON
+// response object.
+func flattenClusterLoggingConfigComponentConfigEnableComponentsEnumMap(c *Client, i interface{}, res *Cluster) map[string]ClusterLoggingConfigComponentConfigEnableComponentsEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterLoggingConfigComponentConfigEnableComponentsEnum{} + } + + if len(a) == 0 { + return map[string]ClusterLoggingConfigComponentConfigEnableComponentsEnum{} + } + + items := make(map[string]ClusterLoggingConfigComponentConfigEnableComponentsEnum) + for k, item := range a { + items[k] = *flattenClusterLoggingConfigComponentConfigEnableComponentsEnum(item.(interface{})) + } + + return items +} + +// flattenClusterLoggingConfigComponentConfigEnableComponentsEnumSlice flattens the contents of ClusterLoggingConfigComponentConfigEnableComponentsEnum from a JSON +// response object. +func flattenClusterLoggingConfigComponentConfigEnableComponentsEnumSlice(c *Client, i interface{}, res *Cluster) []ClusterLoggingConfigComponentConfigEnableComponentsEnum { + a, ok := i.([]interface{}) + if !ok { + return []ClusterLoggingConfigComponentConfigEnableComponentsEnum{} + } + + if len(a) == 0 { + return []ClusterLoggingConfigComponentConfigEnableComponentsEnum{} + } + + items := make([]ClusterLoggingConfigComponentConfigEnableComponentsEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterLoggingConfigComponentConfigEnableComponentsEnum(item.(interface{}))) + } + + return items +} + +// flattenClusterLoggingConfigComponentConfigEnableComponentsEnum asserts that an interface is a string, and returns a +// pointer to a *ClusterLoggingConfigComponentConfigEnableComponentsEnum with the same value as that string. 
+func flattenClusterLoggingConfigComponentConfigEnableComponentsEnum(i interface{}) *ClusterLoggingConfigComponentConfigEnableComponentsEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return ClusterLoggingConfigComponentConfigEnableComponentsEnumRef(s) +{{- end }} +} + +// flattenClusterBinaryAuthorizationEvaluationModeEnumMap flattens the contents of ClusterBinaryAuthorizationEvaluationModeEnum from a JSON +// response object. +func flattenClusterBinaryAuthorizationEvaluationModeEnumMap(c *Client, i interface{}, res *Cluster) map[string]ClusterBinaryAuthorizationEvaluationModeEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterBinaryAuthorizationEvaluationModeEnum{} + } + + if len(a) == 0 { + return map[string]ClusterBinaryAuthorizationEvaluationModeEnum{} + } + + items := make(map[string]ClusterBinaryAuthorizationEvaluationModeEnum) + for k, item := range a { + items[k] = *flattenClusterBinaryAuthorizationEvaluationModeEnum(item.(interface{})) + } + + return items +} + +// flattenClusterBinaryAuthorizationEvaluationModeEnumSlice flattens the contents of ClusterBinaryAuthorizationEvaluationModeEnum from a JSON +// response object. +func flattenClusterBinaryAuthorizationEvaluationModeEnumSlice(c *Client, i interface{}, res *Cluster) []ClusterBinaryAuthorizationEvaluationModeEnum { + a, ok := i.([]interface{}) + if !ok { + return []ClusterBinaryAuthorizationEvaluationModeEnum{} + } + + if len(a) == 0 { + return []ClusterBinaryAuthorizationEvaluationModeEnum{} + } + + items := make([]ClusterBinaryAuthorizationEvaluationModeEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterBinaryAuthorizationEvaluationModeEnum(item.(interface{}))) + } + + return items +} + +// flattenClusterBinaryAuthorizationEvaluationModeEnum asserts that an interface is a string, and returns a +// pointer to a *ClusterBinaryAuthorizationEvaluationModeEnum with the same value as that string. 
+func flattenClusterBinaryAuthorizationEvaluationModeEnum(i interface{}) *ClusterBinaryAuthorizationEvaluationModeEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return ClusterBinaryAuthorizationEvaluationModeEnumRef(s) +} + +// This function returns a matcher that checks whether a serialized resource matches this resource +// in its parameters (as defined by the fields in a Get, which definitionally define resource +// identity). This is useful in extracting the element from a List call. +func (r *Cluster) matcher(c *Client) func([]byte) bool { + return func(b []byte) bool { + cr, err := unmarshalCluster(b, c, r) + if err != nil { + c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.") + return false + } + nr := r.urlNormalized() + ncr := cr.urlNormalized() + c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr) + + if nr.Project == nil && ncr.Project == nil { + c.Config.Logger.Info("Both Project fields null - considering equal.") + } else if nr.Project == nil || ncr.Project == nil { + c.Config.Logger.Info("Only one Project field is null - considering unequal.") + return false + } else if *nr.Project != *ncr.Project { + return false + } + if nr.Location == nil && ncr.Location == nil { + c.Config.Logger.Info("Both Location fields null - considering equal.") + } else if nr.Location == nil || ncr.Location == nil { + c.Config.Logger.Info("Only one Location field is null - considering unequal.") + return false + } else if *nr.Location != *ncr.Location { + return false + } + if nr.Name == nil && ncr.Name == nil { + c.Config.Logger.Info("Both Name fields null - considering equal.") + } else if nr.Name == nil || ncr.Name == nil { + c.Config.Logger.Info("Only one Name field is null - considering unequal.") + return false + } else if *nr.Name != *ncr.Name { + return false + } + return true + } +} + +type clusterDiff struct { + // The diff should include one or the other of RequiresRecreate or UpdateOp. 
+ RequiresRecreate bool + UpdateOp clusterApiOperation + FieldName string // used for error logging +} + +func convertFieldDiffsToClusterDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]clusterDiff, error) { + opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff) + // Map each operation name to the field diffs associated with it. + for _, fd := range fds { + for _, ro := range fd.ResultingOperation { + if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok { + fieldDiffs = append(fieldDiffs, fd) + opNamesToFieldDiffs[ro] = fieldDiffs + } else { + config.Logger.Infof("%s required due to diff: %v", ro, fd) + opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd} + } + } + } + var diffs []clusterDiff + // For each operation name, create a clusterDiff which contains the operation. + for opName, fieldDiffs := range opNamesToFieldDiffs { + // Use the first field diff's field name for logging required recreate error. + diff := clusterDiff{FieldName: fieldDiffs[0].FieldName} + if opName == "Recreate" { + diff.RequiresRecreate = true + } else { + apiOp, err := convertOpNameToClusterApiOperation(opName, fieldDiffs, opts...) + if err != nil { + return diffs, err + } + diff.UpdateOp = apiOp + } + diffs = append(diffs, diff) + } + return diffs, nil +} + +func convertOpNameToClusterApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (clusterApiOperation, error) { + switch opName { + + case "updateClusterUpdateAwsClusterOperation": + return &updateClusterUpdateAwsClusterOperation{FieldDiffs: fieldDiffs}, nil + + default: + return nil, fmt.Errorf("no such operation with name: %v", opName) + } +} + +func extractClusterFields(r *Cluster) error { + vNetworking := r.Networking + if vNetworking == nil { + // note: explicitly not the empty object. 
+ vNetworking = &ClusterNetworking{} + } + if err := extractClusterNetworkingFields(r, vNetworking); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vNetworking) { + r.Networking = vNetworking + } + vControlPlane := r.ControlPlane + if vControlPlane == nil { + // note: explicitly not the empty object. + vControlPlane = &ClusterControlPlane{} + } + if err := extractClusterControlPlaneFields(r, vControlPlane); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vControlPlane) { + r.ControlPlane = vControlPlane + } + vAuthorization := r.Authorization + if vAuthorization == nil { + // note: explicitly not the empty object. + vAuthorization = &ClusterAuthorization{} + } + if err := extractClusterAuthorizationFields(r, vAuthorization); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAuthorization) { + r.Authorization = vAuthorization + } + vWorkloadIdentityConfig := r.WorkloadIdentityConfig + if vWorkloadIdentityConfig == nil { + // note: explicitly not the empty object. + vWorkloadIdentityConfig = &ClusterWorkloadIdentityConfig{} + } + if err := extractClusterWorkloadIdentityConfigFields(r, vWorkloadIdentityConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vWorkloadIdentityConfig) { + r.WorkloadIdentityConfig = vWorkloadIdentityConfig + } + vFleet := r.Fleet + if vFleet == nil { + // note: explicitly not the empty object. + vFleet = &ClusterFleet{} + } + if err := extractClusterFleetFields(r, vFleet); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vFleet) { + r.Fleet = vFleet + } +{{- if ne $.TargetVersionName "ga" }} + vLoggingConfig := r.LoggingConfig + if vLoggingConfig == nil { + // note: explicitly not the empty object. 
+ vLoggingConfig = &ClusterLoggingConfig{} + } + if err := extractClusterLoggingConfigFields(r, vLoggingConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vLoggingConfig) { + r.LoggingConfig = vLoggingConfig + } + vMonitoringConfig := r.MonitoringConfig + if vMonitoringConfig == nil { + // note: explicitly not the empty object. + vMonitoringConfig = &ClusterMonitoringConfig{} + } + if err := extractClusterMonitoringConfigFields(r, vMonitoringConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMonitoringConfig) { + r.MonitoringConfig = vMonitoringConfig + } +{{- end }} + vBinaryAuthorization := r.BinaryAuthorization + if vBinaryAuthorization == nil { + // note: explicitly not the empty object. + vBinaryAuthorization = &ClusterBinaryAuthorization{} + } + if err := extractClusterBinaryAuthorizationFields(r, vBinaryAuthorization); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vBinaryAuthorization) { + r.BinaryAuthorization = vBinaryAuthorization + } + return nil +} +func extractClusterNetworkingFields(r *Cluster, o *ClusterNetworking) error { + return nil +} +func extractClusterControlPlaneFields(r *Cluster, o *ClusterControlPlane) error { + vSshConfig := o.SshConfig + if vSshConfig == nil { + // note: explicitly not the empty object. + vSshConfig = &ClusterControlPlaneSshConfig{} + } + if err := extractClusterControlPlaneSshConfigFields(r, vSshConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSshConfig) { + o.SshConfig = vSshConfig + } + vConfigEncryption := o.ConfigEncryption + if vConfigEncryption == nil { + // note: explicitly not the empty object. 
+ vConfigEncryption = &ClusterControlPlaneConfigEncryption{} + } + if err := extractClusterControlPlaneConfigEncryptionFields(r, vConfigEncryption); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vConfigEncryption) { + o.ConfigEncryption = vConfigEncryption + } + vRootVolume := o.RootVolume + if vRootVolume == nil { + // note: explicitly not the empty object. + vRootVolume = &ClusterControlPlaneRootVolume{} + } + if err := extractClusterControlPlaneRootVolumeFields(r, vRootVolume); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vRootVolume) { + o.RootVolume = vRootVolume + } + vMainVolume := o.MainVolume + if vMainVolume == nil { + // note: explicitly not the empty object. + vMainVolume = &ClusterControlPlaneMainVolume{} + } + if err := extractClusterControlPlaneMainVolumeFields(r, vMainVolume); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMainVolume) { + o.MainVolume = vMainVolume + } + vDatabaseEncryption := o.DatabaseEncryption + if vDatabaseEncryption == nil { + // note: explicitly not the empty object. + vDatabaseEncryption = &ClusterControlPlaneDatabaseEncryption{} + } + if err := extractClusterControlPlaneDatabaseEncryptionFields(r, vDatabaseEncryption); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vDatabaseEncryption) { + o.DatabaseEncryption = vDatabaseEncryption + } + vAwsServicesAuthentication := o.AwsServicesAuthentication + if vAwsServicesAuthentication == nil { + // note: explicitly not the empty object. + vAwsServicesAuthentication = &ClusterControlPlaneAwsServicesAuthentication{} + } + if err := extractClusterControlPlaneAwsServicesAuthenticationFields(r, vAwsServicesAuthentication); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAwsServicesAuthentication) { + o.AwsServicesAuthentication = vAwsServicesAuthentication + } + vProxyConfig := o.ProxyConfig + if vProxyConfig == nil { + // note: explicitly not the empty object. 
+ vProxyConfig = &ClusterControlPlaneProxyConfig{} + } + if err := extractClusterControlPlaneProxyConfigFields(r, vProxyConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vProxyConfig) { + o.ProxyConfig = vProxyConfig + } +{{- if ne $.TargetVersionName "ga" }} + vInstancePlacement := o.InstancePlacement + if vInstancePlacement == nil { + // note: explicitly not the empty object. + vInstancePlacement = &ClusterControlPlaneInstancePlacement{} + } + if err := extractClusterControlPlaneInstancePlacementFields(r, vInstancePlacement); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vInstancePlacement) { + o.InstancePlacement = vInstancePlacement + } +{{- end }} + return nil +} +func extractClusterControlPlaneSshConfigFields(r *Cluster, o *ClusterControlPlaneSshConfig) error { + return nil +} +func extractClusterControlPlaneConfigEncryptionFields(r *Cluster, o *ClusterControlPlaneConfigEncryption) error { + return nil +} +func extractClusterControlPlaneRootVolumeFields(r *Cluster, o *ClusterControlPlaneRootVolume) error { + return nil +} +func extractClusterControlPlaneMainVolumeFields(r *Cluster, o *ClusterControlPlaneMainVolume) error { + return nil +} +func extractClusterControlPlaneDatabaseEncryptionFields(r *Cluster, o *ClusterControlPlaneDatabaseEncryption) error { + return nil +} +func extractClusterControlPlaneAwsServicesAuthenticationFields(r *Cluster, o *ClusterControlPlaneAwsServicesAuthentication) error { + return nil +} +func extractClusterControlPlaneProxyConfigFields(r *Cluster, o *ClusterControlPlaneProxyConfig) error { + return nil +} +{{- if ne $.TargetVersionName "ga" }} +func extractClusterControlPlaneInstancePlacementFields(r *Cluster, o *ClusterControlPlaneInstancePlacement) error { + return nil +} +{{- end }} +func extractClusterAuthorizationFields(r *Cluster, o *ClusterAuthorization) error { + return nil +} +func extractClusterAuthorizationAdminUsersFields(r *Cluster, o *ClusterAuthorizationAdminUsers) error { + 
return nil +} +func extractClusterAuthorizationAdminGroupsFields(r *Cluster, o *ClusterAuthorizationAdminGroups) error { + return nil +} +func extractClusterWorkloadIdentityConfigFields(r *Cluster, o *ClusterWorkloadIdentityConfig) error { + return nil +} +func extractClusterFleetFields(r *Cluster, o *ClusterFleet) error { +{{- if ne $.TargetVersionName "ga" }} + return nil +} +func extractClusterLoggingConfigFields(r *Cluster, o *ClusterLoggingConfig) error { + vComponentConfig := o.ComponentConfig + if vComponentConfig == nil { + // note: explicitly not the empty object. + vComponentConfig = &ClusterLoggingConfigComponentConfig{} + } + if err := extractClusterLoggingConfigComponentConfigFields(r, vComponentConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vComponentConfig) { + o.ComponentConfig = vComponentConfig + } + return nil +} +func extractClusterLoggingConfigComponentConfigFields(r *Cluster, o *ClusterLoggingConfigComponentConfig) error { + return nil +} +func extractClusterMonitoringConfigFields(r *Cluster, o *ClusterMonitoringConfig) error { + vManagedPrometheusConfig := o.ManagedPrometheusConfig + if vManagedPrometheusConfig == nil { + // note: explicitly not the empty object. 
+ vManagedPrometheusConfig = &ClusterMonitoringConfigManagedPrometheusConfig{} + } + if err := extractClusterMonitoringConfigManagedPrometheusConfigFields(r, vManagedPrometheusConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vManagedPrometheusConfig) { + o.ManagedPrometheusConfig = vManagedPrometheusConfig + } + return nil +} +func extractClusterMonitoringConfigManagedPrometheusConfigFields(r *Cluster, o *ClusterMonitoringConfigManagedPrometheusConfig) error { +{{- end }} + return nil +} +func extractClusterBinaryAuthorizationFields(r *Cluster, o *ClusterBinaryAuthorization) error { + return nil +} + +func postReadExtractClusterFields(r *Cluster) error { + vNetworking := r.Networking + if vNetworking == nil { + // note: explicitly not the empty object. + vNetworking = &ClusterNetworking{} + } + if err := postReadExtractClusterNetworkingFields(r, vNetworking); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vNetworking) { + r.Networking = vNetworking + } + vControlPlane := r.ControlPlane + if vControlPlane == nil { + // note: explicitly not the empty object. + vControlPlane = &ClusterControlPlane{} + } + if err := postReadExtractClusterControlPlaneFields(r, vControlPlane); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vControlPlane) { + r.ControlPlane = vControlPlane + } + vAuthorization := r.Authorization + if vAuthorization == nil { + // note: explicitly not the empty object. + vAuthorization = &ClusterAuthorization{} + } + if err := postReadExtractClusterAuthorizationFields(r, vAuthorization); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAuthorization) { + r.Authorization = vAuthorization + } + vWorkloadIdentityConfig := r.WorkloadIdentityConfig + if vWorkloadIdentityConfig == nil { + // note: explicitly not the empty object. 
+ vWorkloadIdentityConfig = &ClusterWorkloadIdentityConfig{} + } + if err := postReadExtractClusterWorkloadIdentityConfigFields(r, vWorkloadIdentityConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vWorkloadIdentityConfig) { + r.WorkloadIdentityConfig = vWorkloadIdentityConfig + } + vFleet := r.Fleet + if vFleet == nil { + // note: explicitly not the empty object. + vFleet = &ClusterFleet{} + } + if err := postReadExtractClusterFleetFields(r, vFleet); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vFleet) { + r.Fleet = vFleet + } +{{- if ne $.TargetVersionName "ga" }} + vLoggingConfig := r.LoggingConfig + if vLoggingConfig == nil { + // note: explicitly not the empty object. + vLoggingConfig = &ClusterLoggingConfig{} + } + if err := postReadExtractClusterLoggingConfigFields(r, vLoggingConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vLoggingConfig) { + r.LoggingConfig = vLoggingConfig + } + vMonitoringConfig := r.MonitoringConfig + if vMonitoringConfig == nil { + // note: explicitly not the empty object. + vMonitoringConfig = &ClusterMonitoringConfig{} + } + if err := postReadExtractClusterMonitoringConfigFields(r, vMonitoringConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMonitoringConfig) { + r.MonitoringConfig = vMonitoringConfig + } +{{- end }} + vBinaryAuthorization := r.BinaryAuthorization + if vBinaryAuthorization == nil { + // note: explicitly not the empty object. 
+ vBinaryAuthorization = &ClusterBinaryAuthorization{} + } + if err := postReadExtractClusterBinaryAuthorizationFields(r, vBinaryAuthorization); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vBinaryAuthorization) { + r.BinaryAuthorization = vBinaryAuthorization + } + return nil +} +func postReadExtractClusterNetworkingFields(r *Cluster, o *ClusterNetworking) error { + return nil +} +func postReadExtractClusterControlPlaneFields(r *Cluster, o *ClusterControlPlane) error { + vSshConfig := o.SshConfig + if vSshConfig == nil { + // note: explicitly not the empty object. + vSshConfig = &ClusterControlPlaneSshConfig{} + } + if err := extractClusterControlPlaneSshConfigFields(r, vSshConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSshConfig) { + o.SshConfig = vSshConfig + } + vConfigEncryption := o.ConfigEncryption + if vConfigEncryption == nil { + // note: explicitly not the empty object. + vConfigEncryption = &ClusterControlPlaneConfigEncryption{} + } + if err := extractClusterControlPlaneConfigEncryptionFields(r, vConfigEncryption); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vConfigEncryption) { + o.ConfigEncryption = vConfigEncryption + } + vRootVolume := o.RootVolume + if vRootVolume == nil { + // note: explicitly not the empty object. + vRootVolume = &ClusterControlPlaneRootVolume{} + } + if err := extractClusterControlPlaneRootVolumeFields(r, vRootVolume); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vRootVolume) { + o.RootVolume = vRootVolume + } + vMainVolume := o.MainVolume + if vMainVolume == nil { + // note: explicitly not the empty object. 
+ vMainVolume = &ClusterControlPlaneMainVolume{} + } + if err := extractClusterControlPlaneMainVolumeFields(r, vMainVolume); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMainVolume) { + o.MainVolume = vMainVolume + } + vDatabaseEncryption := o.DatabaseEncryption + if vDatabaseEncryption == nil { + // note: explicitly not the empty object. + vDatabaseEncryption = &ClusterControlPlaneDatabaseEncryption{} + } + if err := extractClusterControlPlaneDatabaseEncryptionFields(r, vDatabaseEncryption); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vDatabaseEncryption) { + o.DatabaseEncryption = vDatabaseEncryption + } + vAwsServicesAuthentication := o.AwsServicesAuthentication + if vAwsServicesAuthentication == nil { + // note: explicitly not the empty object. + vAwsServicesAuthentication = &ClusterControlPlaneAwsServicesAuthentication{} + } + if err := extractClusterControlPlaneAwsServicesAuthenticationFields(r, vAwsServicesAuthentication); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAwsServicesAuthentication) { + o.AwsServicesAuthentication = vAwsServicesAuthentication + } + vProxyConfig := o.ProxyConfig + if vProxyConfig == nil { + // note: explicitly not the empty object. + vProxyConfig = &ClusterControlPlaneProxyConfig{} + } + if err := extractClusterControlPlaneProxyConfigFields(r, vProxyConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vProxyConfig) { + o.ProxyConfig = vProxyConfig + } +{{- if ne $.TargetVersionName "ga" }} + vInstancePlacement := o.InstancePlacement + if vInstancePlacement == nil { + // note: explicitly not the empty object. 
+ vInstancePlacement = &ClusterControlPlaneInstancePlacement{} + } + if err := extractClusterControlPlaneInstancePlacementFields(r, vInstancePlacement); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vInstancePlacement) { + o.InstancePlacement = vInstancePlacement + } +{{- end }} + return nil +} +func postReadExtractClusterControlPlaneSshConfigFields(r *Cluster, o *ClusterControlPlaneSshConfig) error { + return nil +} +func postReadExtractClusterControlPlaneConfigEncryptionFields(r *Cluster, o *ClusterControlPlaneConfigEncryption) error { + return nil +} +func postReadExtractClusterControlPlaneRootVolumeFields(r *Cluster, o *ClusterControlPlaneRootVolume) error { + return nil +} +func postReadExtractClusterControlPlaneMainVolumeFields(r *Cluster, o *ClusterControlPlaneMainVolume) error { + return nil +} +func postReadExtractClusterControlPlaneDatabaseEncryptionFields(r *Cluster, o *ClusterControlPlaneDatabaseEncryption) error { + return nil +} +func postReadExtractClusterControlPlaneAwsServicesAuthenticationFields(r *Cluster, o *ClusterControlPlaneAwsServicesAuthentication) error { + return nil +} +func postReadExtractClusterControlPlaneProxyConfigFields(r *Cluster, o *ClusterControlPlaneProxyConfig) error { + return nil +} +{{- if ne $.TargetVersionName "ga" }} +func postReadExtractClusterControlPlaneInstancePlacementFields(r *Cluster, o *ClusterControlPlaneInstancePlacement) error { + return nil +} +{{- end }} +func postReadExtractClusterAuthorizationFields(r *Cluster, o *ClusterAuthorization) error { + return nil +} +func postReadExtractClusterAuthorizationAdminUsersFields(r *Cluster, o *ClusterAuthorizationAdminUsers) error { + return nil +} +func postReadExtractClusterAuthorizationAdminGroupsFields(r *Cluster, o *ClusterAuthorizationAdminGroups) error { + return nil +} +func postReadExtractClusterWorkloadIdentityConfigFields(r *Cluster, o *ClusterWorkloadIdentityConfig) error { + return nil +} +func postReadExtractClusterFleetFields(r *Cluster, o 
*ClusterFleet) error { + return nil +} +{{- if ne $.TargetVersionName "ga" }} +func postReadExtractClusterLoggingConfigFields(r *Cluster, o *ClusterLoggingConfig) error { + vComponentConfig := o.ComponentConfig + if vComponentConfig == nil { + // note: explicitly not the empty object. + vComponentConfig = &ClusterLoggingConfigComponentConfig{} + } + if err := extractClusterLoggingConfigComponentConfigFields(r, vComponentConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vComponentConfig) { + o.ComponentConfig = vComponentConfig + } + return nil +} +func postReadExtractClusterLoggingConfigComponentConfigFields(r *Cluster, o *ClusterLoggingConfigComponentConfig) error { + return nil +} +func postReadExtractClusterMonitoringConfigFields(r *Cluster, o *ClusterMonitoringConfig) error { + vManagedPrometheusConfig := o.ManagedPrometheusConfig + if vManagedPrometheusConfig == nil { + // note: explicitly not the empty object. + vManagedPrometheusConfig = &ClusterMonitoringConfigManagedPrometheusConfig{} + } + if err := extractClusterMonitoringConfigManagedPrometheusConfigFields(r, vManagedPrometheusConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vManagedPrometheusConfig) { + o.ManagedPrometheusConfig = vManagedPrometheusConfig + } + return nil +} +func postReadExtractClusterMonitoringConfigManagedPrometheusConfigFields(r *Cluster, o *ClusterMonitoringConfigManagedPrometheusConfig) error { + return nil +} +{{- end }} +func postReadExtractClusterBinaryAuthorizationFields(r *Cluster, o *ClusterBinaryAuthorization) error { + return nil +} diff --git a/mmv1/third_party/terraform/services/containeraws/cluster_schema.go.tmpl b/mmv1/third_party/terraform/services/containeraws/cluster_schema.go.tmpl new file mode 100644 index 000000000000..ce4ea97c67fd --- /dev/null +++ b/mmv1/third_party/terraform/services/containeraws/cluster_schema.go.tmpl @@ -0,0 +1,754 @@ +package containeraws + +import ( + dcl 
"github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +func DCLClusterSchema() *dcl.Schema { + return &dcl.Schema{ + Info: &dcl.Info{ + Title: "ContainerAws/Cluster", + Description: "An Anthos cluster running on AWS.", + StructName: "Cluster", + Reference: &dcl.Link{ + Text: "API reference", + URL: "https://cloud.google.com/kubernetes-engine/multi-cloud/docs/reference/rest/v1/projects.locations.awsClusters", + }, + Guides: []*dcl.Link{ + &dcl.Link{ + Text: "Multicloud overview", + URL: "https://cloud.google.com/kubernetes-engine/multi-cloud/docs", + }, + }, + }, + Paths: &dcl.Paths{ + Get: &dcl.Path{ + Description: "The function used to get information about a Cluster", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "cluster", + Required: true, + Description: "A full instance of a Cluster", + }, + }, + }, + Apply: &dcl.Path{ + Description: "The function used to apply information about a Cluster", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "cluster", + Required: true, + Description: "A full instance of a Cluster", + }, + }, + }, + Delete: &dcl.Path{ + Description: "The function used to delete a Cluster", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "cluster", + Required: true, + Description: "A full instance of a Cluster", + }, + }, + }, + DeleteAll: &dcl.Path{ + Description: "The function used to delete all Cluster", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "project", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + dcl.PathParameters{ + Name: "location", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + }, + }, + List: &dcl.Path{ + Description: "The function used to list information about many Cluster", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "project", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + dcl.PathParameters{ + 
Name: "location", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + }, + }, + }, + Components: &dcl.Components{ + Schemas: map[string]*dcl.Component{ + "Cluster": &dcl.Component{ + Title: "Cluster", + ID: "projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/awsClusters/{{ "{{" }}name{{ "}}" }}", + ParentContainer: "project", + HasCreate: true, + SchemaProperty: dcl.Property{ + Type: "object", + Required: []string{ + "name", + "networking", + "awsRegion", + "controlPlane", + "authorization", + "project", + "location", + "fleet", + }, + Properties: map[string]*dcl.Property{ + "annotations": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "Annotations", + Description: "Optional. Annotations on the cluster. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. Key can have 2 segments: prefix (optional) and name (required), separated by a slash (/). Prefix must be a DNS subdomain. Name must be 63 characters or less, begin and end with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics between.", + }, + "authorization": &dcl.Property{ + Type: "object", + GoName: "Authorization", + GoType: "ClusterAuthorization", + Description: "Configuration related to the cluster RBAC settings.", + Required: []string{ + "adminUsers", + }, + Properties: map[string]*dcl.Property{ + "adminGroups": &dcl.Property{ + Type: "array", + GoName: "AdminGroups", + Description: "Groups of users that can perform operations as a cluster admin. A managed ClusterRoleBinding will be created to grant the `cluster-admin` ClusterRole to the groups. Up to ten admin groups can be provided. 
For more info on RBAC, see https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "object", + GoType: "ClusterAuthorizationAdminGroups", + Required: []string{ + "group", + }, + Properties: map[string]*dcl.Property{ + "group": &dcl.Property{ + Type: "string", + GoName: "Group", + Description: "The name of the group, e.g. `my-group@domain.com`.", + }, + }, + }, + }, + "adminUsers": &dcl.Property{ + Type: "array", + GoName: "AdminUsers", + Description: "Users to perform operations as a cluster admin. A managed ClusterRoleBinding will be created to grant the `cluster-admin` ClusterRole to the users. Up to ten admin users can be provided. For more info on RBAC, see https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "object", + GoType: "ClusterAuthorizationAdminUsers", + Required: []string{ + "username", + }, + Properties: map[string]*dcl.Property{ + "username": &dcl.Property{ + Type: "string", + GoName: "Username", + Description: "The name of the user, e.g. `my-gcp-id@gmail.com`.", + }, + }, + }, + }, + }, + }, + "awsRegion": &dcl.Property{ + Type: "string", + GoName: "AwsRegion", + Description: "The AWS region where the cluster runs. Each Google Cloud region supports a subset of nearby AWS regions. 
You can call to list all supported AWS regions within a given Google Cloud region.", + Immutable: true, + }, + "binaryAuthorization": &dcl.Property{ + Type: "object", + GoName: "BinaryAuthorization", + GoType: "ClusterBinaryAuthorization", + Description: "Configuration options for the Binary Authorization feature.", + ServerDefault: true, + Properties: map[string]*dcl.Property{ + "evaluationMode": &dcl.Property{ + Type: "string", + GoName: "EvaluationMode", + GoType: "ClusterBinaryAuthorizationEvaluationModeEnum", + Description: "Mode of operation for Binary Authorization policy evaluation. Possible values: DISABLED, PROJECT_SINGLETON_POLICY_ENFORCE", + ServerDefault: true, + Enum: []string{ + "DISABLED", + "PROJECT_SINGLETON_POLICY_ENFORCE", + }, + }, + }, + }, + "controlPlane": &dcl.Property{ + Type: "object", + GoName: "ControlPlane", + GoType: "ClusterControlPlane", + Description: "Configuration related to the cluster control plane.", + Required: []string{ + "version", + "subnetIds", + "configEncryption", + "iamInstanceProfile", + "databaseEncryption", + "awsServicesAuthentication", + }, + Properties: map[string]*dcl.Property{ + "awsServicesAuthentication": &dcl.Property{ + Type: "object", + GoName: "AwsServicesAuthentication", + GoType: "ClusterControlPlaneAwsServicesAuthentication", + Description: "Authentication configuration for management of AWS resources.", + Required: []string{ + "roleArn", + }, + Properties: map[string]*dcl.Property{ + "roleArn": &dcl.Property{ + Type: "string", + GoName: "RoleArn", + Description: "The Amazon Resource Name (ARN) of the role that the Anthos Multi-Cloud API will assume when managing AWS resources on your account.", + }, + "roleSessionName": &dcl.Property{ + Type: "string", + GoName: "RoleSessionName", + Description: "Optional. An identifier for the assumed role session. 
When unspecified, it defaults to `multicloud-service-agent`.", + ServerDefault: true, + }, + }, + }, + "configEncryption": &dcl.Property{ + Type: "object", + GoName: "ConfigEncryption", + GoType: "ClusterControlPlaneConfigEncryption", + Description: "The ARN of the AWS KMS key used to encrypt cluster configuration.", + Required: []string{ + "kmsKeyArn", + }, + Properties: map[string]*dcl.Property{ + "kmsKeyArn": &dcl.Property{ + Type: "string", + GoName: "KmsKeyArn", + Description: "The ARN of the AWS KMS key used to encrypt cluster configuration.", + }, + }, + }, + "databaseEncryption": &dcl.Property{ + Type: "object", + GoName: "DatabaseEncryption", + GoType: "ClusterControlPlaneDatabaseEncryption", + Description: "The ARN of the AWS KMS key used to encrypt cluster secrets.", + Immutable: true, + Required: []string{ + "kmsKeyArn", + }, + Properties: map[string]*dcl.Property{ + "kmsKeyArn": &dcl.Property{ + Type: "string", + GoName: "KmsKeyArn", + Description: "The ARN of the AWS KMS key used to encrypt cluster secrets.", + Immutable: true, + }, + }, + }, + "iamInstanceProfile": &dcl.Property{ + Type: "string", + GoName: "IamInstanceProfile", + Description: "The name of the AWS IAM instance pofile to assign to each control plane replica.", + }, +{{- if ne $.TargetVersionName "ga" }} + "instancePlacement": &dcl.Property{ + Type: "object", + GoName: "InstancePlacement", + GoType: "ClusterControlPlaneInstancePlacement", + Description: "Details of placement information for an instance.", + Immutable: true, + ServerDefault: true, + Properties: map[string]*dcl.Property{ + "tenancy": &dcl.Property{ + Type: "string", + GoName: "Tenancy", + GoType: "ClusterControlPlaneInstancePlacementTenancyEnum", + Description: "The tenancy for the instance. 
Possible values: TENANCY_UNSPECIFIED, DEFAULT, DEDICATED, HOST", + Immutable: true, + ServerDefault: true, + Enum: []string{ + "TENANCY_UNSPECIFIED", + "DEFAULT", + "DEDICATED", + "HOST", + }, + }, + }, + }, +{{- end }} + "instanceType": &dcl.Property{ + Type: "string", + GoName: "InstanceType", + Description: "Optional. The AWS instance type. When unspecified, it defaults to `m5.large`.", + ServerDefault: true, + }, + "mainVolume": &dcl.Property{ + Type: "object", + GoName: "MainVolume", + GoType: "ClusterControlPlaneMainVolume", + Description: "Optional. Configuration related to the main volume provisioned for each control plane replica. The main volume is in charge of storing all of the cluster's etcd state. Volumes will be provisioned in the availability zone associated with the corresponding subnet. When unspecified, it defaults to 8 GiB with the GP2 volume type.", + Immutable: true, + ServerDefault: true, + Properties: map[string]*dcl.Property{ + "iops": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "Iops", + Description: "Optional. The number of I/O operations per second (IOPS) to provision for GP3 volume.", + Immutable: true, + ServerDefault: true, + }, + "kmsKeyArn": &dcl.Property{ + Type: "string", + GoName: "KmsKeyArn", + Description: "Optional. The Amazon Resource Name (ARN) of the Customer Managed Key (CMK) used to encrypt AWS EBS volumes. If not specified, the default Amazon managed key associated to the AWS region where this cluster runs will be used.", + Immutable: true, + }, + "sizeGib": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "SizeGib", + Description: "Optional. The size of the volume, in GiBs. When unspecified, a default value is provided. See the specific reference in the parent resource.", + Immutable: true, + ServerDefault: true, + }, + "throughput": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "Throughput", + Description: "Optional. The throughput to provision for the volume, in MiB/s. 
Only valid if the volume type is GP3. If volume type is gp3 and throughput is not specified, the throughput will defaults to 125.", + Immutable: true, + ServerDefault: true, + }, + "volumeType": &dcl.Property{ + Type: "string", + GoName: "VolumeType", + GoType: "ClusterControlPlaneMainVolumeVolumeTypeEnum", + Description: "Optional. Type of the EBS volume. When unspecified, it defaults to GP2 volume. Possible values: VOLUME_TYPE_UNSPECIFIED, GP2, GP3", + Immutable: true, + ServerDefault: true, + Enum: []string{ + "VOLUME_TYPE_UNSPECIFIED", + "GP2", + "GP3", + }, + }, + }, + }, + "proxyConfig": &dcl.Property{ + Type: "object", + GoName: "ProxyConfig", + GoType: "ClusterControlPlaneProxyConfig", + Description: "Proxy configuration for outbound HTTP(S) traffic.", + Required: []string{ + "secretArn", + "secretVersion", + }, + Properties: map[string]*dcl.Property{ + "secretArn": &dcl.Property{ + Type: "string", + GoName: "SecretArn", + Description: "The ARN of the AWS Secret Manager secret that contains the HTTP(S) proxy configuration.", + }, + "secretVersion": &dcl.Property{ + Type: "string", + GoName: "SecretVersion", + Description: "The version string of the AWS Secret Manager secret that contains the HTTP(S) proxy configuration.", + }, + }, + }, + "rootVolume": &dcl.Property{ + Type: "object", + GoName: "RootVolume", + GoType: "ClusterControlPlaneRootVolume", + Description: "Optional. Configuration related to the root volume provisioned for each control plane replica. Volumes will be provisioned in the availability zone associated with the corresponding subnet. When unspecified, it defaults to 32 GiB with the GP2 volume type.", + ServerDefault: true, + Properties: map[string]*dcl.Property{ + "iops": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "Iops", + Description: "Optional. 
The number of I/O operations per second (IOPS) to provision for GP3 volume.", + ServerDefault: true, + }, + "kmsKeyArn": &dcl.Property{ + Type: "string", + GoName: "KmsKeyArn", + Description: "Optional. The Amazon Resource Name (ARN) of the Customer Managed Key (CMK) used to encrypt AWS EBS volumes. If not specified, the default Amazon managed key associated to the AWS region where this cluster runs will be used.", + }, + "sizeGib": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "SizeGib", + Description: "Optional. The size of the volume, in GiBs. When unspecified, a default value is provided. See the specific reference in the parent resource.", + ServerDefault: true, + }, + "throughput": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "Throughput", + Description: "Optional. The throughput to provision for the volume, in MiB/s. Only valid if the volume type is GP3. If volume type is gp3 and throughput is not specified, the throughput will defaults to 125.", + ServerDefault: true, + }, + "volumeType": &dcl.Property{ + Type: "string", + GoName: "VolumeType", + GoType: "ClusterControlPlaneRootVolumeVolumeTypeEnum", + Description: "Optional. Type of the EBS volume. When unspecified, it defaults to GP2 volume. Possible values: VOLUME_TYPE_UNSPECIFIED, GP2, GP3", + ServerDefault: true, + Enum: []string{ + "VOLUME_TYPE_UNSPECIFIED", + "GP2", + "GP3", + }, + }, + }, + }, + "securityGroupIds": &dcl.Property{ + Type: "array", + GoName: "SecurityGroupIds", + Description: "Optional. The IDs of additional security groups to add to control plane replicas. The Anthos Multi-Cloud API will automatically create and manage security groups with the minimum rules needed for a functioning cluster.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + "sshConfig": &dcl.Property{ + Type: "object", + GoName: "SshConfig", + GoType: "ClusterControlPlaneSshConfig", + Description: "Optional. 
SSH configuration for how to access the underlying control plane machines.", + Required: []string{ + "ec2KeyPair", + }, + Properties: map[string]*dcl.Property{ + "ec2KeyPair": &dcl.Property{ + Type: "string", + GoName: "Ec2KeyPair", + Description: "The name of the EC2 key pair used to login into cluster machines.", + }, + }, + }, + "subnetIds": &dcl.Property{ + Type: "array", + GoName: "SubnetIds", + Description: "The list of subnets where control plane replicas will run. A replica will be provisioned on each subnet and up to three values can be provided. Each subnet must be in a different AWS Availability Zone (AZ).", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + "tags": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "Tags", + Description: "Optional. A set of AWS resource tags to propagate to all underlying managed AWS resources. Specify at most 50 pairs containing alphanumerics, spaces, and symbols (.+-=_:@/). Keys can be up to 127 Unicode characters. Values can be up to 255 Unicode characters.", + }, + "version": &dcl.Property{ + Type: "string", + GoName: "Version", + Description: "The Kubernetes version to run on control plane replicas (e.g. `1.19.10-gke.1000`). You can list all supported versions on a given Google Cloud region by calling .", + }, + }, + }, + "createTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "CreateTime", + ReadOnly: true, + Description: "Output only. The time at which this cluster was created.", + Immutable: true, + }, + "description": &dcl.Property{ + Type: "string", + GoName: "Description", + Description: "Optional. A human readable description of this cluster. Cannot be longer than 255 UTF-8 encoded bytes.", + }, + "endpoint": &dcl.Property{ + Type: "string", + GoName: "Endpoint", + ReadOnly: true, + Description: "Output only. 
The endpoint of the cluster's API server.", + Immutable: true, + }, + "etag": &dcl.Property{ + Type: "string", + GoName: "Etag", + ReadOnly: true, + Description: "Allows clients to perform consistent read-modify-writes through optimistic concurrency control. May be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.", + Immutable: true, + }, + "fleet": &dcl.Property{ + Type: "object", + GoName: "Fleet", + GoType: "ClusterFleet", + Description: "Fleet configuration.", + Immutable: true, + Required: []string{ + "project", + }, + Properties: map[string]*dcl.Property{ + "membership": &dcl.Property{ + Type: "string", + GoName: "Membership", + ReadOnly: true, + Description: "The name of the managed Hub Membership resource associated to this cluster. Membership names are formatted as projects//locations/global/membership/.", + Immutable: true, + }, + "project": &dcl.Property{ + Type: "string", + GoName: "Project", + Description: "The number of the Fleet host project where this cluster will be registered.", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Cloudresourcemanager/Project", + Field: "name", + Parent: true, + }, + }, + HasLongForm: true, + }, + }, + }, + "location": &dcl.Property{ + Type: "string", + GoName: "Location", + Description: "The location for the resource", + Immutable: true, + Parameter: true, + }, +{{- if ne $.TargetVersionName "ga" }} + "loggingConfig": &dcl.Property{ + Type: "object", + GoName: "LoggingConfig", + GoType: "ClusterLoggingConfig", + Description: "Logging configuration.", + ServerDefault: true, + Properties: map[string]*dcl.Property{ + "componentConfig": &dcl.Property{ + Type: "object", + GoName: "ComponentConfig", + GoType: "ClusterLoggingConfigComponentConfig", + Description: "Configuration of the logging components.", + ServerDefault: true, + Properties: map[string]*dcl.Property{ + "enableComponents": 
&dcl.Property{ + Type: "array", + GoName: "EnableComponents", + Description: "Components of the logging configuration to be enabled.", + ServerDefault: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "ClusterLoggingConfigComponentConfigEnableComponentsEnum", + Enum: []string{ + "COMPONENT_UNSPECIFIED", + "SYSTEM_COMPONENTS", + "WORKLOADS", + }, + }, + }, + }, + }, + }, + }, + "monitoringConfig": &dcl.Property{ + Type: "object", + GoName: "MonitoringConfig", + GoType: "ClusterMonitoringConfig", + Description: "Monitoring configuration.", + ServerDefault: true, + Properties: map[string]*dcl.Property{ + "managedPrometheusConfig": &dcl.Property{ + Type: "object", + GoName: "ManagedPrometheusConfig", + GoType: "ClusterMonitoringConfigManagedPrometheusConfig", + Description: "Configuration of the Google Cloud Managed Service for Prometheus.", + ServerDefault: true, + Properties: map[string]*dcl.Property{ + "enabled": &dcl.Property{ + Type: "boolean", + GoName: "Enabled", + Description: "Configuration of the enable Managed Collection.", + ServerDefault: true, + }, + }, + }, + }, + }, +{{- end }} + "name": &dcl.Property{ + Type: "string", + GoName: "Name", + Description: "The name of this resource.", + Immutable: true, + HasLongForm: true, + }, + "networking": &dcl.Property{ + Type: "object", + GoName: "Networking", + GoType: "ClusterNetworking", + Description: "Cluster-wide networking configuration.", + Required: []string{ + "vpcId", + "podAddressCidrBlocks", + "serviceAddressCidrBlocks", + }, + Properties: map[string]*dcl.Property{ + "perNodePoolSgRulesDisabled": &dcl.Property{ + Type: "boolean", + GoName: "PerNodePoolSgRulesDisabled", + Description: "Disable the per node pool subnet security group rules on the control plane security group. When set to true, you must also provide one or more security groups that ensure node pools are able to send requests to the control plane on TCP/443 and TCP/8132. 
Failure to do so may result in unavailable node pools.", + }, + "podAddressCidrBlocks": &dcl.Property{ + Type: "array", + GoName: "PodAddressCidrBlocks", + Description: "All pods in the cluster are assigned an RFC1918 IPv4 address from these ranges. Only a single range is supported. This field cannot be changed after creation.", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + "serviceAddressCidrBlocks": &dcl.Property{ + Type: "array", + GoName: "ServiceAddressCidrBlocks", + Description: "All services in the cluster are assigned an RFC1918 IPv4 address from these ranges. Only a single range is supported. This field cannot be changed after creation.", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + "vpcId": &dcl.Property{ + Type: "string", + GoName: "VPCId", + Description: "The VPC associated with the cluster. All component clusters (i.e. control plane and node pools) run on a single VPC. This field cannot be changed after creation.", + Immutable: true, + }, + }, + }, + "project": &dcl.Property{ + Type: "string", + GoName: "Project", + Description: "The project for the resource", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Cloudresourcemanager/Project", + Field: "name", + Parent: true, + }, + }, + Parameter: true, + }, + "reconciling": &dcl.Property{ + Type: "boolean", + GoName: "Reconciling", + ReadOnly: true, + Description: "Output only. If set, there are currently changes in flight to the cluster.", + Immutable: true, + }, + "state": &dcl.Property{ + Type: "string", + GoName: "State", + GoType: "ClusterStateEnum", + ReadOnly: true, + Description: "Output only. The current state of the cluster. 
Possible values: STATE_UNSPECIFIED, PROVISIONING, RUNNING, RECONCILING, STOPPING, ERROR, DEGRADED", + Immutable: true, + Enum: []string{ + "STATE_UNSPECIFIED", + "PROVISIONING", + "RUNNING", + "RECONCILING", + "STOPPING", + "ERROR", + "DEGRADED", + }, + }, + "uid": &dcl.Property{ + Type: "string", + GoName: "Uid", + ReadOnly: true, + Description: "Output only. A globally unique identifier for the cluster.", + Immutable: true, + }, + "updateTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "UpdateTime", + ReadOnly: true, + Description: "Output only. The time at which this cluster was last updated.", + Immutable: true, + }, + "workloadIdentityConfig": &dcl.Property{ + Type: "object", + GoName: "WorkloadIdentityConfig", + GoType: "ClusterWorkloadIdentityConfig", + ReadOnly: true, + Description: "Output only. Workload Identity settings.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "identityProvider": &dcl.Property{ + Type: "string", + GoName: "IdentityProvider", + Description: "The ID of the OIDC Identity Provider (IdP) associated to the Workload Identity Pool.", + Immutable: true, + }, + "issuerUri": &dcl.Property{ + Type: "string", + GoName: "IssuerUri", + Description: "The OIDC issuer URL for this cluster.", + Immutable: true, + }, + "workloadPool": &dcl.Property{ + Type: "string", + GoName: "WorkloadPool", + Description: "The Workload Identity Pool associated to the cluster.", + Immutable: true, + }, + }, + }, + }, + }, + }, + }, + }, + } +} diff --git a/mmv1/third_party/terraform/services/containeraws/node_pool.go.tmpl b/mmv1/third_party/terraform/services/containeraws/node_pool.go.tmpl new file mode 100644 index 000000000000..1f8e1656635d --- /dev/null +++ b/mmv1/third_party/terraform/services/containeraws/node_pool.go.tmpl @@ -0,0 +1,1314 @@ +package containeraws + +import ( + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "time" + +{{- if ne $.TargetVersionName "ga" }} + "google.golang.org/api/googleapi" +{{- 
end }} + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +{{- if eq $.TargetVersionName "ga" }} + "google.golang.org/api/googleapi" +{{- end }} +) + +type NodePool struct { + Name *string `json:"name"` + Version *string `json:"version"` + Config *NodePoolConfig `json:"config"` + Autoscaling *NodePoolAutoscaling `json:"autoscaling"` + SubnetId *string `json:"subnetId"` + State *NodePoolStateEnum `json:"state"` + Uid *string `json:"uid"` + Reconciling *bool `json:"reconciling"` + CreateTime *string `json:"createTime"` + UpdateTime *string `json:"updateTime"` + Etag *string `json:"etag"` + Annotations map[string]string `json:"annotations"` + MaxPodsConstraint *NodePoolMaxPodsConstraint `json:"maxPodsConstraint"` + Management *NodePoolManagement `json:"management"` + KubeletConfig *NodePoolKubeletConfig `json:"kubeletConfig"` + UpdateSettings *NodePoolUpdateSettings `json:"updateSettings"` + Project *string `json:"project"` + Location *string `json:"location"` + Cluster *string `json:"cluster"` +} + +func (r *NodePool) String() string { + return dcl.SprintResource(r) +} + +// The enum NodePoolConfigRootVolumeVolumeTypeEnum. +type NodePoolConfigRootVolumeVolumeTypeEnum string + +// NodePoolConfigRootVolumeVolumeTypeEnumRef returns a *NodePoolConfigRootVolumeVolumeTypeEnum with the value of string s +// If the empty string is provided, nil is returned. +func NodePoolConfigRootVolumeVolumeTypeEnumRef(s string) *NodePoolConfigRootVolumeVolumeTypeEnum { + v := NodePoolConfigRootVolumeVolumeTypeEnum(s) + return &v +} + +func (v NodePoolConfigRootVolumeVolumeTypeEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"VOLUME_TYPE_UNSPECIFIED", "GP2", "GP3"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "NodePoolConfigRootVolumeVolumeTypeEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum NodePoolConfigTaintsEffectEnum. 
+type NodePoolConfigTaintsEffectEnum string + +// NodePoolConfigTaintsEffectEnumRef returns a *NodePoolConfigTaintsEffectEnum with the value of string s +// If the empty string is provided, nil is returned. +func NodePoolConfigTaintsEffectEnumRef(s string) *NodePoolConfigTaintsEffectEnum { + v := NodePoolConfigTaintsEffectEnum(s) + return &v +} + +func (v NodePoolConfigTaintsEffectEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"EFFECT_UNSPECIFIED", "NO_SCHEDULE", "PREFER_NO_SCHEDULE", "NO_EXECUTE"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "NodePoolConfigTaintsEffectEnum", + Value: string(v), + Valid: []string{}, + } +} + +{{- if ne $.TargetVersionName "ga" }} +// The enum NodePoolConfigInstancePlacementTenancyEnum. +type NodePoolConfigInstancePlacementTenancyEnum string + +// NodePoolConfigInstancePlacementTenancyEnumRef returns a *NodePoolConfigInstancePlacementTenancyEnum with the value of string s +// If the empty string is provided, nil is returned. +func NodePoolConfigInstancePlacementTenancyEnumRef(s string) *NodePoolConfigInstancePlacementTenancyEnum { + v := NodePoolConfigInstancePlacementTenancyEnum(s) + return &v +} + +func (v NodePoolConfigInstancePlacementTenancyEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"TENANCY_UNSPECIFIED", "DEFAULT", "DEDICATED", "HOST"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "NodePoolConfigInstancePlacementTenancyEnum", + Value: string(v), + Valid: []string{}, + } +} + +{{- end }} +// The enum NodePoolStateEnum. +type NodePoolStateEnum string + +// NodePoolStateEnumRef returns a *NodePoolStateEnum with the value of string s +// If the empty string is provided, nil is returned. 
+func NodePoolStateEnumRef(s string) *NodePoolStateEnum { + v := NodePoolStateEnum(s) + return &v +} + +func (v NodePoolStateEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"STATE_UNSPECIFIED", "PROVISIONING", "RUNNING", "RECONCILING", "STOPPING", "ERROR", "DEGRADED"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "NodePoolStateEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum NodePoolKubeletConfigCpuManagerPolicyEnum. +type NodePoolKubeletConfigCpuManagerPolicyEnum string + +// NodePoolKubeletConfigCpuManagerPolicyEnumRef returns a *NodePoolKubeletConfigCpuManagerPolicyEnum with the value of string s +// If the empty string is provided, nil is returned. +func NodePoolKubeletConfigCpuManagerPolicyEnumRef(s string) *NodePoolKubeletConfigCpuManagerPolicyEnum { + v := NodePoolKubeletConfigCpuManagerPolicyEnum(s) + return &v +} + +func (v NodePoolKubeletConfigCpuManagerPolicyEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. 
+ return nil + } + for _, s := range []string{"none", "static"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "NodePoolKubeletConfigCpuManagerPolicyEnum", + Value: string(v), + Valid: []string{}, + } +} + +type NodePoolConfig struct { + empty bool `json:"-"` + InstanceType *string `json:"instanceType"` + RootVolume *NodePoolConfigRootVolume `json:"rootVolume"` + Taints []NodePoolConfigTaints `json:"taints"` + Labels map[string]string `json:"labels"` + Tags map[string]string `json:"tags"` + IamInstanceProfile *string `json:"iamInstanceProfile"` + ConfigEncryption *NodePoolConfigConfigEncryption `json:"configEncryption"` + SshConfig *NodePoolConfigSshConfig `json:"sshConfig"` +{{- if ne $.TargetVersionName "ga" }} + SpotConfig *NodePoolConfigSpotConfig `json:"spotConfig"` +{{- end }} + SecurityGroupIds []string `json:"securityGroupIds"` + ProxyConfig *NodePoolConfigProxyConfig `json:"proxyConfig"` +{{- if ne $.TargetVersionName "ga" }} + InstancePlacement *NodePoolConfigInstancePlacement `json:"instancePlacement"` + ImageType *string `json:"imageType"` +{{- end }} + AutoscalingMetricsCollection *NodePoolConfigAutoscalingMetricsCollection `json:"autoscalingMetricsCollection"` +} + +type jsonNodePoolConfig NodePoolConfig + +func (r *NodePoolConfig) UnmarshalJSON(data []byte) error { + var res jsonNodePoolConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyNodePoolConfig + } else { + + r.InstanceType = res.InstanceType + + r.RootVolume = res.RootVolume + + r.Taints = res.Taints + + r.Labels = res.Labels + + r.Tags = res.Tags + + r.IamInstanceProfile = res.IamInstanceProfile + + r.ConfigEncryption = res.ConfigEncryption + + r.SshConfig = res.SshConfig + +{{- if ne $.TargetVersionName "ga" }} + r.SpotConfig = res.SpotConfig + +{{- end }} + r.SecurityGroupIds = res.SecurityGroupIds + + r.ProxyConfig = res.ProxyConfig 
+{{- if ne $.TargetVersionName "ga" }} + + r.InstancePlacement = res.InstancePlacement + + r.ImageType = res.ImageType +{{- end }} + + r.AutoscalingMetricsCollection = res.AutoscalingMetricsCollection + + } + return nil +} + +// This object is used to assert a desired state where this NodePoolConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyNodePoolConfig *NodePoolConfig = &NodePoolConfig{empty: true} + +func (r *NodePoolConfig) Empty() bool { + return r.empty +} + +func (r *NodePoolConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *NodePoolConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type NodePoolConfigRootVolume struct { + empty bool `json:"-"` + SizeGib *int64 `json:"sizeGib"` + VolumeType *NodePoolConfigRootVolumeVolumeTypeEnum `json:"volumeType"` + Iops *int64 `json:"iops"` + Throughput *int64 `json:"throughput"` + KmsKeyArn *string `json:"kmsKeyArn"` +} + +type jsonNodePoolConfigRootVolume NodePoolConfigRootVolume + +func (r *NodePoolConfigRootVolume) UnmarshalJSON(data []byte) error { + var res jsonNodePoolConfigRootVolume + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyNodePoolConfigRootVolume + } else { + + r.SizeGib = res.SizeGib + + r.VolumeType = res.VolumeType + + r.Iops = res.Iops + + r.Throughput = res.Throughput + + r.KmsKeyArn = res.KmsKeyArn + + } + return nil +} + +// This object is used to assert a desired state where this NodePoolConfigRootVolume is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyNodePoolConfigRootVolume *NodePoolConfigRootVolume = &NodePoolConfigRootVolume{empty: true} + +func (r *NodePoolConfigRootVolume) Empty() bool { + return r.empty +} + +func (r *NodePoolConfigRootVolume) String() string { + return dcl.SprintResource(r) +} + +func (r *NodePoolConfigRootVolume) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type NodePoolConfigTaints struct { + empty bool `json:"-"` + Key *string `json:"key"` + Value *string `json:"value"` + Effect *NodePoolConfigTaintsEffectEnum `json:"effect"` +} + +type jsonNodePoolConfigTaints NodePoolConfigTaints + +func (r *NodePoolConfigTaints) UnmarshalJSON(data []byte) error { + var res jsonNodePoolConfigTaints + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyNodePoolConfigTaints + } else { + + r.Key = res.Key + + r.Value = res.Value + + r.Effect = res.Effect + + } + return nil +} + +// This object is used to assert a desired state where this NodePoolConfigTaints is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyNodePoolConfigTaints *NodePoolConfigTaints = &NodePoolConfigTaints{empty: true} + +func (r *NodePoolConfigTaints) Empty() bool { + return r.empty +} + +func (r *NodePoolConfigTaints) String() string { + return dcl.SprintResource(r) +} + +func (r *NodePoolConfigTaints) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type NodePoolConfigConfigEncryption struct { + empty bool `json:"-"` + KmsKeyArn *string `json:"kmsKeyArn"` +} + +type jsonNodePoolConfigConfigEncryption NodePoolConfigConfigEncryption + +func (r *NodePoolConfigConfigEncryption) UnmarshalJSON(data []byte) error { + var res jsonNodePoolConfigConfigEncryption + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyNodePoolConfigConfigEncryption + } else { + + r.KmsKeyArn = res.KmsKeyArn + + } + return nil +} + +// This object is used to assert a desired state where this NodePoolConfigConfigEncryption is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyNodePoolConfigConfigEncryption *NodePoolConfigConfigEncryption = &NodePoolConfigConfigEncryption{empty: true} + +func (r *NodePoolConfigConfigEncryption) Empty() bool { + return r.empty +} + +func (r *NodePoolConfigConfigEncryption) String() string { + return dcl.SprintResource(r) +} + +func (r *NodePoolConfigConfigEncryption) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type NodePoolConfigSshConfig struct { + empty bool `json:"-"` + Ec2KeyPair *string `json:"ec2KeyPair"` +} + +type jsonNodePoolConfigSshConfig NodePoolConfigSshConfig + +func (r *NodePoolConfigSshConfig) UnmarshalJSON(data []byte) error { + var res jsonNodePoolConfigSshConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyNodePoolConfigSshConfig + } else { + + r.Ec2KeyPair = res.Ec2KeyPair + + } + return nil +} + +// This object is used to assert a desired state where this NodePoolConfigSshConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyNodePoolConfigSshConfig *NodePoolConfigSshConfig = &NodePoolConfigSshConfig{empty: true} + +func (r *NodePoolConfigSshConfig) Empty() bool { + return r.empty +} + +func (r *NodePoolConfigSshConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *NodePoolConfigSshConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +{{- if ne $.TargetVersionName "ga" }} +type NodePoolConfigSpotConfig struct { + empty bool `json:"-"` + InstanceTypes []string `json:"instanceTypes"` +} + +type jsonNodePoolConfigSpotConfig NodePoolConfigSpotConfig + +func (r *NodePoolConfigSpotConfig) UnmarshalJSON(data []byte) error { + var res jsonNodePoolConfigSpotConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyNodePoolConfigSpotConfig + } else { + + r.InstanceTypes = res.InstanceTypes + + } + return nil +} + +// This object is used to assert a desired state where this NodePoolConfigSpotConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyNodePoolConfigSpotConfig *NodePoolConfigSpotConfig = &NodePoolConfigSpotConfig{empty: true} + +func (r *NodePoolConfigSpotConfig) Empty() bool { + return r.empty +} + +func (r *NodePoolConfigSpotConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *NodePoolConfigSpotConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +{{- end }} +type NodePoolConfigProxyConfig struct { + empty bool `json:"-"` + SecretArn *string `json:"secretArn"` + SecretVersion *string `json:"secretVersion"` +} + +type jsonNodePoolConfigProxyConfig NodePoolConfigProxyConfig + +func (r *NodePoolConfigProxyConfig) UnmarshalJSON(data []byte) error { + var res jsonNodePoolConfigProxyConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyNodePoolConfigProxyConfig + } else { + + r.SecretArn = res.SecretArn + + r.SecretVersion = res.SecretVersion + + } + return nil +} + +// This object is used to assert a desired state where this NodePoolConfigProxyConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyNodePoolConfigProxyConfig *NodePoolConfigProxyConfig = &NodePoolConfigProxyConfig{empty: true} + +func (r *NodePoolConfigProxyConfig) Empty() bool { + return r.empty +} + +func (r *NodePoolConfigProxyConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *NodePoolConfigProxyConfig) HashCode() string { +{{- if ne $.TargetVersionName "ga" }} + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type NodePoolConfigInstancePlacement struct { + empty bool `json:"-"` + Tenancy *NodePoolConfigInstancePlacementTenancyEnum `json:"tenancy"` +} + +type jsonNodePoolConfigInstancePlacement NodePoolConfigInstancePlacement + +func (r *NodePoolConfigInstancePlacement) UnmarshalJSON(data []byte) error { + var res jsonNodePoolConfigInstancePlacement + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyNodePoolConfigInstancePlacement + } else { + + r.Tenancy = res.Tenancy + + } + return nil +} + +// This object is used to assert a desired state where this NodePoolConfigInstancePlacement is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyNodePoolConfigInstancePlacement *NodePoolConfigInstancePlacement = &NodePoolConfigInstancePlacement{empty: true} + +func (r *NodePoolConfigInstancePlacement) Empty() bool { + return r.empty +} + +func (r *NodePoolConfigInstancePlacement) String() string { + return dcl.SprintResource(r) +} + +func (r *NodePoolConfigInstancePlacement) HashCode() string { +{{- end }} + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type NodePoolConfigAutoscalingMetricsCollection struct { + empty bool `json:"-"` + Granularity *string `json:"granularity"` + Metrics []string `json:"metrics"` +} + +type jsonNodePoolConfigAutoscalingMetricsCollection NodePoolConfigAutoscalingMetricsCollection + +func (r *NodePoolConfigAutoscalingMetricsCollection) UnmarshalJSON(data []byte) error { + var res jsonNodePoolConfigAutoscalingMetricsCollection + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyNodePoolConfigAutoscalingMetricsCollection + } else { + + r.Granularity = res.Granularity + + r.Metrics = res.Metrics + + } + return nil +} + +// This object is used to assert a desired state where this NodePoolConfigAutoscalingMetricsCollection is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyNodePoolConfigAutoscalingMetricsCollection *NodePoolConfigAutoscalingMetricsCollection = &NodePoolConfigAutoscalingMetricsCollection{empty: true} + +func (r *NodePoolConfigAutoscalingMetricsCollection) Empty() bool { + return r.empty +} + +func (r *NodePoolConfigAutoscalingMetricsCollection) String() string { + return dcl.SprintResource(r) +} + +func (r *NodePoolConfigAutoscalingMetricsCollection) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type NodePoolAutoscaling struct { + empty bool `json:"-"` + MinNodeCount *int64 `json:"minNodeCount"` + MaxNodeCount *int64 `json:"maxNodeCount"` +} + +type jsonNodePoolAutoscaling NodePoolAutoscaling + +func (r *NodePoolAutoscaling) UnmarshalJSON(data []byte) error { + var res jsonNodePoolAutoscaling + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyNodePoolAutoscaling + } else { + + r.MinNodeCount = res.MinNodeCount + + r.MaxNodeCount = res.MaxNodeCount + + } + return nil +} + +// This object is used to assert a desired state where this NodePoolAutoscaling is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyNodePoolAutoscaling *NodePoolAutoscaling = &NodePoolAutoscaling{empty: true} + +func (r *NodePoolAutoscaling) Empty() bool { + return r.empty +} + +func (r *NodePoolAutoscaling) String() string { + return dcl.SprintResource(r) +} + +func (r *NodePoolAutoscaling) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type NodePoolMaxPodsConstraint struct { + empty bool `json:"-"` + MaxPodsPerNode *int64 `json:"maxPodsPerNode"` +} + +type jsonNodePoolMaxPodsConstraint NodePoolMaxPodsConstraint + +func (r *NodePoolMaxPodsConstraint) UnmarshalJSON(data []byte) error { + var res jsonNodePoolMaxPodsConstraint + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyNodePoolMaxPodsConstraint + } else { + + r.MaxPodsPerNode = res.MaxPodsPerNode + + } + return nil +} + +// This object is used to assert a desired state where this NodePoolMaxPodsConstraint is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyNodePoolMaxPodsConstraint *NodePoolMaxPodsConstraint = &NodePoolMaxPodsConstraint{empty: true} + +func (r *NodePoolMaxPodsConstraint) Empty() bool { + return r.empty +} + +func (r *NodePoolMaxPodsConstraint) String() string { + return dcl.SprintResource(r) +} + +func (r *NodePoolMaxPodsConstraint) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type NodePoolManagement struct { + empty bool `json:"-"` + AutoRepair *bool `json:"autoRepair"` +} + +type jsonNodePoolManagement NodePoolManagement + +func (r *NodePoolManagement) UnmarshalJSON(data []byte) error { + var res jsonNodePoolManagement + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyNodePoolManagement + } else { + + r.AutoRepair = res.AutoRepair + + } + return nil +} + +// This object is used to assert a desired state where this NodePoolManagement is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyNodePoolManagement *NodePoolManagement = &NodePoolManagement{empty: true} + +func (r *NodePoolManagement) Empty() bool { + return r.empty +} + +func (r *NodePoolManagement) String() string { + return dcl.SprintResource(r) +} + +func (r *NodePoolManagement) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type NodePoolKubeletConfig struct { + empty bool `json:"-"` + CpuManagerPolicy *NodePoolKubeletConfigCpuManagerPolicyEnum `json:"cpuManagerPolicy"` + CpuCfsQuota *bool `json:"cpuCfsQuota"` + CpuCfsQuotaPeriod *string `json:"cpuCfsQuotaPeriod"` + PodPidsLimit *int64 `json:"podPidsLimit"` +} + +type jsonNodePoolKubeletConfig NodePoolKubeletConfig + +func (r *NodePoolKubeletConfig) UnmarshalJSON(data []byte) error { + var res jsonNodePoolKubeletConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyNodePoolKubeletConfig + } else { + + r.CpuManagerPolicy = res.CpuManagerPolicy + + r.CpuCfsQuota = res.CpuCfsQuota + + r.CpuCfsQuotaPeriod = res.CpuCfsQuotaPeriod + + r.PodPidsLimit = res.PodPidsLimit + + } + return nil +} + +// This object is used to assert a desired state where this NodePoolKubeletConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyNodePoolKubeletConfig *NodePoolKubeletConfig = &NodePoolKubeletConfig{empty: true} + +func (r *NodePoolKubeletConfig) Empty() bool { + return r.empty +} + +func (r *NodePoolKubeletConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *NodePoolKubeletConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type NodePoolUpdateSettings struct { + empty bool `json:"-"` + SurgeSettings *NodePoolUpdateSettingsSurgeSettings `json:"surgeSettings"` +} + +type jsonNodePoolUpdateSettings NodePoolUpdateSettings + +func (r *NodePoolUpdateSettings) UnmarshalJSON(data []byte) error { + var res jsonNodePoolUpdateSettings + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyNodePoolUpdateSettings + } else { + + r.SurgeSettings = res.SurgeSettings + + } + return nil +} + +// This object is used to assert a desired state where this NodePoolUpdateSettings is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyNodePoolUpdateSettings *NodePoolUpdateSettings = &NodePoolUpdateSettings{empty: true} + +func (r *NodePoolUpdateSettings) Empty() bool { + return r.empty +} + +func (r *NodePoolUpdateSettings) String() string { + return dcl.SprintResource(r) +} + +func (r *NodePoolUpdateSettings) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type NodePoolUpdateSettingsSurgeSettings struct { + empty bool `json:"-"` + MaxSurge *int64 `json:"maxSurge"` + MaxUnavailable *int64 `json:"maxUnavailable"` +} + +type jsonNodePoolUpdateSettingsSurgeSettings NodePoolUpdateSettingsSurgeSettings + +func (r *NodePoolUpdateSettingsSurgeSettings) UnmarshalJSON(data []byte) error { + var res jsonNodePoolUpdateSettingsSurgeSettings + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyNodePoolUpdateSettingsSurgeSettings + } else { + + r.MaxSurge = res.MaxSurge + + r.MaxUnavailable = res.MaxUnavailable + + } + return nil +} + +// This object is used to assert a desired state where this NodePoolUpdateSettingsSurgeSettings is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyNodePoolUpdateSettingsSurgeSettings *NodePoolUpdateSettingsSurgeSettings = &NodePoolUpdateSettingsSurgeSettings{empty: true} + +func (r *NodePoolUpdateSettingsSurgeSettings) Empty() bool { + return r.empty +} + +func (r *NodePoolUpdateSettingsSurgeSettings) String() string { + return dcl.SprintResource(r) +} + +func (r *NodePoolUpdateSettingsSurgeSettings) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +// Describe returns a simple description of this resource to ensure that automated tools +// can identify it. +func (r *NodePool) Describe() dcl.ServiceTypeVersion { + return dcl.ServiceTypeVersion{ + Service: "container_aws", + Type: "NodePool", +{{- if ne $.TargetVersionName "ga" }} + Version: "beta", +{{- else }} + Version: "containeraws", +{{- end }} + } +} + +func (r *NodePool) ID() (string, error) { + if err := extractNodePoolFields(r); err != nil { + return "", err + } + nr := r.urlNormalized() + params := map[string]interface{}{ + "name": dcl.ValueOrEmptyString(nr.Name), + "version": dcl.ValueOrEmptyString(nr.Version), + "config": dcl.ValueOrEmptyString(nr.Config), + "autoscaling": dcl.ValueOrEmptyString(nr.Autoscaling), + "subnet_id": dcl.ValueOrEmptyString(nr.SubnetId), + "state": dcl.ValueOrEmptyString(nr.State), + "uid": dcl.ValueOrEmptyString(nr.Uid), + "reconciling": dcl.ValueOrEmptyString(nr.Reconciling), + "create_time": dcl.ValueOrEmptyString(nr.CreateTime), + "update_time": dcl.ValueOrEmptyString(nr.UpdateTime), + "etag": dcl.ValueOrEmptyString(nr.Etag), + "annotations": dcl.ValueOrEmptyString(nr.Annotations), + "max_pods_constraint": dcl.ValueOrEmptyString(nr.MaxPodsConstraint), + "management": dcl.ValueOrEmptyString(nr.Management), + "kubelet_config": dcl.ValueOrEmptyString(nr.KubeletConfig), + "update_settings": dcl.ValueOrEmptyString(nr.UpdateSettings), + 
"project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "cluster": dcl.ValueOrEmptyString(nr.Cluster), + } + return dcl.Nprintf("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/awsClusters/{{ "{{" }}cluster{{ "}}" }}/awsNodePools/{{ "{{" }}name{{ "}}" }}", params), nil +} + +const NodePoolMaxPage = -1 + +type NodePoolList struct { + Items []*NodePool + + nextToken string + + pageSize int32 + + resource *NodePool +} + +func (l *NodePoolList) HasNext() bool { + return l.nextToken != "" +} + +func (l *NodePoolList) Next(ctx context.Context, c *Client) error { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if !l.HasNext() { + return fmt.Errorf("no next page") + } + items, token, err := c.listNodePool(ctx, l.resource, l.nextToken, l.pageSize) + if err != nil { + return err + } + l.Items = items + l.nextToken = token + return err +} + +func (c *Client) ListNodePool(ctx context.Context, project, location, cluster string) (*NodePoolList, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + return c.ListNodePoolWithMaxResults(ctx, project, location, cluster, NodePoolMaxPage) + +} + +func (c *Client) ListNodePoolWithMaxResults(ctx context.Context, project, location, cluster string, pageSize int32) (*NodePoolList, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // Create a resource object so that we can use proper url normalization methods. 
+ r := &NodePool{ + Project: &project, + Location: &location, + Cluster: &cluster, + } + items, token, err := c.listNodePool(ctx, r, "", pageSize) + if err != nil { + return nil, err + } + return &NodePoolList{ + Items: items, + nextToken: token, + pageSize: pageSize, + resource: r, + }, nil +} + +func (c *Client) GetNodePool(ctx context.Context, r *NodePool) (*NodePool, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // This is *purposefully* supressing errors. + // This function is used with url-normalized values + not URL normalized values. + // URL Normalized values will throw unintentional errors, since those values are not of the proper parent form. + extractNodePoolFields(r) + + b, err := c.getNodePoolRaw(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + return nil, &googleapi.Error{ + Code: 404, + Message: err.Error(), + } + } + return nil, err + } + result, err := unmarshalNodePool(b, c, r) + if err != nil { + return nil, err + } + result.Project = r.Project + result.Location = r.Location + result.Cluster = r.Cluster + result.Name = r.Name + + c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) + result, err = canonicalizeNodePoolNewState(c, result, r) + if err != nil { + return nil, err + } + if err := postReadExtractNodePoolFields(result); err != nil { + return result, err + } + c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result) + + return result, nil +} + +func (c *Client) DeleteNodePool(ctx context.Context, r *NodePool) error { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if r == nil { + return fmt.Errorf("NodePool resource is nil") + } + c.Config.Logger.InfoWithContext(ctx, "Deleting NodePool...") + deleteOp := 
deleteNodePoolOperation{} + return deleteOp.do(ctx, r, c) +} + +// DeleteAllNodePool deletes all resources that the filter functions returns true on. +func (c *Client) DeleteAllNodePool(ctx context.Context, project, location, cluster string, filter func(*NodePool) bool) error { + listObj, err := c.ListNodePool(ctx, project, location, cluster) + if err != nil { + return err + } + + err = c.deleteAllNodePool(ctx, filter, listObj.Items) + if err != nil { + return err + } + for listObj.HasNext() { + err = listObj.Next(ctx, c) + if err != nil { + return nil + } + err = c.deleteAllNodePool(ctx, filter, listObj.Items) + if err != nil { + return err + } + } + return nil +} + +func (c *Client) ApplyNodePool(ctx context.Context, rawDesired *NodePool, opts ...dcl.ApplyOption) (*NodePool, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + ctx = dcl.ContextWithRequestID(ctx) + var resultNewState *NodePool + err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + newState, err := applyNodePoolHelper(c, ctx, rawDesired, opts...) + resultNewState = newState + if err != nil { + // If the error is 409, there is conflict in resource update. + // Here we want to apply changes based on latest state. + if dcl.IsConflictError(err) { + return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} + } + return nil, err + } + return nil, nil + }, c.Config.RetryProvider) + return resultNewState, err +} + +func applyNodePoolHelper(c *Client, ctx context.Context, rawDesired *NodePool, opts ...dcl.ApplyOption) (*NodePool, error) { + c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyNodePool...") + c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) + + // 1.1: Validation of user-specified fields in desired state. 
+ if err := rawDesired.validate(); err != nil { + return nil, err + } + + if err := extractNodePoolFields(rawDesired); err != nil { + return nil, err + } + + initial, desired, fieldDiffs, err := c.nodePoolDiffsForRawDesired(ctx, rawDesired, opts...) + if err != nil { + return nil, fmt.Errorf("failed to create a diff: %w", err) + } + + diffs, err := convertFieldDiffsToNodePoolDiffs(c.Config, fieldDiffs, opts) + if err != nil { + return nil, err + } + + // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). + + // 2.3: Lifecycle Directive Check + var create bool + lp := dcl.FetchLifecycleParams(opts) + if initial == nil { + if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} + } + create = true + } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), + } + } else { + for _, d := range diffs { + if d.RequiresRecreate { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), + } + } + if dcl.HasLifecycleParam(lp, dcl.BlockModification) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} + } + } + } + + // 2.4 Imperative Request Planning + var ops []nodePoolApiOperation + if create { + ops = append(ops, &createNodePoolOperation{}) + } else { + for _, d := range diffs { + ops = append(ops, d.UpdateOp) + } + } + c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) + + // 2.5 Request Actuation + for _, op := range ops { + c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) + if err := op.do(ctx, desired, c); err != nil { + c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", op, op, err) + return nil, err + } + 
c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) + } + return applyNodePoolDiff(c, ctx, desired, rawDesired, ops, opts...) +} + +func applyNodePoolDiff(c *Client, ctx context.Context, desired *NodePool, rawDesired *NodePool, ops []nodePoolApiOperation, opts ...dcl.ApplyOption) (*NodePool, error) { + // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") + rawNew, err := c.GetNodePool(ctx, desired) + if err != nil { + return nil, err + } + // Get additional values from the first response. + // These values should be merged into the newState above. + if len(ops) > 0 { + lastOp := ops[len(ops)-1] + if o, ok := lastOp.(*createNodePoolOperation); ok { + if r, hasR := o.FirstResponse(); hasR { + + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") + + fullResp, err := unmarshalMapNodePool(r, c, rawDesired) + if err != nil { + return nil, err + } + + rawNew, err = canonicalizeNodePoolNewState(c, rawNew, fullResp) + if err != nil { + return nil, err + } + } + } + } + + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) + // 3.2b Canonicalization of raw new state using raw desired state + newState, err := canonicalizeNodePoolNewState(c, rawNew, rawDesired) + if err != nil { + return rawNew, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) + // 3.3 Comparison of the new state and raw desired state. + // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE + newDesired, err := canonicalizeNodePoolDesiredState(rawDesired, newState) + if err != nil { + return newState, err + } + + if err := postReadExtractNodePoolFields(newState); err != nil { + return newState, err + } + + // Need to ensure any transformations made here match acceptably in differ. 
+ if err := postReadExtractNodePoolFields(newDesired); err != nil { + return newState, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) + newDiffs, err := diffNodePool(c, newDesired, newState) + if err != nil { + return newState, err + } + + if len(newDiffs) == 0 { + c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") + } else { + c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) + diffMessages := make([]string, len(newDiffs)) + for i, d := range newDiffs { + diffMessages[i] = fmt.Sprintf("%v", d) + } + return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} + } + c.Config.Logger.InfoWithContext(ctx, "Done Apply.") + return newState, nil +} diff --git a/mmv1/third_party/terraform/services/containeraws/node_pool_internal.go.tmpl b/mmv1/third_party/terraform/services/containeraws/node_pool_internal.go.tmpl new file mode 100644 index 000000000000..24747a2267ca --- /dev/null +++ b/mmv1/third_party/terraform/services/containeraws/node_pool_internal.go.tmpl @@ -0,0 +1,6289 @@ +package containeraws + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "strings" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource/operations" +) + +func (r *NodePool) validate() error { + + if err := dcl.Required(r, "name"); err != nil { + return err + } + if err := dcl.Required(r, "version"); err != nil { + return err + } + if err := dcl.Required(r, "config"); err != nil { + return err + } + if err := dcl.Required(r, "autoscaling"); err != nil { + return err + } + if err := dcl.Required(r, "subnetId"); err != nil { + return err + } + if err := dcl.Required(r, "maxPodsConstraint"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Project, "Project"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Location, "Location"); err != 
nil { + return err + } + if err := dcl.RequiredParameter(r.Cluster, "Cluster"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.Config) { + if err := r.Config.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.Autoscaling) { + if err := r.Autoscaling.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.MaxPodsConstraint) { + if err := r.MaxPodsConstraint.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.Management) { + if err := r.Management.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.KubeletConfig) { + if err := r.KubeletConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.UpdateSettings) { + if err := r.UpdateSettings.validate(); err != nil { + return err + } + } + return nil +} +func (r *NodePoolConfig) validate() error { + if err := dcl.Required(r, "iamInstanceProfile"); err != nil { + return err + } + if err := dcl.Required(r, "configEncryption"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.RootVolume) { + if err := r.RootVolume.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.ConfigEncryption) { + if err := r.ConfigEncryption.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.SshConfig) { + if err := r.SshConfig.validate(); err != nil { + return err + } + } +{{- if ne $.TargetVersionName "ga" }} + if !dcl.IsEmptyValueIndirect(r.SpotConfig) { + if err := r.SpotConfig.validate(); err != nil { + return err + } + } +{{- end }} + if !dcl.IsEmptyValueIndirect(r.ProxyConfig) { + if err := r.ProxyConfig.validate(); err != nil { + return err + } + } +{{- if ne $.TargetVersionName "ga" }} + if !dcl.IsEmptyValueIndirect(r.InstancePlacement) { + if err := r.InstancePlacement.validate(); err != nil { + return err + } + } +{{- end }} + if !dcl.IsEmptyValueIndirect(r.AutoscalingMetricsCollection) { + if err := 
r.AutoscalingMetricsCollection.validate(); err != nil { + return err + } + } + return nil +} +func (r *NodePoolConfigRootVolume) validate() error { + return nil +} +func (r *NodePoolConfigTaints) validate() error { + if err := dcl.Required(r, "key"); err != nil { + return err + } + if err := dcl.Required(r, "value"); err != nil { + return err + } + if err := dcl.Required(r, "effect"); err != nil { + return err + } + return nil +} +func (r *NodePoolConfigConfigEncryption) validate() error { + if err := dcl.Required(r, "kmsKeyArn"); err != nil { + return err + } + return nil +} +func (r *NodePoolConfigSshConfig) validate() error { + if err := dcl.Required(r, "ec2KeyPair"); err != nil { + return err + } + return nil +} +{{- if ne $.TargetVersionName "ga" }} +func (r *NodePoolConfigSpotConfig) validate() error { + if err := dcl.Required(r, "instanceTypes"); err != nil { + return err + } + return nil +} +{{- end }} +func (r *NodePoolConfigProxyConfig) validate() error { + if err := dcl.Required(r, "secretArn"); err != nil { + return err + } + if err := dcl.Required(r, "secretVersion"); err != nil { + return err + } +{{- if ne $.TargetVersionName "ga" }} + return nil +} +func (r *NodePoolConfigInstancePlacement) validate() error { +{{- end }} + return nil +} +func (r *NodePoolConfigAutoscalingMetricsCollection) validate() error { + if err := dcl.Required(r, "granularity"); err != nil { + return err + } + return nil +} +func (r *NodePoolAutoscaling) validate() error { + if err := dcl.Required(r, "minNodeCount"); err != nil { + return err + } + if err := dcl.Required(r, "maxNodeCount"); err != nil { + return err + } + return nil +} +func (r *NodePoolMaxPodsConstraint) validate() error { + if err := dcl.Required(r, "maxPodsPerNode"); err != nil { + return err + } + return nil +} +func (r *NodePoolManagement) validate() error { + return nil +} +func (r *NodePoolKubeletConfig) validate() error { + return nil +} +func (r *NodePoolUpdateSettings) validate() error { + if 
!dcl.IsEmptyValueIndirect(r.SurgeSettings) { + if err := r.SurgeSettings.validate(); err != nil { + return err + } + } + return nil +} +func (r *NodePoolUpdateSettingsSurgeSettings) validate() error { + return nil +} +func (r *NodePool) basePath() string { + params := map[string]interface{}{ + "location": dcl.ValueOrEmptyString(r.Location), + } + return dcl.Nprintf("https://{{ "{{" }}location{{ "}}" }}-gkemulticloud.googleapis.com/v1", params) +} + +func (r *NodePool) getURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "cluster": dcl.ValueOrEmptyString(nr.Cluster), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/awsClusters/{{ "{{" }}cluster{{ "}}" }}/awsNodePools/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +func (r *NodePool) listURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "cluster": dcl.ValueOrEmptyString(nr.Cluster), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/awsClusters/{{ "{{" }}cluster{{ "}}" }}/awsNodePools", nr.basePath(), userBasePath, params), nil + +} + +func (r *NodePool) createURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "cluster": dcl.ValueOrEmptyString(nr.Cluster), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/awsClusters/{{ "{{" }}cluster{{ "}}" }}/awsNodePools?awsNodePoolId={{ "{{" }}name{{ "}}" }}", 
nr.basePath(), userBasePath, params), nil + +} + +func (r *NodePool) deleteURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "cluster": dcl.ValueOrEmptyString(nr.Cluster), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/awsClusters/{{ "{{" }}cluster{{ "}}" }}/awsNodePools/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +// nodePoolApiOperation represents a mutable operation in the underlying REST +// API such as Create, Update, or Delete. +type nodePoolApiOperation interface { + do(context.Context, *NodePool, *Client) error +} + +// newUpdateNodePoolUpdateAwsNodePoolRequest creates a request for an +// NodePool resource's UpdateAwsNodePool update type by filling in the update +// fields based on the intended state of the resource. 
+func newUpdateNodePoolUpdateAwsNodePoolRequest(ctx context.Context, f *NodePool, c *Client) (map[string]interface{}, error) { + req := map[string]interface{}{} + res := f + _ = res + + if v := f.Version; !dcl.IsEmptyValueIndirect(v) { + req["version"] = v + } + if v, err := expandNodePoolConfig(c, f.Config, res); err != nil { + return nil, fmt.Errorf("error expanding Config into config: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["config"] = v + } + if v, err := expandNodePoolAutoscaling(c, f.Autoscaling, res); err != nil { + return nil, fmt.Errorf("error expanding Autoscaling into autoscaling: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["autoscaling"] = v + } + if v := f.Annotations; !dcl.IsEmptyValueIndirect(v) { + req["annotations"] = v + } + if v, err := expandNodePoolManagement(c, f.Management, res); err != nil { + return nil, fmt.Errorf("error expanding Management into management: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["management"] = v + } + if v, err := expandNodePoolUpdateSettings(c, f.UpdateSettings, res); err != nil { + return nil, fmt.Errorf("error expanding UpdateSettings into updateSettings: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["updateSettings"] = v + } + b, err := c.getNodePoolRaw(ctx, f) + if err != nil { + return nil, err + } + var m map[string]interface{} + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + rawEtag, err := dcl.GetMapEntry( + m, + []string{"etag"}, + ) + if err != nil { + c.Config.Logger.WarningWithContextf(ctx, "Failed to fetch from JSON Path: %v", err) + } else { + req["etag"] = rawEtag.(string) + } + return req, nil +} + +// marshalUpdateNodePoolUpdateAwsNodePoolRequest converts the update into +// the final JSON request body. 
+func marshalUpdateNodePoolUpdateAwsNodePoolRequest(c *Client, m map[string]interface{}) ([]byte, error) { + + return json.Marshal(m) +} + +type updateNodePoolUpdateAwsNodePoolOperation struct { + // If the update operation has the REQUIRES_APPLY_OPTIONS trait, this will be populated. + // Usually it will be nil - this is to prevent us from accidentally depending on apply + // options, which should usually be unnecessary. + ApplyOptions []dcl.ApplyOption + FieldDiffs []*dcl.FieldDiff +} + +// do creates a request and sends it to the appropriate URL. In most operations, +// do will transcribe a subset of the resource into a request object and send a +// PUT request to a single URL. + +func (op *updateNodePoolUpdateAwsNodePoolOperation) do(ctx context.Context, r *NodePool, c *Client) error { + _, err := c.GetNodePool(ctx, r) + if err != nil { + return err + } + + u, err := r.updateURL(c.Config.BasePath, "UpdateAwsNodePool") + if err != nil { + return err + } + mask := dcl.UpdateMask(op.FieldDiffs) + u, err = dcl.AddQueryParams(u, map[string]string{"updateMask": mask}) + if err != nil { + return err + } + + req, err := newUpdateNodePoolUpdateAwsNodePoolRequest(ctx, r, c) + if err != nil { + return err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created update: %#v", req) + body, err := marshalUpdateNodePoolUpdateAwsNodePoolRequest(c, req) + if err != nil { + return err + } + resp, err := dcl.SendRequest(ctx, c.Config, "PATCH", u, bytes.NewBuffer(body), c.Config.RetryProvider) + if err != nil { + return err + } + + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + err = o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET") + + if err != nil { + return err + } + + return nil +} + +func (c *Client) listNodePoolRaw(ctx context.Context, r *NodePool, pageToken string, pageSize int32) ([]byte, error) { + u, err := r.urlNormalized().listURL(c.Config.BasePath) + if 
err != nil { + return nil, err + } + + m := make(map[string]string) + if pageToken != "" { + m["pageToken"] = pageToken + } + + if pageSize != NodePoolMaxPage { + m["pageSize"] = fmt.Sprintf("%v", pageSize) + } + + u, err = dcl.AddQueryParams(u, m) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + return ioutil.ReadAll(resp.Response.Body) +} + +type listNodePoolOperation struct { + AwsNodePools []map[string]interface{} `json:"awsNodePools"` + Token string `json:"nextPageToken"` +} + +func (c *Client) listNodePool(ctx context.Context, r *NodePool, pageToken string, pageSize int32) ([]*NodePool, string, error) { + b, err := c.listNodePoolRaw(ctx, r, pageToken, pageSize) + if err != nil { + return nil, "", err + } + + var m listNodePoolOperation + if err := json.Unmarshal(b, &m); err != nil { + return nil, "", err + } + + var l []*NodePool + for _, v := range m.AwsNodePools { + res, err := unmarshalMapNodePool(v, c, r) + if err != nil { + return nil, m.Token, err + } + res.Project = r.Project + res.Location = r.Location + res.Cluster = r.Cluster + l = append(l, res) + } + + return l, m.Token, nil +} + +func (c *Client) deleteAllNodePool(ctx context.Context, f func(*NodePool) bool, resources []*NodePool) error { + var errors []string + for _, res := range resources { + if f(res) { + // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. 
+ err := c.DeleteNodePool(ctx, res) + if err != nil { + errors = append(errors, err.Error()) + } + } + } + if len(errors) > 0 { + return fmt.Errorf("%v", strings.Join(errors, "\n")) + } else { + return nil + } +} + +type deleteNodePoolOperation struct{} + +func (op *deleteNodePoolOperation) do(ctx context.Context, r *NodePool, c *Client) error { + r, err := c.GetNodePool(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + c.Config.Logger.InfoWithContextf(ctx, "NodePool not found, returning. Original error: %v", err) + return nil + } + c.Config.Logger.WarningWithContextf(ctx, "GetNodePool checking for existence. error: %v", err) + return err + } + + u, err := r.deleteURL(c.Config.BasePath) + if err != nil { + return err + } + + // Delete should never have a body + body := &bytes.Buffer{} + resp, err := dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider) + if err != nil { + return err + } + + // wait for object to be deleted. + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { + return err + } + + // We saw a race condition where for some successful delete operation, the Get calls returned resources for a short duration. + // This is the reason we are adding retry to handle that case. + retriesRemaining := 10 + dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + _, err := c.GetNodePool(ctx, r) + if dcl.IsNotFound(err) { + return nil, nil + } + if retriesRemaining > 0 { + retriesRemaining-- + return &dcl.RetryDetails{}, dcl.OperationNotDone{} + } + return nil, dcl.NotDeletedError{ExistingResource: r} + }, c.Config.RetryProvider) + return nil +} + +// Create operations are similar to Update operations, although they do not have +// specific request objects. 
The Create request object is the json encoding of +// the resource, which is modified by res.marshal to form the base request body. +type createNodePoolOperation struct { + response map[string]interface{} +} + +func (op *createNodePoolOperation) FirstResponse() (map[string]interface{}, bool) { + return op.response, len(op.response) > 0 +} + +func (op *createNodePoolOperation) do(ctx context.Context, r *NodePool, c *Client) error { + c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r) + u, err := r.createURL(c.Config.BasePath) + if err != nil { + return err + } + + req, err := r.marshal(c) + if err != nil { + return err + } + resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider) + if err != nil { + return err + } + // wait for object to be created. + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { + c.Config.Logger.Warningf("Creation failed after waiting for operation: %v", err) + return err + } + c.Config.Logger.InfoWithContextf(ctx, "Successfully waited for operation") + op.response, _ = o.FirstResponse() + + if _, err := c.GetNodePool(ctx, r); err != nil { + c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) + return err + } + + return nil +} + +func (c *Client) getNodePoolRaw(ctx context.Context, r *NodePool) ([]byte, error) { + + u, err := r.getURL(c.Config.BasePath) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + b, err := ioutil.ReadAll(resp.Response.Body) + if err != nil { + return nil, err + } + + return b, nil +} + +func (c *Client) nodePoolDiffsForRawDesired(ctx context.Context, rawDesired *NodePool, opts ...dcl.ApplyOption) 
(initial, desired *NodePool, diffs []*dcl.FieldDiff, err error) { + c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") + // First, let us see if the user provided a state hint. If they did, we will start fetching based on that. + var fetchState *NodePool + if sh := dcl.FetchStateHint(opts); sh != nil { + if r, ok := sh.(*NodePool); !ok { + c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected NodePool, got %T", sh) + } else { + fetchState = r + } + } + if fetchState == nil { + fetchState = rawDesired + } + + // 1.2: Retrieval of raw initial state from API + rawInitial, err := c.GetNodePool(ctx, fetchState) + if rawInitial == nil { + if !dcl.IsNotFound(err) { + c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a NodePool resource already exists: %s", err) + return nil, nil, nil, fmt.Errorf("failed to retrieve NodePool resource: %v", err) + } + c.Config.Logger.InfoWithContext(ctx, "Found that NodePool resource did not exist.") + // Perform canonicalization to pick up defaults. + desired, err = canonicalizeNodePoolDesiredState(rawDesired, rawInitial) + return nil, desired, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Found initial state for NodePool: %v", rawInitial) + c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for NodePool: %v", rawDesired) + + // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. + if err := extractNodePoolFields(rawInitial); err != nil { + return nil, nil, nil, err + } + + // 1.3: Canonicalize raw initial state into initial state. + initial, err = canonicalizeNodePoolInitialState(rawInitial, rawDesired) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for NodePool: %v", initial) + + // 1.4: Canonicalize raw desired state into desired state. + desired, err = canonicalizeNodePoolDesiredState(rawDesired, rawInitial, opts...) 
+ if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for NodePool: %v", desired) + + // 2.1: Comparison of initial and desired state. + diffs, err = diffNodePool(c, desired, initial, opts...) + return initial, desired, diffs, err +} + +func canonicalizeNodePoolInitialState(rawInitial, rawDesired *NodePool) (*NodePool, error) { + // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. + return rawInitial, nil +} + +/* +* Canonicalizers +* +* These are responsible for converting either a user-specified config or a +* GCP API response to a standard format that can be used for difference checking. +* */ + +func canonicalizeNodePoolDesiredState(rawDesired, rawInitial *NodePool, opts ...dcl.ApplyOption) (*NodePool, error) { + + if rawInitial == nil { + // Since the initial state is empty, the desired state is all we have. + // We canonicalize the remaining nested objects with nil to pick up defaults. + rawDesired.Config = canonicalizeNodePoolConfig(rawDesired.Config, nil, opts...) + rawDesired.Autoscaling = canonicalizeNodePoolAutoscaling(rawDesired.Autoscaling, nil, opts...) + rawDesired.MaxPodsConstraint = canonicalizeNodePoolMaxPodsConstraint(rawDesired.MaxPodsConstraint, nil, opts...) + rawDesired.Management = canonicalizeNodePoolManagement(rawDesired.Management, nil, opts...) + rawDesired.KubeletConfig = canonicalizeNodePoolKubeletConfig(rawDesired.KubeletConfig, nil, opts...) + rawDesired.UpdateSettings = canonicalizeNodePoolUpdateSettings(rawDesired.UpdateSettings, nil, opts...) 
+ + return rawDesired, nil + } + canonicalDesired := &NodePool{} + if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawInitial.Name) { + canonicalDesired.Name = rawInitial.Name + } else { + canonicalDesired.Name = rawDesired.Name + } + if dcl.StringCanonicalize(rawDesired.Version, rawInitial.Version) { + canonicalDesired.Version = rawInitial.Version + } else { + canonicalDesired.Version = rawDesired.Version + } + canonicalDesired.Config = canonicalizeNodePoolConfig(rawDesired.Config, rawInitial.Config, opts...) + canonicalDesired.Autoscaling = canonicalizeNodePoolAutoscaling(rawDesired.Autoscaling, rawInitial.Autoscaling, opts...) + if dcl.StringCanonicalize(rawDesired.SubnetId, rawInitial.SubnetId) { + canonicalDesired.SubnetId = rawInitial.SubnetId + } else { + canonicalDesired.SubnetId = rawDesired.SubnetId + } + if dcl.IsZeroValue(rawDesired.Annotations) || (dcl.IsEmptyValueIndirect(rawDesired.Annotations) && dcl.IsEmptyValueIndirect(rawInitial.Annotations)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + canonicalDesired.Annotations = rawInitial.Annotations + } else { + canonicalDesired.Annotations = rawDesired.Annotations + } + canonicalDesired.MaxPodsConstraint = canonicalizeNodePoolMaxPodsConstraint(rawDesired.MaxPodsConstraint, rawInitial.MaxPodsConstraint, opts...) + canonicalDesired.Management = canonicalizeNodePoolManagement(rawDesired.Management, rawInitial.Management, opts...) + canonicalDesired.KubeletConfig = canonicalizeNodePoolKubeletConfig(rawDesired.KubeletConfig, rawInitial.KubeletConfig, opts...) + canonicalDesired.UpdateSettings = canonicalizeNodePoolUpdateSettings(rawDesired.UpdateSettings, rawInitial.UpdateSettings, opts...) 
+ if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) { + canonicalDesired.Project = rawInitial.Project + } else { + canonicalDesired.Project = rawDesired.Project + } + if dcl.NameToSelfLink(rawDesired.Location, rawInitial.Location) { + canonicalDesired.Location = rawInitial.Location + } else { + canonicalDesired.Location = rawDesired.Location + } + if dcl.NameToSelfLink(rawDesired.Cluster, rawInitial.Cluster) { + canonicalDesired.Cluster = rawInitial.Cluster + } else { + canonicalDesired.Cluster = rawDesired.Cluster + } + return canonicalDesired, nil +} + +func canonicalizeNodePoolNewState(c *Client, rawNew, rawDesired *NodePool) (*NodePool, error) { + + if dcl.IsEmptyValueIndirect(rawNew.Name) && dcl.IsEmptyValueIndirect(rawDesired.Name) { + rawNew.Name = rawDesired.Name + } else { + if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawNew.Name) { + rawNew.Name = rawDesired.Name + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Version) && dcl.IsEmptyValueIndirect(rawDesired.Version) { + rawNew.Version = rawDesired.Version + } else { + if dcl.StringCanonicalize(rawDesired.Version, rawNew.Version) { + rawNew.Version = rawDesired.Version + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Config) && dcl.IsEmptyValueIndirect(rawDesired.Config) { + rawNew.Config = rawDesired.Config + } else { + rawNew.Config = canonicalizeNewNodePoolConfig(c, rawDesired.Config, rawNew.Config) + } + + if dcl.IsEmptyValueIndirect(rawNew.Autoscaling) && dcl.IsEmptyValueIndirect(rawDesired.Autoscaling) { + rawNew.Autoscaling = rawDesired.Autoscaling + } else { + rawNew.Autoscaling = canonicalizeNewNodePoolAutoscaling(c, rawDesired.Autoscaling, rawNew.Autoscaling) + } + + if dcl.IsEmptyValueIndirect(rawNew.SubnetId) && dcl.IsEmptyValueIndirect(rawDesired.SubnetId) { + rawNew.SubnetId = rawDesired.SubnetId + } else { + if dcl.StringCanonicalize(rawDesired.SubnetId, rawNew.SubnetId) { + rawNew.SubnetId = rawDesired.SubnetId + } + } + + if dcl.IsEmptyValueIndirect(rawNew.State) && 
dcl.IsEmptyValueIndirect(rawDesired.State) { + rawNew.State = rawDesired.State + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.Uid) && dcl.IsEmptyValueIndirect(rawDesired.Uid) { + rawNew.Uid = rawDesired.Uid + } else { + if dcl.StringCanonicalize(rawDesired.Uid, rawNew.Uid) { + rawNew.Uid = rawDesired.Uid + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Reconciling) && dcl.IsEmptyValueIndirect(rawDesired.Reconciling) { + rawNew.Reconciling = rawDesired.Reconciling + } else { + if dcl.BoolCanonicalize(rawDesired.Reconciling, rawNew.Reconciling) { + rawNew.Reconciling = rawDesired.Reconciling + } + } + + if dcl.IsEmptyValueIndirect(rawNew.CreateTime) && dcl.IsEmptyValueIndirect(rawDesired.CreateTime) { + rawNew.CreateTime = rawDesired.CreateTime + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.UpdateTime) && dcl.IsEmptyValueIndirect(rawDesired.UpdateTime) { + rawNew.UpdateTime = rawDesired.UpdateTime + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.Etag) && dcl.IsEmptyValueIndirect(rawDesired.Etag) { + rawNew.Etag = rawDesired.Etag + } else { + if dcl.StringCanonicalize(rawDesired.Etag, rawNew.Etag) { + rawNew.Etag = rawDesired.Etag + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Annotations) && dcl.IsEmptyValueIndirect(rawDesired.Annotations) { + rawNew.Annotations = rawDesired.Annotations + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.MaxPodsConstraint) && dcl.IsEmptyValueIndirect(rawDesired.MaxPodsConstraint) { + rawNew.MaxPodsConstraint = rawDesired.MaxPodsConstraint + } else { + rawNew.MaxPodsConstraint = canonicalizeNewNodePoolMaxPodsConstraint(c, rawDesired.MaxPodsConstraint, rawNew.MaxPodsConstraint) + } + + if dcl.IsEmptyValueIndirect(rawNew.Management) && dcl.IsEmptyValueIndirect(rawDesired.Management) { + rawNew.Management = rawDesired.Management + } else { + rawNew.Management = canonicalizeNewNodePoolManagement(c, rawDesired.Management, rawNew.Management) + } + + if dcl.IsEmptyValueIndirect(rawNew.KubeletConfig) && 
dcl.IsEmptyValueIndirect(rawDesired.KubeletConfig) { + rawNew.KubeletConfig = rawDesired.KubeletConfig + } else { + rawNew.KubeletConfig = canonicalizeNewNodePoolKubeletConfig(c, rawDesired.KubeletConfig, rawNew.KubeletConfig) + } + + if dcl.IsEmptyValueIndirect(rawNew.UpdateSettings) && dcl.IsEmptyValueIndirect(rawDesired.UpdateSettings) { + rawNew.UpdateSettings = rawDesired.UpdateSettings + } else { + rawNew.UpdateSettings = canonicalizeNewNodePoolUpdateSettings(c, rawDesired.UpdateSettings, rawNew.UpdateSettings) + } + + rawNew.Project = rawDesired.Project + + rawNew.Location = rawDesired.Location + + rawNew.Cluster = rawDesired.Cluster + + return rawNew, nil +} + +func canonicalizeNodePoolConfig(des, initial *NodePoolConfig, opts ...dcl.ApplyOption) *NodePoolConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &NodePoolConfig{} + + if dcl.StringCanonicalize(des.InstanceType, initial.InstanceType) || dcl.IsZeroValue(des.InstanceType) { + cDes.InstanceType = initial.InstanceType + } else { + cDes.InstanceType = des.InstanceType + } + cDes.RootVolume = canonicalizeNodePoolConfigRootVolume(des.RootVolume, initial.RootVolume, opts...) + cDes.Taints = canonicalizeNodePoolConfigTaintsSlice(des.Taints, initial.Taints, opts...) + if dcl.IsZeroValue(des.Labels) || (dcl.IsEmptyValueIndirect(des.Labels) && dcl.IsEmptyValueIndirect(initial.Labels)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Labels = initial.Labels + } else { + cDes.Labels = des.Labels + } + if dcl.IsZeroValue(des.Tags) || (dcl.IsEmptyValueIndirect(des.Tags) && dcl.IsEmptyValueIndirect(initial.Tags)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.Tags = initial.Tags + } else { + cDes.Tags = des.Tags + } + if dcl.StringCanonicalize(des.IamInstanceProfile, initial.IamInstanceProfile) || dcl.IsZeroValue(des.IamInstanceProfile) { + cDes.IamInstanceProfile = initial.IamInstanceProfile + } else { + cDes.IamInstanceProfile = des.IamInstanceProfile + } + cDes.ConfigEncryption = canonicalizeNodePoolConfigConfigEncryption(des.ConfigEncryption, initial.ConfigEncryption, opts...) + cDes.SshConfig = canonicalizeNodePoolConfigSshConfig(des.SshConfig, initial.SshConfig, opts...) +{{- if ne $.TargetVersionName "ga" }} + cDes.SpotConfig = canonicalizeNodePoolConfigSpotConfig(des.SpotConfig, initial.SpotConfig, opts...) +{{- end }} + if dcl.StringArrayCanonicalize(des.SecurityGroupIds, initial.SecurityGroupIds) { + cDes.SecurityGroupIds = initial.SecurityGroupIds + } else { + cDes.SecurityGroupIds = des.SecurityGroupIds + } + cDes.ProxyConfig = canonicalizeNodePoolConfigProxyConfig(des.ProxyConfig, initial.ProxyConfig, opts...) +{{- if ne $.TargetVersionName "ga" }} + cDes.InstancePlacement = canonicalizeNodePoolConfigInstancePlacement(des.InstancePlacement, initial.InstancePlacement, opts...) + if dcl.StringCanonicalize(des.ImageType, initial.ImageType) || dcl.IsZeroValue(des.ImageType) { + cDes.ImageType = initial.ImageType + } else { + cDes.ImageType = des.ImageType + } +{{- end }} + cDes.AutoscalingMetricsCollection = canonicalizeNodePoolConfigAutoscalingMetricsCollection(des.AutoscalingMetricsCollection, initial.AutoscalingMetricsCollection, opts...) + + return cDes +} + +func canonicalizeNodePoolConfigSlice(des, initial []NodePoolConfig, opts ...dcl.ApplyOption) []NodePoolConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]NodePoolConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeNodePoolConfig(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]NodePoolConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeNodePoolConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewNodePoolConfig(c *Client, des, nw *NodePoolConfig) *NodePoolConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for NodePoolConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.InstanceType, nw.InstanceType) { + nw.InstanceType = des.InstanceType + } + nw.RootVolume = canonicalizeNewNodePoolConfigRootVolume(c, des.RootVolume, nw.RootVolume) + nw.Taints = canonicalizeNewNodePoolConfigTaintsSlice(c, des.Taints, nw.Taints) + if dcl.StringCanonicalize(des.IamInstanceProfile, nw.IamInstanceProfile) { + nw.IamInstanceProfile = des.IamInstanceProfile + } + nw.ConfigEncryption = canonicalizeNewNodePoolConfigConfigEncryption(c, des.ConfigEncryption, nw.ConfigEncryption) + nw.SshConfig = canonicalizeNewNodePoolConfigSshConfig(c, des.SshConfig, nw.SshConfig) +{{- if ne $.TargetVersionName "ga" }} + nw.SpotConfig = canonicalizeNewNodePoolConfigSpotConfig(c, des.SpotConfig, nw.SpotConfig) +{{- end }} + if dcl.StringArrayCanonicalize(des.SecurityGroupIds, nw.SecurityGroupIds) { + nw.SecurityGroupIds = des.SecurityGroupIds + } + nw.ProxyConfig = canonicalizeNewNodePoolConfigProxyConfig(c, des.ProxyConfig, nw.ProxyConfig) +{{- if ne $.TargetVersionName "ga" }} + nw.InstancePlacement = canonicalizeNewNodePoolConfigInstancePlacement(c, des.InstancePlacement, nw.InstancePlacement) + if dcl.StringCanonicalize(des.ImageType, nw.ImageType) { + nw.ImageType = des.ImageType + } +{{- end }} + nw.AutoscalingMetricsCollection = canonicalizeNewNodePoolConfigAutoscalingMetricsCollection(c, 
des.AutoscalingMetricsCollection, nw.AutoscalingMetricsCollection) + + return nw +} + +func canonicalizeNewNodePoolConfigSet(c *Client, des, nw []NodePoolConfig) []NodePoolConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []NodePoolConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareNodePoolConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewNodePoolConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewNodePoolConfigSlice(c *Client, des, nw []NodePoolConfig) []NodePoolConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []NodePoolConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewNodePoolConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeNodePoolConfigRootVolume(des, initial *NodePoolConfigRootVolume, opts ...dcl.ApplyOption) *NodePoolConfigRootVolume { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &NodePoolConfigRootVolume{} + + if dcl.IsZeroValue(des.SizeGib) || (dcl.IsEmptyValueIndirect(des.SizeGib) && dcl.IsEmptyValueIndirect(initial.SizeGib)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.SizeGib = initial.SizeGib + } else { + cDes.SizeGib = des.SizeGib + } + if dcl.IsZeroValue(des.VolumeType) || (dcl.IsEmptyValueIndirect(des.VolumeType) && dcl.IsEmptyValueIndirect(initial.VolumeType)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.VolumeType = initial.VolumeType + } else { + cDes.VolumeType = des.VolumeType + } + if dcl.IsZeroValue(des.Iops) || (dcl.IsEmptyValueIndirect(des.Iops) && dcl.IsEmptyValueIndirect(initial.Iops)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Iops = initial.Iops + } else { + cDes.Iops = des.Iops + } + if dcl.IsZeroValue(des.Throughput) || (dcl.IsEmptyValueIndirect(des.Throughput) && dcl.IsEmptyValueIndirect(initial.Throughput)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Throughput = initial.Throughput + } else { + cDes.Throughput = des.Throughput + } + if dcl.StringCanonicalize(des.KmsKeyArn, initial.KmsKeyArn) || dcl.IsZeroValue(des.KmsKeyArn) { + cDes.KmsKeyArn = initial.KmsKeyArn + } else { + cDes.KmsKeyArn = des.KmsKeyArn + } + + return cDes +} + +func canonicalizeNodePoolConfigRootVolumeSlice(des, initial []NodePoolConfigRootVolume, opts ...dcl.ApplyOption) []NodePoolConfigRootVolume { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]NodePoolConfigRootVolume, 0, len(des)) + for _, d := range des { + cd := canonicalizeNodePoolConfigRootVolume(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]NodePoolConfigRootVolume, 0, len(des)) + for i, d := range des { + cd := canonicalizeNodePoolConfigRootVolume(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewNodePoolConfigRootVolume(c *Client, des, nw *NodePoolConfigRootVolume) *NodePoolConfigRootVolume { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for NodePoolConfigRootVolume while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.KmsKeyArn, nw.KmsKeyArn) { + nw.KmsKeyArn = des.KmsKeyArn + } + + return nw +} + +func canonicalizeNewNodePoolConfigRootVolumeSet(c *Client, des, nw []NodePoolConfigRootVolume) []NodePoolConfigRootVolume { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []NodePoolConfigRootVolume + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareNodePoolConfigRootVolumeNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewNodePoolConfigRootVolume(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewNodePoolConfigRootVolumeSlice(c *Client, des, nw []NodePoolConfigRootVolume) []NodePoolConfigRootVolume { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []NodePoolConfigRootVolume + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewNodePoolConfigRootVolume(c, &d, &n)) + } + + return items +} + +func canonicalizeNodePoolConfigTaints(des, initial *NodePoolConfigTaints, opts ...dcl.ApplyOption) *NodePoolConfigTaints { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &NodePoolConfigTaints{} + + if dcl.StringCanonicalize(des.Key, initial.Key) || dcl.IsZeroValue(des.Key) { + cDes.Key = initial.Key + } else { + cDes.Key = des.Key + } + if dcl.StringCanonicalize(des.Value, initial.Value) || dcl.IsZeroValue(des.Value) { + cDes.Value = initial.Value + } else { + cDes.Value = des.Value + } + if dcl.IsZeroValue(des.Effect) || (dcl.IsEmptyValueIndirect(des.Effect) && dcl.IsEmptyValueIndirect(initial.Effect)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Effect = initial.Effect + } else { + cDes.Effect = des.Effect + } + + return cDes +} + +func canonicalizeNodePoolConfigTaintsSlice(des, initial []NodePoolConfigTaints, opts ...dcl.ApplyOption) []NodePoolConfigTaints { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]NodePoolConfigTaints, 0, len(des)) + for _, d := range des { + cd := canonicalizeNodePoolConfigTaints(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]NodePoolConfigTaints, 0, len(des)) + for i, d := range des { + cd := canonicalizeNodePoolConfigTaints(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewNodePoolConfigTaints(c *Client, des, nw *NodePoolConfigTaints) *NodePoolConfigTaints { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for NodePoolConfigTaints while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Key, nw.Key) { + nw.Key = des.Key + } + if dcl.StringCanonicalize(des.Value, nw.Value) { + nw.Value = des.Value + } + + return nw +} + +func canonicalizeNewNodePoolConfigTaintsSet(c *Client, des, nw []NodePoolConfigTaints) []NodePoolConfigTaints { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []NodePoolConfigTaints + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareNodePoolConfigTaintsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewNodePoolConfigTaints(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewNodePoolConfigTaintsSlice(c *Client, des, nw []NodePoolConfigTaints) []NodePoolConfigTaints { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []NodePoolConfigTaints + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewNodePoolConfigTaints(c, &d, &n)) + } + + return items +} + +func canonicalizeNodePoolConfigConfigEncryption(des, initial *NodePoolConfigConfigEncryption, opts ...dcl.ApplyOption) *NodePoolConfigConfigEncryption { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &NodePoolConfigConfigEncryption{} + + if dcl.StringCanonicalize(des.KmsKeyArn, initial.KmsKeyArn) || dcl.IsZeroValue(des.KmsKeyArn) { + cDes.KmsKeyArn = initial.KmsKeyArn + } else { + cDes.KmsKeyArn = des.KmsKeyArn + } + + return cDes +} + +func canonicalizeNodePoolConfigConfigEncryptionSlice(des, initial []NodePoolConfigConfigEncryption, opts ...dcl.ApplyOption) []NodePoolConfigConfigEncryption { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]NodePoolConfigConfigEncryption, 0, len(des)) + for _, d := range des { + cd := canonicalizeNodePoolConfigConfigEncryption(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]NodePoolConfigConfigEncryption, 0, len(des)) + for i, d := range des { + cd := canonicalizeNodePoolConfigConfigEncryption(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewNodePoolConfigConfigEncryption(c *Client, des, nw *NodePoolConfigConfigEncryption) *NodePoolConfigConfigEncryption { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for NodePoolConfigConfigEncryption while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.KmsKeyArn, nw.KmsKeyArn) { + nw.KmsKeyArn = des.KmsKeyArn + } + + return nw +} + +func canonicalizeNewNodePoolConfigConfigEncryptionSet(c *Client, des, nw []NodePoolConfigConfigEncryption) []NodePoolConfigConfigEncryption { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []NodePoolConfigConfigEncryption + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareNodePoolConfigConfigEncryptionNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewNodePoolConfigConfigEncryption(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewNodePoolConfigConfigEncryptionSlice(c *Client, des, nw []NodePoolConfigConfigEncryption) []NodePoolConfigConfigEncryption { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []NodePoolConfigConfigEncryption + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewNodePoolConfigConfigEncryption(c, &d, &n)) + } + + return items +} + +func canonicalizeNodePoolConfigSshConfig(des, initial *NodePoolConfigSshConfig, opts ...dcl.ApplyOption) *NodePoolConfigSshConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &NodePoolConfigSshConfig{} + + if dcl.StringCanonicalize(des.Ec2KeyPair, initial.Ec2KeyPair) || dcl.IsZeroValue(des.Ec2KeyPair) { + cDes.Ec2KeyPair = initial.Ec2KeyPair + } else { + cDes.Ec2KeyPair = des.Ec2KeyPair + } + + return cDes +} + +func canonicalizeNodePoolConfigSshConfigSlice(des, initial []NodePoolConfigSshConfig, opts ...dcl.ApplyOption) []NodePoolConfigSshConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]NodePoolConfigSshConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeNodePoolConfigSshConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]NodePoolConfigSshConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeNodePoolConfigSshConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewNodePoolConfigSshConfig(c *Client, des, nw *NodePoolConfigSshConfig) *NodePoolConfigSshConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for NodePoolConfigSshConfig while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Ec2KeyPair, nw.Ec2KeyPair) { + nw.Ec2KeyPair = des.Ec2KeyPair + } + + return nw +} + +func canonicalizeNewNodePoolConfigSshConfigSet(c *Client, des, nw []NodePoolConfigSshConfig) []NodePoolConfigSshConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []NodePoolConfigSshConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareNodePoolConfigSshConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewNodePoolConfigSshConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewNodePoolConfigSshConfigSlice(c *Client, des, nw []NodePoolConfigSshConfig) []NodePoolConfigSshConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []NodePoolConfigSshConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewNodePoolConfigSshConfig(c, &d, &n)) + } + + return items +} + +{{- if ne $.TargetVersionName "ga" }} +func canonicalizeNodePoolConfigSpotConfig(des, initial *NodePoolConfigSpotConfig, opts ...dcl.ApplyOption) *NodePoolConfigSpotConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &NodePoolConfigSpotConfig{} + + if dcl.StringArrayCanonicalize(des.InstanceTypes, initial.InstanceTypes) { + cDes.InstanceTypes = initial.InstanceTypes + } else { + cDes.InstanceTypes = des.InstanceTypes + } + + return cDes +} + +func canonicalizeNodePoolConfigSpotConfigSlice(des, initial []NodePoolConfigSpotConfig, opts ...dcl.ApplyOption) []NodePoolConfigSpotConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]NodePoolConfigSpotConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeNodePoolConfigSpotConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]NodePoolConfigSpotConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeNodePoolConfigSpotConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewNodePoolConfigSpotConfig(c *Client, des, nw *NodePoolConfigSpotConfig) *NodePoolConfigSpotConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for NodePoolConfigSpotConfig while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringArrayCanonicalize(des.InstanceTypes, nw.InstanceTypes) { + nw.InstanceTypes = des.InstanceTypes + } + + return nw +} + +func canonicalizeNewNodePoolConfigSpotConfigSet(c *Client, des, nw []NodePoolConfigSpotConfig) []NodePoolConfigSpotConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []NodePoolConfigSpotConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareNodePoolConfigSpotConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewNodePoolConfigSpotConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewNodePoolConfigSpotConfigSlice(c *Client, des, nw []NodePoolConfigSpotConfig) []NodePoolConfigSpotConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []NodePoolConfigSpotConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewNodePoolConfigSpotConfig(c, &d, &n)) + } + + return items +} + +{{- end }} +func canonicalizeNodePoolConfigProxyConfig(des, initial *NodePoolConfigProxyConfig, opts ...dcl.ApplyOption) *NodePoolConfigProxyConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &NodePoolConfigProxyConfig{} + + if dcl.StringCanonicalize(des.SecretArn, initial.SecretArn) || dcl.IsZeroValue(des.SecretArn) { + cDes.SecretArn = initial.SecretArn + } else { + cDes.SecretArn = des.SecretArn + } + if dcl.StringCanonicalize(des.SecretVersion, initial.SecretVersion) || dcl.IsZeroValue(des.SecretVersion) { + cDes.SecretVersion = initial.SecretVersion + } else { + cDes.SecretVersion = des.SecretVersion + } + + return cDes +} + +func canonicalizeNodePoolConfigProxyConfigSlice(des, initial []NodePoolConfigProxyConfig, opts ...dcl.ApplyOption) []NodePoolConfigProxyConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]NodePoolConfigProxyConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeNodePoolConfigProxyConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]NodePoolConfigProxyConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeNodePoolConfigProxyConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewNodePoolConfigProxyConfig(c *Client, des, nw *NodePoolConfigProxyConfig) *NodePoolConfigProxyConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for NodePoolConfigProxyConfig while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.SecretArn, nw.SecretArn) { + nw.SecretArn = des.SecretArn + } + if dcl.StringCanonicalize(des.SecretVersion, nw.SecretVersion) { + nw.SecretVersion = des.SecretVersion + } + + return nw +} + +func canonicalizeNewNodePoolConfigProxyConfigSet(c *Client, des, nw []NodePoolConfigProxyConfig) []NodePoolConfigProxyConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []NodePoolConfigProxyConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareNodePoolConfigProxyConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewNodePoolConfigProxyConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewNodePoolConfigProxyConfigSlice(c *Client, des, nw []NodePoolConfigProxyConfig) []NodePoolConfigProxyConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []NodePoolConfigProxyConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewNodePoolConfigProxyConfig(c, &d, &n)) +{{- if ne $.TargetVersionName "ga" }} + } + + return items +} + +func canonicalizeNodePoolConfigInstancePlacement(des, initial *NodePoolConfigInstancePlacement, opts ...dcl.ApplyOption) *NodePoolConfigInstancePlacement { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &NodePoolConfigInstancePlacement{} + + if dcl.IsZeroValue(des.Tenancy) || (dcl.IsEmptyValueIndirect(des.Tenancy) && dcl.IsEmptyValueIndirect(initial.Tenancy)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Tenancy = initial.Tenancy + } else { + cDes.Tenancy = des.Tenancy + } + + return cDes +} + +func canonicalizeNodePoolConfigInstancePlacementSlice(des, initial []NodePoolConfigInstancePlacement, opts ...dcl.ApplyOption) []NodePoolConfigInstancePlacement { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]NodePoolConfigInstancePlacement, 0, len(des)) + for _, d := range des { + cd := canonicalizeNodePoolConfigInstancePlacement(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]NodePoolConfigInstancePlacement, 0, len(des)) + for i, d := range des { + cd := canonicalizeNodePoolConfigInstancePlacement(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewNodePoolConfigInstancePlacement(c *Client, des, nw *NodePoolConfigInstancePlacement) *NodePoolConfigInstancePlacement { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for NodePoolConfigInstancePlacement while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewNodePoolConfigInstancePlacementSet(c *Client, des, nw []NodePoolConfigInstancePlacement) []NodePoolConfigInstancePlacement { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []NodePoolConfigInstancePlacement + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareNodePoolConfigInstancePlacementNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewNodePoolConfigInstancePlacement(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewNodePoolConfigInstancePlacementSlice(c *Client, des, nw []NodePoolConfigInstancePlacement) []NodePoolConfigInstancePlacement { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []NodePoolConfigInstancePlacement + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewNodePoolConfigInstancePlacement(c, &d, &n)) +{{- end }} + } + + return items +} + +func canonicalizeNodePoolConfigAutoscalingMetricsCollection(des, initial *NodePoolConfigAutoscalingMetricsCollection, opts ...dcl.ApplyOption) *NodePoolConfigAutoscalingMetricsCollection { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &NodePoolConfigAutoscalingMetricsCollection{} + + if dcl.StringCanonicalize(des.Granularity, initial.Granularity) || dcl.IsZeroValue(des.Granularity) { + cDes.Granularity = initial.Granularity + } else { + cDes.Granularity = des.Granularity + } + if dcl.StringArrayCanonicalize(des.Metrics, initial.Metrics) { + cDes.Metrics = initial.Metrics + } else { + cDes.Metrics = des.Metrics + } + + return cDes +} + +func canonicalizeNodePoolConfigAutoscalingMetricsCollectionSlice(des, initial []NodePoolConfigAutoscalingMetricsCollection, opts ...dcl.ApplyOption) []NodePoolConfigAutoscalingMetricsCollection { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]NodePoolConfigAutoscalingMetricsCollection, 0, len(des)) + for _, d := range des { + cd := canonicalizeNodePoolConfigAutoscalingMetricsCollection(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]NodePoolConfigAutoscalingMetricsCollection, 0, len(des)) + for i, d := range des { + cd := canonicalizeNodePoolConfigAutoscalingMetricsCollection(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewNodePoolConfigAutoscalingMetricsCollection(c *Client, des, nw *NodePoolConfigAutoscalingMetricsCollection) *NodePoolConfigAutoscalingMetricsCollection { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for NodePoolConfigAutoscalingMetricsCollection while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Granularity, nw.Granularity) { + nw.Granularity = des.Granularity + } + if dcl.StringArrayCanonicalize(des.Metrics, nw.Metrics) { + nw.Metrics = des.Metrics + } + + return nw +} + +func canonicalizeNewNodePoolConfigAutoscalingMetricsCollectionSet(c *Client, des, nw []NodePoolConfigAutoscalingMetricsCollection) []NodePoolConfigAutoscalingMetricsCollection { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []NodePoolConfigAutoscalingMetricsCollection + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareNodePoolConfigAutoscalingMetricsCollectionNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewNodePoolConfigAutoscalingMetricsCollection(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewNodePoolConfigAutoscalingMetricsCollectionSlice(c *Client, des, nw []NodePoolConfigAutoscalingMetricsCollection) []NodePoolConfigAutoscalingMetricsCollection { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. 
+ // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []NodePoolConfigAutoscalingMetricsCollection + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewNodePoolConfigAutoscalingMetricsCollection(c, &d, &n)) + } + + return items +} + +func canonicalizeNodePoolAutoscaling(des, initial *NodePoolAutoscaling, opts ...dcl.ApplyOption) *NodePoolAutoscaling { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &NodePoolAutoscaling{} + + if dcl.IsZeroValue(des.MinNodeCount) || (dcl.IsEmptyValueIndirect(des.MinNodeCount) && dcl.IsEmptyValueIndirect(initial.MinNodeCount)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.MinNodeCount = initial.MinNodeCount + } else { + cDes.MinNodeCount = des.MinNodeCount + } + if dcl.IsZeroValue(des.MaxNodeCount) || (dcl.IsEmptyValueIndirect(des.MaxNodeCount) && dcl.IsEmptyValueIndirect(initial.MaxNodeCount)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.MaxNodeCount = initial.MaxNodeCount + } else { + cDes.MaxNodeCount = des.MaxNodeCount + } + + return cDes +} + +func canonicalizeNodePoolAutoscalingSlice(des, initial []NodePoolAutoscaling, opts ...dcl.ApplyOption) []NodePoolAutoscaling { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]NodePoolAutoscaling, 0, len(des)) + for _, d := range des { + cd := canonicalizeNodePoolAutoscaling(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]NodePoolAutoscaling, 0, len(des)) + for i, d := range des { + cd := canonicalizeNodePoolAutoscaling(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewNodePoolAutoscaling(c *Client, des, nw *NodePoolAutoscaling) *NodePoolAutoscaling { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for NodePoolAutoscaling while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewNodePoolAutoscalingSet(c *Client, des, nw []NodePoolAutoscaling) []NodePoolAutoscaling { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []NodePoolAutoscaling + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareNodePoolAutoscalingNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewNodePoolAutoscaling(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewNodePoolAutoscalingSlice(c *Client, des, nw []NodePoolAutoscaling) []NodePoolAutoscaling { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []NodePoolAutoscaling + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewNodePoolAutoscaling(c, &d, &n)) + } + + return items +} + +func canonicalizeNodePoolMaxPodsConstraint(des, initial *NodePoolMaxPodsConstraint, opts ...dcl.ApplyOption) *NodePoolMaxPodsConstraint { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &NodePoolMaxPodsConstraint{} + + if dcl.IsZeroValue(des.MaxPodsPerNode) || (dcl.IsEmptyValueIndirect(des.MaxPodsPerNode) && dcl.IsEmptyValueIndirect(initial.MaxPodsPerNode)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.MaxPodsPerNode = initial.MaxPodsPerNode + } else { + cDes.MaxPodsPerNode = des.MaxPodsPerNode + } + + return cDes +} + +func canonicalizeNodePoolMaxPodsConstraintSlice(des, initial []NodePoolMaxPodsConstraint, opts ...dcl.ApplyOption) []NodePoolMaxPodsConstraint { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]NodePoolMaxPodsConstraint, 0, len(des)) + for _, d := range des { + cd := canonicalizeNodePoolMaxPodsConstraint(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]NodePoolMaxPodsConstraint, 0, len(des)) + for i, d := range des { + cd := canonicalizeNodePoolMaxPodsConstraint(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewNodePoolMaxPodsConstraint(c *Client, des, nw *NodePoolMaxPodsConstraint) *NodePoolMaxPodsConstraint { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for NodePoolMaxPodsConstraint while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewNodePoolMaxPodsConstraintSet(c *Client, des, nw []NodePoolMaxPodsConstraint) []NodePoolMaxPodsConstraint { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []NodePoolMaxPodsConstraint + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareNodePoolMaxPodsConstraintNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewNodePoolMaxPodsConstraint(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewNodePoolMaxPodsConstraintSlice(c *Client, des, nw []NodePoolMaxPodsConstraint) []NodePoolMaxPodsConstraint { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []NodePoolMaxPodsConstraint + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewNodePoolMaxPodsConstraint(c, &d, &n)) + } + + return items +} + +func canonicalizeNodePoolManagement(des, initial *NodePoolManagement, opts ...dcl.ApplyOption) *NodePoolManagement { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &NodePoolManagement{} + + if dcl.BoolCanonicalize(des.AutoRepair, initial.AutoRepair) || dcl.IsZeroValue(des.AutoRepair) { + cDes.AutoRepair = initial.AutoRepair + } else { + cDes.AutoRepair = des.AutoRepair + } + + return cDes +} + +func canonicalizeNodePoolManagementSlice(des, initial []NodePoolManagement, opts ...dcl.ApplyOption) []NodePoolManagement { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]NodePoolManagement, 0, len(des)) + for _, d := range des { + cd := canonicalizeNodePoolManagement(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]NodePoolManagement, 0, len(des)) + for i, d := range des { + cd := canonicalizeNodePoolManagement(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewNodePoolManagement(c *Client, des, nw *NodePoolManagement) *NodePoolManagement { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for NodePoolManagement while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.BoolCanonicalize(des.AutoRepair, nw.AutoRepair) { + nw.AutoRepair = des.AutoRepair + } + + return nw +} + +func canonicalizeNewNodePoolManagementSet(c *Client, des, nw []NodePoolManagement) []NodePoolManagement { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []NodePoolManagement + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareNodePoolManagementNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewNodePoolManagement(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewNodePoolManagementSlice(c *Client, des, nw []NodePoolManagement) []NodePoolManagement { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []NodePoolManagement + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewNodePoolManagement(c, &d, &n)) + } + + return items +} + +func canonicalizeNodePoolKubeletConfig(des, initial *NodePoolKubeletConfig, opts ...dcl.ApplyOption) *NodePoolKubeletConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &NodePoolKubeletConfig{} + + if dcl.IsZeroValue(des.CpuManagerPolicy) || (dcl.IsEmptyValueIndirect(des.CpuManagerPolicy) && dcl.IsEmptyValueIndirect(initial.CpuManagerPolicy)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.CpuManagerPolicy = initial.CpuManagerPolicy + } else { + cDes.CpuManagerPolicy = des.CpuManagerPolicy + } + if dcl.BoolCanonicalize(des.CpuCfsQuota, initial.CpuCfsQuota) || dcl.IsZeroValue(des.CpuCfsQuota) { + cDes.CpuCfsQuota = initial.CpuCfsQuota + } else { + cDes.CpuCfsQuota = des.CpuCfsQuota + } + if dcl.StringCanonicalize(des.CpuCfsQuotaPeriod, initial.CpuCfsQuotaPeriod) || dcl.IsZeroValue(des.CpuCfsQuotaPeriod) { + cDes.CpuCfsQuotaPeriod = initial.CpuCfsQuotaPeriod + } else { + cDes.CpuCfsQuotaPeriod = des.CpuCfsQuotaPeriod + } + if dcl.IsZeroValue(des.PodPidsLimit) || (dcl.IsEmptyValueIndirect(des.PodPidsLimit) && dcl.IsEmptyValueIndirect(initial.PodPidsLimit)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.PodPidsLimit = initial.PodPidsLimit + } else { + cDes.PodPidsLimit = des.PodPidsLimit + } + + return cDes +} + +func canonicalizeNodePoolKubeletConfigSlice(des, initial []NodePoolKubeletConfig, opts ...dcl.ApplyOption) []NodePoolKubeletConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]NodePoolKubeletConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeNodePoolKubeletConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]NodePoolKubeletConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeNodePoolKubeletConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewNodePoolKubeletConfig(c *Client, des, nw *NodePoolKubeletConfig) *NodePoolKubeletConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for NodePoolKubeletConfig while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.BoolCanonicalize(des.CpuCfsQuota, nw.CpuCfsQuota) { + nw.CpuCfsQuota = des.CpuCfsQuota + } + if dcl.StringCanonicalize(des.CpuCfsQuotaPeriod, nw.CpuCfsQuotaPeriod) { + nw.CpuCfsQuotaPeriod = des.CpuCfsQuotaPeriod + } + + return nw +} + +func canonicalizeNewNodePoolKubeletConfigSet(c *Client, des, nw []NodePoolKubeletConfig) []NodePoolKubeletConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []NodePoolKubeletConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareNodePoolKubeletConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewNodePoolKubeletConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewNodePoolKubeletConfigSlice(c *Client, des, nw []NodePoolKubeletConfig) []NodePoolKubeletConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []NodePoolKubeletConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewNodePoolKubeletConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeNodePoolUpdateSettings(des, initial *NodePoolUpdateSettings, opts ...dcl.ApplyOption) *NodePoolUpdateSettings { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &NodePoolUpdateSettings{} + + cDes.SurgeSettings = canonicalizeNodePoolUpdateSettingsSurgeSettings(des.SurgeSettings, initial.SurgeSettings, opts...) 
+ + return cDes +} + +func canonicalizeNodePoolUpdateSettingsSlice(des, initial []NodePoolUpdateSettings, opts ...dcl.ApplyOption) []NodePoolUpdateSettings { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]NodePoolUpdateSettings, 0, len(des)) + for _, d := range des { + cd := canonicalizeNodePoolUpdateSettings(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]NodePoolUpdateSettings, 0, len(des)) + for i, d := range des { + cd := canonicalizeNodePoolUpdateSettings(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewNodePoolUpdateSettings(c *Client, des, nw *NodePoolUpdateSettings) *NodePoolUpdateSettings { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for NodePoolUpdateSettings while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + nw.SurgeSettings = canonicalizeNewNodePoolUpdateSettingsSurgeSettings(c, des.SurgeSettings, nw.SurgeSettings) + + return nw +} + +func canonicalizeNewNodePoolUpdateSettingsSet(c *Client, des, nw []NodePoolUpdateSettings) []NodePoolUpdateSettings { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []NodePoolUpdateSettings + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareNodePoolUpdateSettingsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewNodePoolUpdateSettings(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. 
+ items = append(items, nw...) + + return items +} + +func canonicalizeNewNodePoolUpdateSettingsSlice(c *Client, des, nw []NodePoolUpdateSettings) []NodePoolUpdateSettings { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []NodePoolUpdateSettings + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewNodePoolUpdateSettings(c, &d, &n)) + } + + return items +} + +func canonicalizeNodePoolUpdateSettingsSurgeSettings(des, initial *NodePoolUpdateSettingsSurgeSettings, opts ...dcl.ApplyOption) *NodePoolUpdateSettingsSurgeSettings { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &NodePoolUpdateSettingsSurgeSettings{} + + if dcl.IsZeroValue(des.MaxSurge) || (dcl.IsEmptyValueIndirect(des.MaxSurge) && dcl.IsEmptyValueIndirect(initial.MaxSurge)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.MaxSurge = initial.MaxSurge + } else { + cDes.MaxSurge = des.MaxSurge + } + if dcl.IsZeroValue(des.MaxUnavailable) || (dcl.IsEmptyValueIndirect(des.MaxUnavailable) && dcl.IsEmptyValueIndirect(initial.MaxUnavailable)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.MaxUnavailable = initial.MaxUnavailable + } else { + cDes.MaxUnavailable = des.MaxUnavailable + } + + return cDes +} + +func canonicalizeNodePoolUpdateSettingsSurgeSettingsSlice(des, initial []NodePoolUpdateSettingsSurgeSettings, opts ...dcl.ApplyOption) []NodePoolUpdateSettingsSurgeSettings { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]NodePoolUpdateSettingsSurgeSettings, 0, len(des)) + for _, d := range des { + cd := canonicalizeNodePoolUpdateSettingsSurgeSettings(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]NodePoolUpdateSettingsSurgeSettings, 0, len(des)) + for i, d := range des { + cd := canonicalizeNodePoolUpdateSettingsSurgeSettings(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewNodePoolUpdateSettingsSurgeSettings(c *Client, des, nw *NodePoolUpdateSettingsSurgeSettings) *NodePoolUpdateSettingsSurgeSettings { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for NodePoolUpdateSettingsSurgeSettings while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewNodePoolUpdateSettingsSurgeSettingsSet(c *Client, des, nw []NodePoolUpdateSettingsSurgeSettings) []NodePoolUpdateSettingsSurgeSettings { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []NodePoolUpdateSettingsSurgeSettings + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareNodePoolUpdateSettingsSurgeSettingsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewNodePoolUpdateSettingsSurgeSettings(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewNodePoolUpdateSettingsSurgeSettingsSlice(c *Client, des, nw []NodePoolUpdateSettingsSurgeSettings) []NodePoolUpdateSettingsSurgeSettings { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []NodePoolUpdateSettingsSurgeSettings + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewNodePoolUpdateSettingsSurgeSettings(c, &d, &n)) + } + + return items +} + +// The differ returns a list of diffs, along with a list of operations that should be taken +// to remedy them. Right now, it does not attempt to consolidate operations - if several +// fields can be fixed with a patch update, it will perform the patch several times. +// Diffs on some fields will be ignored if the `desired` state has an empty (nil) +// value. This empty value indicates that the user does not care about the state for +// the field. Empty fields on the actual object will cause diffs. +// TODO(magic-modules-eng): for efficiency in some resources, add batching. +func diffNodePool(c *Client, desired, actual *NodePool, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) { + if desired == nil || actual == nil { + return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual) + } + + c.Config.Logger.Infof("Diff function called with desired state: %v", desired) + c.Config.Logger.Infof("Diff function called with actual state: %v", actual) + + var fn dcl.FieldName + var newDiffs []*dcl.FieldDiff + // New style diffs. + if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Version, actual.Version, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("Version")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Config, actual.Config, dcl.DiffInfo{ObjectFunction: compareNodePoolConfigNewStyle, EmptyObject: EmptyNodePoolConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Config")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Autoscaling, actual.Autoscaling, dcl.DiffInfo{ObjectFunction: compareNodePoolAutoscalingNewStyle, EmptyObject: EmptyNodePoolAutoscaling, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Autoscaling")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.SubnetId, actual.SubnetId, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SubnetId")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.State, actual.State, dcl.DiffInfo{OutputOnly: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("State")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Uid, actual.Uid, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Uid")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Reconciling, actual.Reconciling, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Reconciling")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.CreateTime, actual.CreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Etag, actual.Etag, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Etag")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Annotations, actual.Annotations, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("Annotations")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.MaxPodsConstraint, actual.MaxPodsConstraint, dcl.DiffInfo{ObjectFunction: compareNodePoolMaxPodsConstraintNewStyle, EmptyObject: EmptyNodePoolMaxPodsConstraint, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MaxPodsConstraint")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Management, actual.Management, dcl.DiffInfo{ObjectFunction: compareNodePoolManagementNewStyle, EmptyObject: EmptyNodePoolManagement, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Management")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.KubeletConfig, actual.KubeletConfig, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareNodePoolKubeletConfigNewStyle, EmptyObject: EmptyNodePoolKubeletConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KubeletConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.UpdateSettings, actual.UpdateSettings, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareNodePoolUpdateSettingsNewStyle, EmptyObject: EmptyNodePoolUpdateSettings, OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("UpdateSettings")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Location, actual.Location, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Location")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Cluster, actual.Cluster, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Cluster")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if len(newDiffs) > 0 { + c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs) + } + return newDiffs, nil +} +func compareNodePoolConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*NodePoolConfig) + if !ok { + desiredNotPointer, ok := d.(NodePoolConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolConfig or *NodePoolConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*NodePoolConfig) + if !ok { + actualNotPointer, ok := a.(NodePoolConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.InstanceType, actual.InstanceType, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("InstanceType")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.RootVolume, actual.RootVolume, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareNodePoolConfigRootVolumeNewStyle, EmptyObject: EmptyNodePoolConfigRootVolume, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("RootVolume")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Taints, actual.Taints, dcl.DiffInfo{ObjectFunction: compareNodePoolConfigTaintsNewStyle, EmptyObject: EmptyNodePoolConfigTaints, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Taints")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Labels, actual.Labels, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("Labels")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Tags, actual.Tags, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("Tags")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.IamInstanceProfile, actual.IamInstanceProfile, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("IamInstanceProfile")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ConfigEncryption, actual.ConfigEncryption, dcl.DiffInfo{ObjectFunction: compareNodePoolConfigConfigEncryptionNewStyle, EmptyObject: EmptyNodePoolConfigConfigEncryption, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ConfigEncryption")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.SshConfig, actual.SshConfig, dcl.DiffInfo{ObjectFunction: compareNodePoolConfigSshConfigNewStyle, EmptyObject: EmptyNodePoolConfigSshConfig, OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("SshConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + +{{- if ne $.TargetVersionName "ga" }} + if ds, err := dcl.Diff(desired.SpotConfig, actual.SpotConfig, dcl.DiffInfo{ObjectFunction: compareNodePoolConfigSpotConfigNewStyle, EmptyObject: EmptyNodePoolConfigSpotConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SpotConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + +{{- end }} + if ds, err := dcl.Diff(desired.SecurityGroupIds, actual.SecurityGroupIds, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("SecurityGroupIds")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ProxyConfig, actual.ProxyConfig, dcl.DiffInfo{ObjectFunction: compareNodePoolConfigProxyConfigNewStyle, EmptyObject: EmptyNodePoolConfigProxyConfig, OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("ProxyConfig")); len(ds) != 0 || err != nil { +{{- if ne $.TargetVersionName "ga" }} + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.InstancePlacement, actual.InstancePlacement, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareNodePoolConfigInstancePlacementNewStyle, EmptyObject: EmptyNodePoolConfigInstancePlacement, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstancePlacement")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ImageType, actual.ImageType, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ImageType")); len(ds) != 0 || err != nil { +{{- end }} + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.AutoscalingMetricsCollection, actual.AutoscalingMetricsCollection, dcl.DiffInfo{ObjectFunction: compareNodePoolConfigAutoscalingMetricsCollectionNewStyle, EmptyObject: EmptyNodePoolConfigAutoscalingMetricsCollection, OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("AutoscalingMetricsCollection")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareNodePoolConfigRootVolumeNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*NodePoolConfigRootVolume) + if !ok { + desiredNotPointer, ok := d.(NodePoolConfigRootVolume) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolConfigRootVolume or *NodePoolConfigRootVolume", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*NodePoolConfigRootVolume) + if !ok { + actualNotPointer, ok := a.(NodePoolConfigRootVolume) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolConfigRootVolume", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.SizeGib, actual.SizeGib, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("SizeGib")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.VolumeType, actual.VolumeType, dcl.DiffInfo{ServerDefault: true, Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("VolumeType")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Iops, actual.Iops, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("Iops")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Throughput, actual.Throughput, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("Throughput")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.KmsKeyArn, actual.KmsKeyArn, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("KmsKeyArn")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareNodePoolConfigTaintsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*NodePoolConfigTaints) + if !ok { + desiredNotPointer, ok := d.(NodePoolConfigTaints) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolConfigTaints or *NodePoolConfigTaints", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*NodePoolConfigTaints) + if !ok { + actualNotPointer, ok := a.(NodePoolConfigTaints) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolConfigTaints", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Key, actual.Key, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Key")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Value, actual.Value, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Value")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Effect, actual.Effect, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Effect")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareNodePoolConfigConfigEncryptionNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*NodePoolConfigConfigEncryption) + if !ok { + desiredNotPointer, ok := d.(NodePoolConfigConfigEncryption) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolConfigConfigEncryption or *NodePoolConfigConfigEncryption", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*NodePoolConfigConfigEncryption) + if !ok { + actualNotPointer, ok := a.(NodePoolConfigConfigEncryption) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolConfigConfigEncryption", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.KmsKeyArn, actual.KmsKeyArn, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("KmsKeyArn")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareNodePoolConfigSshConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*NodePoolConfigSshConfig) + if !ok { + desiredNotPointer, ok := d.(NodePoolConfigSshConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolConfigSshConfig or *NodePoolConfigSshConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*NodePoolConfigSshConfig) + if !ok { + actualNotPointer, ok := a.(NodePoolConfigSshConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolConfigSshConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Ec2KeyPair, actual.Ec2KeyPair, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("Ec2KeyPair")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +{{- if ne $.TargetVersionName "ga" }} +func compareNodePoolConfigSpotConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*NodePoolConfigSpotConfig) + if !ok { + desiredNotPointer, ok := d.(NodePoolConfigSpotConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolConfigSpotConfig or *NodePoolConfigSpotConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*NodePoolConfigSpotConfig) + if !ok { + actualNotPointer, ok := a.(NodePoolConfigSpotConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolConfigSpotConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.InstanceTypes, actual.InstanceTypes, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstanceTypes")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +{{- end }} +func compareNodePoolConfigProxyConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*NodePoolConfigProxyConfig) + if !ok { + desiredNotPointer, ok := d.(NodePoolConfigProxyConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolConfigProxyConfig or *NodePoolConfigProxyConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*NodePoolConfigProxyConfig) + if !ok { + actualNotPointer, ok := a.(NodePoolConfigProxyConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolConfigProxyConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.SecretArn, actual.SecretArn, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("SecretArn")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.SecretVersion, actual.SecretVersion, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("SecretVersion")); len(ds) != 0 || err != nil { +{{- if ne $.TargetVersionName "ga" }} + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareNodePoolConfigInstancePlacementNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*NodePoolConfigInstancePlacement) + if !ok { + desiredNotPointer, ok := d.(NodePoolConfigInstancePlacement) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolConfigInstancePlacement or *NodePoolConfigInstancePlacement", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*NodePoolConfigInstancePlacement) + if !ok { + actualNotPointer, ok := a.(NodePoolConfigInstancePlacement) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolConfigInstancePlacement", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Tenancy, actual.Tenancy, dcl.DiffInfo{ServerDefault: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Tenancy")); len(ds) != 0 || err != nil { +{{- end }} + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareNodePoolConfigAutoscalingMetricsCollectionNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*NodePoolConfigAutoscalingMetricsCollection) + if !ok { + desiredNotPointer, ok := d.(NodePoolConfigAutoscalingMetricsCollection) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolConfigAutoscalingMetricsCollection or *NodePoolConfigAutoscalingMetricsCollection", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*NodePoolConfigAutoscalingMetricsCollection) + if !ok { + actualNotPointer, ok := a.(NodePoolConfigAutoscalingMetricsCollection) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolConfigAutoscalingMetricsCollection", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Granularity, actual.Granularity, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("Granularity")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Metrics, actual.Metrics, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("Metrics")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareNodePoolAutoscalingNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*NodePoolAutoscaling) + if !ok { + desiredNotPointer, ok := d.(NodePoolAutoscaling) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolAutoscaling or *NodePoolAutoscaling", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*NodePoolAutoscaling) + if !ok { + actualNotPointer, ok := a.(NodePoolAutoscaling) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolAutoscaling", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.MinNodeCount, actual.MinNodeCount, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("MinNodeCount")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.MaxNodeCount, actual.MaxNodeCount, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("MaxNodeCount")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareNodePoolMaxPodsConstraintNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*NodePoolMaxPodsConstraint) + if !ok { + desiredNotPointer, ok := d.(NodePoolMaxPodsConstraint) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolMaxPodsConstraint or *NodePoolMaxPodsConstraint", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*NodePoolMaxPodsConstraint) + if !ok { + actualNotPointer, ok := a.(NodePoolMaxPodsConstraint) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolMaxPodsConstraint", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.MaxPodsPerNode, actual.MaxPodsPerNode, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MaxPodsPerNode")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareNodePoolManagementNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*NodePoolManagement) + if !ok { + desiredNotPointer, ok := d.(NodePoolManagement) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolManagement or *NodePoolManagement", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*NodePoolManagement) + if !ok { + actualNotPointer, ok := a.(NodePoolManagement) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolManagement", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.AutoRepair, actual.AutoRepair, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("AutoRepair")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareNodePoolKubeletConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*NodePoolKubeletConfig) + if !ok { + desiredNotPointer, ok := d.(NodePoolKubeletConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolKubeletConfig or *NodePoolKubeletConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*NodePoolKubeletConfig) + if !ok { + actualNotPointer, ok := a.(NodePoolKubeletConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolKubeletConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.CpuManagerPolicy, actual.CpuManagerPolicy, dcl.DiffInfo{ServerDefault: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CpuManagerPolicy")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.CpuCfsQuota, actual.CpuCfsQuota, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CpuCfsQuota")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.CpuCfsQuotaPeriod, actual.CpuCfsQuotaPeriod, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CpuCfsQuotaPeriod")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.PodPidsLimit, actual.PodPidsLimit, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PodPidsLimit")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareNodePoolUpdateSettingsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*NodePoolUpdateSettings) + if !ok { + desiredNotPointer, ok := d.(NodePoolUpdateSettings) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolUpdateSettings or *NodePoolUpdateSettings", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*NodePoolUpdateSettings) + if !ok { + actualNotPointer, ok := a.(NodePoolUpdateSettings) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolUpdateSettings", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.SurgeSettings, actual.SurgeSettings, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareNodePoolUpdateSettingsSurgeSettingsNewStyle, EmptyObject: EmptyNodePoolUpdateSettingsSurgeSettings, OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("SurgeSettings")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareNodePoolUpdateSettingsSurgeSettingsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*NodePoolUpdateSettingsSurgeSettings) + if !ok { + desiredNotPointer, ok := d.(NodePoolUpdateSettingsSurgeSettings) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolUpdateSettingsSurgeSettings or *NodePoolUpdateSettingsSurgeSettings", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*NodePoolUpdateSettingsSurgeSettings) + if !ok { + actualNotPointer, ok := a.(NodePoolUpdateSettingsSurgeSettings) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolUpdateSettingsSurgeSettings", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.MaxSurge, actual.MaxSurge, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("MaxSurge")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.MaxUnavailable, actual.MaxUnavailable, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("MaxUnavailable")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +// urlNormalized returns a copy of the resource struct with values normalized +// for URL substitutions. For instance, it converts long-form self-links to +// short-form so they can be substituted in. 
+func (r *NodePool) urlNormalized() *NodePool { + normalized := dcl.Copy(*r).(NodePool) + normalized.Name = dcl.SelfLinkToName(r.Name) + normalized.Version = dcl.SelfLinkToName(r.Version) + normalized.SubnetId = dcl.SelfLinkToName(r.SubnetId) + normalized.Uid = dcl.SelfLinkToName(r.Uid) + normalized.Etag = dcl.SelfLinkToName(r.Etag) + normalized.Project = dcl.SelfLinkToName(r.Project) + normalized.Location = dcl.SelfLinkToName(r.Location) + normalized.Cluster = dcl.SelfLinkToName(r.Cluster) + return &normalized +} + +func (r *NodePool) updateURL(userBasePath, updateName string) (string, error) { + nr := r.urlNormalized() + if updateName == "UpdateAwsNodePool" { + fields := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "cluster": dcl.ValueOrEmptyString(nr.Cluster), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/awsClusters/{{ "{{" }}cluster{{ "}}" }}/awsNodePools/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, fields), nil + + } + + return "", fmt.Errorf("unknown update name: %s", updateName) +} + +// marshal encodes the NodePool resource into JSON for a Create request, and +// performs transformations from the resource schema to the API schema if +// necessary. +func (r *NodePool) marshal(c *Client) ([]byte, error) { + m, err := expandNodePool(c, r) + if err != nil { + return nil, fmt.Errorf("error marshalling NodePool: %w", err) + } + + return json.Marshal(m) +} + +// unmarshalNodePool decodes JSON responses into the NodePool resource schema. 
+func unmarshalNodePool(b []byte, c *Client, res *NodePool) (*NodePool, error) { + var m map[string]interface{} + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + return unmarshalMapNodePool(m, c, res) +} + +func unmarshalMapNodePool(m map[string]interface{}, c *Client, res *NodePool) (*NodePool, error) { + + flattened := flattenNodePool(c, m, res) + if flattened == nil { + return nil, fmt.Errorf("attempted to flatten empty json object") + } + return flattened, nil +} + +// expandNodePool expands NodePool into a JSON request object. +func expandNodePool(c *Client, f *NodePool) (map[string]interface{}, error) { + m := make(map[string]interface{}) + res := f + _ = res + if v, err := dcl.DeriveField("projects/%s/locations/%s/awsClusters/%s/awsNodePools/%s", f.Name, dcl.SelfLinkToName(f.Project), dcl.SelfLinkToName(f.Location), dcl.SelfLinkToName(f.Cluster), dcl.SelfLinkToName(f.Name)); err != nil { + return nil, fmt.Errorf("error expanding Name into name: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["name"] = v + } + if v := f.Version; dcl.ValueShouldBeSent(v) { + m["version"] = v + } + if v, err := expandNodePoolConfig(c, f.Config, res); err != nil { + return nil, fmt.Errorf("error expanding Config into config: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["config"] = v + } + if v, err := expandNodePoolAutoscaling(c, f.Autoscaling, res); err != nil { + return nil, fmt.Errorf("error expanding Autoscaling into autoscaling: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["autoscaling"] = v + } + if v := f.SubnetId; dcl.ValueShouldBeSent(v) { + m["subnetId"] = v + } + if v := f.Annotations; dcl.ValueShouldBeSent(v) { + m["annotations"] = v + } + if v, err := expandNodePoolMaxPodsConstraint(c, f.MaxPodsConstraint, res); err != nil { + return nil, fmt.Errorf("error expanding MaxPodsConstraint into maxPodsConstraint: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["maxPodsConstraint"] = v + } + if v, err := 
expandNodePoolManagement(c, f.Management, res); err != nil { + return nil, fmt.Errorf("error expanding Management into management: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["management"] = v + } + if v, err := expandNodePoolKubeletConfig(c, f.KubeletConfig, res); err != nil { + return nil, fmt.Errorf("error expanding KubeletConfig into kubeletConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["kubeletConfig"] = v + } + if v, err := expandNodePoolUpdateSettings(c, f.UpdateSettings, res); err != nil { + return nil, fmt.Errorf("error expanding UpdateSettings into updateSettings: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["updateSettings"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Project into project: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["project"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Location into location: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["location"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Cluster into cluster: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["cluster"] = v + } + + return m, nil +} + +// flattenNodePool flattens NodePool from a JSON request object into the +// NodePool type. 
+func flattenNodePool(c *Client, i interface{}, res *NodePool) *NodePool { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + if len(m) == 0 { + return nil + } + + resultRes := &NodePool{} + resultRes.Name = dcl.FlattenString(m["name"]) + resultRes.Version = dcl.FlattenString(m["version"]) + resultRes.Config = flattenNodePoolConfig(c, m["config"], res) + resultRes.Autoscaling = flattenNodePoolAutoscaling(c, m["autoscaling"], res) + resultRes.SubnetId = dcl.FlattenString(m["subnetId"]) + resultRes.State = flattenNodePoolStateEnum(m["state"]) + resultRes.Uid = dcl.FlattenString(m["uid"]) + resultRes.Reconciling = dcl.FlattenBool(m["reconciling"]) + resultRes.CreateTime = dcl.FlattenString(m["createTime"]) + resultRes.UpdateTime = dcl.FlattenString(m["updateTime"]) + resultRes.Etag = dcl.FlattenString(m["etag"]) + resultRes.Annotations = dcl.FlattenKeyValuePairs(m["annotations"]) + resultRes.MaxPodsConstraint = flattenNodePoolMaxPodsConstraint(c, m["maxPodsConstraint"], res) + resultRes.Management = flattenNodePoolManagement(c, m["management"], res) + resultRes.KubeletConfig = flattenNodePoolKubeletConfig(c, m["kubeletConfig"], res) + resultRes.UpdateSettings = flattenNodePoolUpdateSettings(c, m["updateSettings"], res) + resultRes.Project = dcl.FlattenString(m["project"]) + resultRes.Location = dcl.FlattenString(m["location"]) + resultRes.Cluster = dcl.FlattenString(m["cluster"]) + + return resultRes +} + +// expandNodePoolConfigMap expands the contents of NodePoolConfig into a JSON +// request object. 
+func expandNodePoolConfigMap(c *Client, f map[string]NodePoolConfig, res *NodePool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandNodePoolConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandNodePoolConfigSlice expands the contents of NodePoolConfig into a JSON +// request object. +func expandNodePoolConfigSlice(c *Client, f []NodePoolConfig, res *NodePool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandNodePoolConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenNodePoolConfigMap flattens the contents of NodePoolConfig from a JSON +// response object. +func flattenNodePoolConfigMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolConfig{} + } + + if len(a) == 0 { + return map[string]NodePoolConfig{} + } + + items := make(map[string]NodePoolConfig) + for k, item := range a { + items[k] = *flattenNodePoolConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenNodePoolConfigSlice flattens the contents of NodePoolConfig from a JSON +// response object. +func flattenNodePoolConfigSlice(c *Client, i interface{}, res *NodePool) []NodePoolConfig { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolConfig{} + } + + if len(a) == 0 { + return []NodePoolConfig{} + } + + items := make([]NodePoolConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandNodePoolConfig expands an instance of NodePoolConfig into a JSON +// request object. 
+func expandNodePoolConfig(c *Client, f *NodePoolConfig, res *NodePool) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.InstanceType; !dcl.IsEmptyValueIndirect(v) { + m["instanceType"] = v + } + if v, err := expandNodePoolConfigRootVolume(c, f.RootVolume, res); err != nil { + return nil, fmt.Errorf("error expanding RootVolume into rootVolume: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["rootVolume"] = v + } + if v, err := expandNodePoolConfigTaintsSlice(c, f.Taints, res); err != nil { + return nil, fmt.Errorf("error expanding Taints into taints: %w", err) + } else if v != nil { + m["taints"] = v + } + if v := f.Labels; !dcl.IsEmptyValueIndirect(v) { + m["labels"] = v + } + if v := f.Tags; !dcl.IsEmptyValueIndirect(v) { + m["tags"] = v + } + if v := f.IamInstanceProfile; !dcl.IsEmptyValueIndirect(v) { + m["iamInstanceProfile"] = v + } + if v, err := expandNodePoolConfigConfigEncryption(c, f.ConfigEncryption, res); err != nil { + return nil, fmt.Errorf("error expanding ConfigEncryption into configEncryption: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["configEncryption"] = v + } + if v, err := expandNodePoolConfigSshConfig(c, f.SshConfig, res); err != nil { + return nil, fmt.Errorf("error expanding SshConfig into sshConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["sshConfig"] = v + } +{{- if ne $.TargetVersionName "ga" }} + if v, err := expandNodePoolConfigSpotConfig(c, f.SpotConfig, res); err != nil { + return nil, fmt.Errorf("error expanding SpotConfig into spotConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["spotConfig"] = v + } +{{- end }} + if v := f.SecurityGroupIds; v != nil { + m["securityGroupIds"] = v + } + if v, err := expandNodePoolConfigProxyConfig(c, f.ProxyConfig, res); err != nil { + return nil, fmt.Errorf("error expanding ProxyConfig into proxyConfig: %w", err) + } else if 
!dcl.IsEmptyValueIndirect(v) { + m["proxyConfig"] = v +{{- if ne $.TargetVersionName "ga" }} + } + if v, err := expandNodePoolConfigInstancePlacement(c, f.InstancePlacement, res); err != nil { + return nil, fmt.Errorf("error expanding InstancePlacement into instancePlacement: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["instancePlacement"] = v + } + if v := f.ImageType; !dcl.IsEmptyValueIndirect(v) { + m["imageType"] = v +{{- end }} + } + if v, err := expandNodePoolConfigAutoscalingMetricsCollection(c, f.AutoscalingMetricsCollection, res); err != nil { + return nil, fmt.Errorf("error expanding AutoscalingMetricsCollection into autoscalingMetricsCollection: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["autoscalingMetricsCollection"] = v + } + + return m, nil +} + +// flattenNodePoolConfig flattens an instance of NodePoolConfig from a JSON +// response object. +func flattenNodePoolConfig(c *Client, i interface{}, res *NodePool) *NodePoolConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &NodePoolConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyNodePoolConfig + } + r.InstanceType = dcl.FlattenString(m["instanceType"]) + r.RootVolume = flattenNodePoolConfigRootVolume(c, m["rootVolume"], res) + r.Taints = flattenNodePoolConfigTaintsSlice(c, m["taints"], res) + r.Labels = dcl.FlattenKeyValuePairs(m["labels"]) + r.Tags = dcl.FlattenKeyValuePairs(m["tags"]) + r.IamInstanceProfile = dcl.FlattenString(m["iamInstanceProfile"]) + r.ConfigEncryption = flattenNodePoolConfigConfigEncryption(c, m["configEncryption"], res) + r.SshConfig = flattenNodePoolConfigSshConfig(c, m["sshConfig"], res) +{{- if ne $.TargetVersionName "ga" }} + r.SpotConfig = flattenNodePoolConfigSpotConfig(c, m["spotConfig"], res) +{{- end }} + r.SecurityGroupIds = dcl.FlattenStringSlice(m["securityGroupIds"]) + r.ProxyConfig = flattenNodePoolConfigProxyConfig(c, m["proxyConfig"], res) +{{- if ne $.TargetVersionName "ga" }} + 
r.InstancePlacement = flattenNodePoolConfigInstancePlacement(c, m["instancePlacement"], res) + r.ImageType = dcl.FlattenString(m["imageType"]) +{{- end }} + r.AutoscalingMetricsCollection = flattenNodePoolConfigAutoscalingMetricsCollection(c, m["autoscalingMetricsCollection"], res) + + return r +} + +// expandNodePoolConfigRootVolumeMap expands the contents of NodePoolConfigRootVolume into a JSON +// request object. +func expandNodePoolConfigRootVolumeMap(c *Client, f map[string]NodePoolConfigRootVolume, res *NodePool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandNodePoolConfigRootVolume(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandNodePoolConfigRootVolumeSlice expands the contents of NodePoolConfigRootVolume into a JSON +// request object. +func expandNodePoolConfigRootVolumeSlice(c *Client, f []NodePoolConfigRootVolume, res *NodePool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandNodePoolConfigRootVolume(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenNodePoolConfigRootVolumeMap flattens the contents of NodePoolConfigRootVolume from a JSON +// response object. 
+func flattenNodePoolConfigRootVolumeMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolConfigRootVolume { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolConfigRootVolume{} + } + + if len(a) == 0 { + return map[string]NodePoolConfigRootVolume{} + } + + items := make(map[string]NodePoolConfigRootVolume) + for k, item := range a { + items[k] = *flattenNodePoolConfigRootVolume(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenNodePoolConfigRootVolumeSlice flattens the contents of NodePoolConfigRootVolume from a JSON +// response object. +func flattenNodePoolConfigRootVolumeSlice(c *Client, i interface{}, res *NodePool) []NodePoolConfigRootVolume { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolConfigRootVolume{} + } + + if len(a) == 0 { + return []NodePoolConfigRootVolume{} + } + + items := make([]NodePoolConfigRootVolume, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolConfigRootVolume(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandNodePoolConfigRootVolume expands an instance of NodePoolConfigRootVolume into a JSON +// request object. +func expandNodePoolConfigRootVolume(c *Client, f *NodePoolConfigRootVolume, res *NodePool) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.SizeGib; !dcl.IsEmptyValueIndirect(v) { + m["sizeGib"] = v + } + if v := f.VolumeType; !dcl.IsEmptyValueIndirect(v) { + m["volumeType"] = v + } + if v := f.Iops; !dcl.IsEmptyValueIndirect(v) { + m["iops"] = v + } + if v := f.Throughput; !dcl.IsEmptyValueIndirect(v) { + m["throughput"] = v + } + if v := f.KmsKeyArn; !dcl.IsEmptyValueIndirect(v) { + m["kmsKeyArn"] = v + } + + return m, nil +} + +// flattenNodePoolConfigRootVolume flattens an instance of NodePoolConfigRootVolume from a JSON +// response object. 
+func flattenNodePoolConfigRootVolume(c *Client, i interface{}, res *NodePool) *NodePoolConfigRootVolume { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &NodePoolConfigRootVolume{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyNodePoolConfigRootVolume + } + r.SizeGib = dcl.FlattenInteger(m["sizeGib"]) + r.VolumeType = flattenNodePoolConfigRootVolumeVolumeTypeEnum(m["volumeType"]) + r.Iops = dcl.FlattenInteger(m["iops"]) + r.Throughput = dcl.FlattenInteger(m["throughput"]) + r.KmsKeyArn = dcl.FlattenString(m["kmsKeyArn"]) + + return r +} + +// expandNodePoolConfigTaintsMap expands the contents of NodePoolConfigTaints into a JSON +// request object. +func expandNodePoolConfigTaintsMap(c *Client, f map[string]NodePoolConfigTaints, res *NodePool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandNodePoolConfigTaints(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandNodePoolConfigTaintsSlice expands the contents of NodePoolConfigTaints into a JSON +// request object. +func expandNodePoolConfigTaintsSlice(c *Client, f []NodePoolConfigTaints, res *NodePool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandNodePoolConfigTaints(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenNodePoolConfigTaintsMap flattens the contents of NodePoolConfigTaints from a JSON +// response object. 
+func flattenNodePoolConfigTaintsMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolConfigTaints { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolConfigTaints{} + } + + if len(a) == 0 { + return map[string]NodePoolConfigTaints{} + } + + items := make(map[string]NodePoolConfigTaints) + for k, item := range a { + items[k] = *flattenNodePoolConfigTaints(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenNodePoolConfigTaintsSlice flattens the contents of NodePoolConfigTaints from a JSON +// response object. +func flattenNodePoolConfigTaintsSlice(c *Client, i interface{}, res *NodePool) []NodePoolConfigTaints { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolConfigTaints{} + } + + if len(a) == 0 { + return []NodePoolConfigTaints{} + } + + items := make([]NodePoolConfigTaints, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolConfigTaints(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandNodePoolConfigTaints expands an instance of NodePoolConfigTaints into a JSON +// request object. +func expandNodePoolConfigTaints(c *Client, f *NodePoolConfigTaints, res *NodePool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Key; !dcl.IsEmptyValueIndirect(v) { + m["key"] = v + } + if v := f.Value; !dcl.IsEmptyValueIndirect(v) { + m["value"] = v + } + if v := f.Effect; !dcl.IsEmptyValueIndirect(v) { + m["effect"] = v + } + + return m, nil +} + +// flattenNodePoolConfigTaints flattens an instance of NodePoolConfigTaints from a JSON +// response object. 
+func flattenNodePoolConfigTaints(c *Client, i interface{}, res *NodePool) *NodePoolConfigTaints { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &NodePoolConfigTaints{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyNodePoolConfigTaints + } + r.Key = dcl.FlattenString(m["key"]) + r.Value = dcl.FlattenString(m["value"]) + r.Effect = flattenNodePoolConfigTaintsEffectEnum(m["effect"]) + + return r +} + +// expandNodePoolConfigConfigEncryptionMap expands the contents of NodePoolConfigConfigEncryption into a JSON +// request object. +func expandNodePoolConfigConfigEncryptionMap(c *Client, f map[string]NodePoolConfigConfigEncryption, res *NodePool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandNodePoolConfigConfigEncryption(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandNodePoolConfigConfigEncryptionSlice expands the contents of NodePoolConfigConfigEncryption into a JSON +// request object. +func expandNodePoolConfigConfigEncryptionSlice(c *Client, f []NodePoolConfigConfigEncryption, res *NodePool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandNodePoolConfigConfigEncryption(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenNodePoolConfigConfigEncryptionMap flattens the contents of NodePoolConfigConfigEncryption from a JSON +// response object. 
+func flattenNodePoolConfigConfigEncryptionMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolConfigConfigEncryption { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolConfigConfigEncryption{} + } + + if len(a) == 0 { + return map[string]NodePoolConfigConfigEncryption{} + } + + items := make(map[string]NodePoolConfigConfigEncryption) + for k, item := range a { + items[k] = *flattenNodePoolConfigConfigEncryption(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenNodePoolConfigConfigEncryptionSlice flattens the contents of NodePoolConfigConfigEncryption from a JSON +// response object. +func flattenNodePoolConfigConfigEncryptionSlice(c *Client, i interface{}, res *NodePool) []NodePoolConfigConfigEncryption { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolConfigConfigEncryption{} + } + + if len(a) == 0 { + return []NodePoolConfigConfigEncryption{} + } + + items := make([]NodePoolConfigConfigEncryption, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolConfigConfigEncryption(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandNodePoolConfigConfigEncryption expands an instance of NodePoolConfigConfigEncryption into a JSON +// request object. +func expandNodePoolConfigConfigEncryption(c *Client, f *NodePoolConfigConfigEncryption, res *NodePool) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.KmsKeyArn; !dcl.IsEmptyValueIndirect(v) { + m["kmsKeyArn"] = v + } + + return m, nil +} + +// flattenNodePoolConfigConfigEncryption flattens an instance of NodePoolConfigConfigEncryption from a JSON +// response object. 
+func flattenNodePoolConfigConfigEncryption(c *Client, i interface{}, res *NodePool) *NodePoolConfigConfigEncryption { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &NodePoolConfigConfigEncryption{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyNodePoolConfigConfigEncryption + } + r.KmsKeyArn = dcl.FlattenString(m["kmsKeyArn"]) + + return r +} + +// expandNodePoolConfigSshConfigMap expands the contents of NodePoolConfigSshConfig into a JSON +// request object. +func expandNodePoolConfigSshConfigMap(c *Client, f map[string]NodePoolConfigSshConfig, res *NodePool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandNodePoolConfigSshConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandNodePoolConfigSshConfigSlice expands the contents of NodePoolConfigSshConfig into a JSON +// request object. +func expandNodePoolConfigSshConfigSlice(c *Client, f []NodePoolConfigSshConfig, res *NodePool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandNodePoolConfigSshConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenNodePoolConfigSshConfigMap flattens the contents of NodePoolConfigSshConfig from a JSON +// response object. 
+func flattenNodePoolConfigSshConfigMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolConfigSshConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolConfigSshConfig{} + } + + if len(a) == 0 { + return map[string]NodePoolConfigSshConfig{} + } + + items := make(map[string]NodePoolConfigSshConfig) + for k, item := range a { + items[k] = *flattenNodePoolConfigSshConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenNodePoolConfigSshConfigSlice flattens the contents of NodePoolConfigSshConfig from a JSON +// response object. +func flattenNodePoolConfigSshConfigSlice(c *Client, i interface{}, res *NodePool) []NodePoolConfigSshConfig { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolConfigSshConfig{} + } + + if len(a) == 0 { + return []NodePoolConfigSshConfig{} + } + + items := make([]NodePoolConfigSshConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolConfigSshConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandNodePoolConfigSshConfig expands an instance of NodePoolConfigSshConfig into a JSON +// request object. +func expandNodePoolConfigSshConfig(c *Client, f *NodePoolConfigSshConfig, res *NodePool) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Ec2KeyPair; !dcl.IsEmptyValueIndirect(v) { + m["ec2KeyPair"] = v + } + + return m, nil +} + +// flattenNodePoolConfigSshConfig flattens an instance of NodePoolConfigSshConfig from a JSON +// response object. 
+func flattenNodePoolConfigSshConfig(c *Client, i interface{}, res *NodePool) *NodePoolConfigSshConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &NodePoolConfigSshConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyNodePoolConfigSshConfig + } + r.Ec2KeyPair = dcl.FlattenString(m["ec2KeyPair"]) + + return r +} + +{{- if ne $.TargetVersionName "ga" }} +// expandNodePoolConfigSpotConfigMap expands the contents of NodePoolConfigSpotConfig into a JSON +// request object. +func expandNodePoolConfigSpotConfigMap(c *Client, f map[string]NodePoolConfigSpotConfig, res *NodePool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandNodePoolConfigSpotConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandNodePoolConfigSpotConfigSlice expands the contents of NodePoolConfigSpotConfig into a JSON +// request object. +func expandNodePoolConfigSpotConfigSlice(c *Client, f []NodePoolConfigSpotConfig, res *NodePool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandNodePoolConfigSpotConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenNodePoolConfigSpotConfigMap flattens the contents of NodePoolConfigSpotConfig from a JSON +// response object. 
+func flattenNodePoolConfigSpotConfigMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolConfigSpotConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolConfigSpotConfig{} + } + + if len(a) == 0 { + return map[string]NodePoolConfigSpotConfig{} + } + + items := make(map[string]NodePoolConfigSpotConfig) + for k, item := range a { + items[k] = *flattenNodePoolConfigSpotConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenNodePoolConfigSpotConfigSlice flattens the contents of NodePoolConfigSpotConfig from a JSON +// response object. +func flattenNodePoolConfigSpotConfigSlice(c *Client, i interface{}, res *NodePool) []NodePoolConfigSpotConfig { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolConfigSpotConfig{} + } + + if len(a) == 0 { + return []NodePoolConfigSpotConfig{} + } + + items := make([]NodePoolConfigSpotConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolConfigSpotConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandNodePoolConfigSpotConfig expands an instance of NodePoolConfigSpotConfig into a JSON +// request object. +func expandNodePoolConfigSpotConfig(c *Client, f *NodePoolConfigSpotConfig, res *NodePool) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.InstanceTypes; v != nil { + m["instanceTypes"] = v + } + + return m, nil +} + +// flattenNodePoolConfigSpotConfig flattens an instance of NodePoolConfigSpotConfig from a JSON +// response object. 
+func flattenNodePoolConfigSpotConfig(c *Client, i interface{}, res *NodePool) *NodePoolConfigSpotConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &NodePoolConfigSpotConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyNodePoolConfigSpotConfig + } + r.InstanceTypes = dcl.FlattenStringSlice(m["instanceTypes"]) + + return r +} + +{{- end }} +// expandNodePoolConfigProxyConfigMap expands the contents of NodePoolConfigProxyConfig into a JSON +// request object. +func expandNodePoolConfigProxyConfigMap(c *Client, f map[string]NodePoolConfigProxyConfig, res *NodePool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandNodePoolConfigProxyConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandNodePoolConfigProxyConfigSlice expands the contents of NodePoolConfigProxyConfig into a JSON +// request object. +func expandNodePoolConfigProxyConfigSlice(c *Client, f []NodePoolConfigProxyConfig, res *NodePool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandNodePoolConfigProxyConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenNodePoolConfigProxyConfigMap flattens the contents of NodePoolConfigProxyConfig from a JSON +// response object. 
+func flattenNodePoolConfigProxyConfigMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolConfigProxyConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolConfigProxyConfig{} + } + + if len(a) == 0 { + return map[string]NodePoolConfigProxyConfig{} + } + + items := make(map[string]NodePoolConfigProxyConfig) + for k, item := range a { + items[k] = *flattenNodePoolConfigProxyConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenNodePoolConfigProxyConfigSlice flattens the contents of NodePoolConfigProxyConfig from a JSON +// response object. +func flattenNodePoolConfigProxyConfigSlice(c *Client, i interface{}, res *NodePool) []NodePoolConfigProxyConfig { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolConfigProxyConfig{} + } + + if len(a) == 0 { + return []NodePoolConfigProxyConfig{} + } + + items := make([]NodePoolConfigProxyConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolConfigProxyConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandNodePoolConfigProxyConfig expands an instance of NodePoolConfigProxyConfig into a JSON +// request object. +func expandNodePoolConfigProxyConfig(c *Client, f *NodePoolConfigProxyConfig, res *NodePool) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.SecretArn; !dcl.IsEmptyValueIndirect(v) { + m["secretArn"] = v + } + if v := f.SecretVersion; !dcl.IsEmptyValueIndirect(v) { + m["secretVersion"] = v + } + + return m, nil +} + +// flattenNodePoolConfigProxyConfig flattens an instance of NodePoolConfigProxyConfig from a JSON +// response object. 
+func flattenNodePoolConfigProxyConfig(c *Client, i interface{}, res *NodePool) *NodePoolConfigProxyConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &NodePoolConfigProxyConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyNodePoolConfigProxyConfig + } + r.SecretArn = dcl.FlattenString(m["secretArn"]) + r.SecretVersion = dcl.FlattenString(m["secretVersion"]) + + return r +} + +{{- if ne $.TargetVersionName "ga" }} +// expandNodePoolConfigInstancePlacementMap expands the contents of NodePoolConfigInstancePlacement into a JSON +// request object. +func expandNodePoolConfigInstancePlacementMap(c *Client, f map[string]NodePoolConfigInstancePlacement, res *NodePool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandNodePoolConfigInstancePlacement(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandNodePoolConfigInstancePlacementSlice expands the contents of NodePoolConfigInstancePlacement into a JSON +// request object. +func expandNodePoolConfigInstancePlacementSlice(c *Client, f []NodePoolConfigInstancePlacement, res *NodePool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandNodePoolConfigInstancePlacement(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenNodePoolConfigInstancePlacementMap flattens the contents of NodePoolConfigInstancePlacement from a JSON +// response object. 
+func flattenNodePoolConfigInstancePlacementMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolConfigInstancePlacement { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolConfigInstancePlacement{} + } + + if len(a) == 0 { + return map[string]NodePoolConfigInstancePlacement{} + } + + items := make(map[string]NodePoolConfigInstancePlacement) + for k, item := range a { + items[k] = *flattenNodePoolConfigInstancePlacement(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenNodePoolConfigInstancePlacementSlice flattens the contents of NodePoolConfigInstancePlacement from a JSON +// response object. +func flattenNodePoolConfigInstancePlacementSlice(c *Client, i interface{}, res *NodePool) []NodePoolConfigInstancePlacement { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolConfigInstancePlacement{} + } + + if len(a) == 0 { + return []NodePoolConfigInstancePlacement{} + } + + items := make([]NodePoolConfigInstancePlacement, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolConfigInstancePlacement(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandNodePoolConfigInstancePlacement expands an instance of NodePoolConfigInstancePlacement into a JSON +// request object. +func expandNodePoolConfigInstancePlacement(c *Client, f *NodePoolConfigInstancePlacement, res *NodePool) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Tenancy; !dcl.IsEmptyValueIndirect(v) { + m["tenancy"] = v + } + + return m, nil +} + +// flattenNodePoolConfigInstancePlacement flattens an instance of NodePoolConfigInstancePlacement from a JSON +// response object. 
+func flattenNodePoolConfigInstancePlacement(c *Client, i interface{}, res *NodePool) *NodePoolConfigInstancePlacement { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &NodePoolConfigInstancePlacement{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyNodePoolConfigInstancePlacement + } + r.Tenancy = flattenNodePoolConfigInstancePlacementTenancyEnum(m["tenancy"]) + + return r +} + +{{- end }} +// expandNodePoolConfigAutoscalingMetricsCollectionMap expands the contents of NodePoolConfigAutoscalingMetricsCollection into a JSON +// request object. +func expandNodePoolConfigAutoscalingMetricsCollectionMap(c *Client, f map[string]NodePoolConfigAutoscalingMetricsCollection, res *NodePool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandNodePoolConfigAutoscalingMetricsCollection(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandNodePoolConfigAutoscalingMetricsCollectionSlice expands the contents of NodePoolConfigAutoscalingMetricsCollection into a JSON +// request object. +func expandNodePoolConfigAutoscalingMetricsCollectionSlice(c *Client, f []NodePoolConfigAutoscalingMetricsCollection, res *NodePool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandNodePoolConfigAutoscalingMetricsCollection(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenNodePoolConfigAutoscalingMetricsCollectionMap flattens the contents of NodePoolConfigAutoscalingMetricsCollection from a JSON +// response object. 
+func flattenNodePoolConfigAutoscalingMetricsCollectionMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolConfigAutoscalingMetricsCollection { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolConfigAutoscalingMetricsCollection{} + } + + if len(a) == 0 { + return map[string]NodePoolConfigAutoscalingMetricsCollection{} + } + + items := make(map[string]NodePoolConfigAutoscalingMetricsCollection) + for k, item := range a { + items[k] = *flattenNodePoolConfigAutoscalingMetricsCollection(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenNodePoolConfigAutoscalingMetricsCollectionSlice flattens the contents of NodePoolConfigAutoscalingMetricsCollection from a JSON +// response object. +func flattenNodePoolConfigAutoscalingMetricsCollectionSlice(c *Client, i interface{}, res *NodePool) []NodePoolConfigAutoscalingMetricsCollection { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolConfigAutoscalingMetricsCollection{} + } + + if len(a) == 0 { + return []NodePoolConfigAutoscalingMetricsCollection{} + } + + items := make([]NodePoolConfigAutoscalingMetricsCollection, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolConfigAutoscalingMetricsCollection(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandNodePoolConfigAutoscalingMetricsCollection expands an instance of NodePoolConfigAutoscalingMetricsCollection into a JSON +// request object. 
+func expandNodePoolConfigAutoscalingMetricsCollection(c *Client, f *NodePoolConfigAutoscalingMetricsCollection, res *NodePool) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Granularity; !dcl.IsEmptyValueIndirect(v) { + m["granularity"] = v + } + if v := f.Metrics; v != nil { + m["metrics"] = v + } + + return m, nil +} + +// flattenNodePoolConfigAutoscalingMetricsCollection flattens an instance of NodePoolConfigAutoscalingMetricsCollection from a JSON +// response object. +func flattenNodePoolConfigAutoscalingMetricsCollection(c *Client, i interface{}, res *NodePool) *NodePoolConfigAutoscalingMetricsCollection { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &NodePoolConfigAutoscalingMetricsCollection{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyNodePoolConfigAutoscalingMetricsCollection + } + r.Granularity = dcl.FlattenString(m["granularity"]) + r.Metrics = dcl.FlattenStringSlice(m["metrics"]) + + return r +} + +// expandNodePoolAutoscalingMap expands the contents of NodePoolAutoscaling into a JSON +// request object. +func expandNodePoolAutoscalingMap(c *Client, f map[string]NodePoolAutoscaling, res *NodePool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandNodePoolAutoscaling(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandNodePoolAutoscalingSlice expands the contents of NodePoolAutoscaling into a JSON +// request object. 
+func expandNodePoolAutoscalingSlice(c *Client, f []NodePoolAutoscaling, res *NodePool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandNodePoolAutoscaling(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenNodePoolAutoscalingMap flattens the contents of NodePoolAutoscaling from a JSON +// response object. +func flattenNodePoolAutoscalingMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolAutoscaling { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolAutoscaling{} + } + + if len(a) == 0 { + return map[string]NodePoolAutoscaling{} + } + + items := make(map[string]NodePoolAutoscaling) + for k, item := range a { + items[k] = *flattenNodePoolAutoscaling(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenNodePoolAutoscalingSlice flattens the contents of NodePoolAutoscaling from a JSON +// response object. +func flattenNodePoolAutoscalingSlice(c *Client, i interface{}, res *NodePool) []NodePoolAutoscaling { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolAutoscaling{} + } + + if len(a) == 0 { + return []NodePoolAutoscaling{} + } + + items := make([]NodePoolAutoscaling, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolAutoscaling(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandNodePoolAutoscaling expands an instance of NodePoolAutoscaling into a JSON +// request object. 
+func expandNodePoolAutoscaling(c *Client, f *NodePoolAutoscaling, res *NodePool) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.MinNodeCount; !dcl.IsEmptyValueIndirect(v) { + m["minNodeCount"] = v + } + if v := f.MaxNodeCount; !dcl.IsEmptyValueIndirect(v) { + m["maxNodeCount"] = v + } + + return m, nil +} + +// flattenNodePoolAutoscaling flattens an instance of NodePoolAutoscaling from a JSON +// response object. +func flattenNodePoolAutoscaling(c *Client, i interface{}, res *NodePool) *NodePoolAutoscaling { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &NodePoolAutoscaling{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyNodePoolAutoscaling + } + r.MinNodeCount = dcl.FlattenInteger(m["minNodeCount"]) + r.MaxNodeCount = dcl.FlattenInteger(m["maxNodeCount"]) + + return r +} + +// expandNodePoolMaxPodsConstraintMap expands the contents of NodePoolMaxPodsConstraint into a JSON +// request object. +func expandNodePoolMaxPodsConstraintMap(c *Client, f map[string]NodePoolMaxPodsConstraint, res *NodePool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandNodePoolMaxPodsConstraint(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandNodePoolMaxPodsConstraintSlice expands the contents of NodePoolMaxPodsConstraint into a JSON +// request object. 
+func expandNodePoolMaxPodsConstraintSlice(c *Client, f []NodePoolMaxPodsConstraint, res *NodePool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandNodePoolMaxPodsConstraint(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenNodePoolMaxPodsConstraintMap flattens the contents of NodePoolMaxPodsConstraint from a JSON +// response object. +func flattenNodePoolMaxPodsConstraintMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolMaxPodsConstraint { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolMaxPodsConstraint{} + } + + if len(a) == 0 { + return map[string]NodePoolMaxPodsConstraint{} + } + + items := make(map[string]NodePoolMaxPodsConstraint) + for k, item := range a { + items[k] = *flattenNodePoolMaxPodsConstraint(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenNodePoolMaxPodsConstraintSlice flattens the contents of NodePoolMaxPodsConstraint from a JSON +// response object. +func flattenNodePoolMaxPodsConstraintSlice(c *Client, i interface{}, res *NodePool) []NodePoolMaxPodsConstraint { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolMaxPodsConstraint{} + } + + if len(a) == 0 { + return []NodePoolMaxPodsConstraint{} + } + + items := make([]NodePoolMaxPodsConstraint, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolMaxPodsConstraint(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandNodePoolMaxPodsConstraint expands an instance of NodePoolMaxPodsConstraint into a JSON +// request object. 
+func expandNodePoolMaxPodsConstraint(c *Client, f *NodePoolMaxPodsConstraint, res *NodePool) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.MaxPodsPerNode; !dcl.IsEmptyValueIndirect(v) { + m["maxPodsPerNode"] = v + } + + return m, nil +} + +// flattenNodePoolMaxPodsConstraint flattens an instance of NodePoolMaxPodsConstraint from a JSON +// response object. +func flattenNodePoolMaxPodsConstraint(c *Client, i interface{}, res *NodePool) *NodePoolMaxPodsConstraint { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &NodePoolMaxPodsConstraint{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyNodePoolMaxPodsConstraint + } + r.MaxPodsPerNode = dcl.FlattenInteger(m["maxPodsPerNode"]) + + return r +} + +// expandNodePoolManagementMap expands the contents of NodePoolManagement into a JSON +// request object. +func expandNodePoolManagementMap(c *Client, f map[string]NodePoolManagement, res *NodePool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandNodePoolManagement(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandNodePoolManagementSlice expands the contents of NodePoolManagement into a JSON +// request object. +func expandNodePoolManagementSlice(c *Client, f []NodePoolManagement, res *NodePool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandNodePoolManagement(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenNodePoolManagementMap flattens the contents of NodePoolManagement from a JSON +// response object. 
+func flattenNodePoolManagementMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolManagement { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolManagement{} + } + + if len(a) == 0 { + return map[string]NodePoolManagement{} + } + + items := make(map[string]NodePoolManagement) + for k, item := range a { + items[k] = *flattenNodePoolManagement(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenNodePoolManagementSlice flattens the contents of NodePoolManagement from a JSON +// response object. +func flattenNodePoolManagementSlice(c *Client, i interface{}, res *NodePool) []NodePoolManagement { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolManagement{} + } + + if len(a) == 0 { + return []NodePoolManagement{} + } + + items := make([]NodePoolManagement, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolManagement(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandNodePoolManagement expands an instance of NodePoolManagement into a JSON +// request object. +func expandNodePoolManagement(c *Client, f *NodePoolManagement, res *NodePool) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.AutoRepair; !dcl.IsEmptyValueIndirect(v) { + m["autoRepair"] = v + } + + return m, nil +} + +// flattenNodePoolManagement flattens an instance of NodePoolManagement from a JSON +// response object. +func flattenNodePoolManagement(c *Client, i interface{}, res *NodePool) *NodePoolManagement { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &NodePoolManagement{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyNodePoolManagement + } + r.AutoRepair = dcl.FlattenBool(m["autoRepair"]) + + return r +} + +// expandNodePoolKubeletConfigMap expands the contents of NodePoolKubeletConfig into a JSON +// request object. 
+func expandNodePoolKubeletConfigMap(c *Client, f map[string]NodePoolKubeletConfig, res *NodePool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandNodePoolKubeletConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandNodePoolKubeletConfigSlice expands the contents of NodePoolKubeletConfig into a JSON +// request object. +func expandNodePoolKubeletConfigSlice(c *Client, f []NodePoolKubeletConfig, res *NodePool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandNodePoolKubeletConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenNodePoolKubeletConfigMap flattens the contents of NodePoolKubeletConfig from a JSON +// response object. +func flattenNodePoolKubeletConfigMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolKubeletConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolKubeletConfig{} + } + + if len(a) == 0 { + return map[string]NodePoolKubeletConfig{} + } + + items := make(map[string]NodePoolKubeletConfig) + for k, item := range a { + items[k] = *flattenNodePoolKubeletConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenNodePoolKubeletConfigSlice flattens the contents of NodePoolKubeletConfig from a JSON +// response object. 
+func flattenNodePoolKubeletConfigSlice(c *Client, i interface{}, res *NodePool) []NodePoolKubeletConfig { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolKubeletConfig{} + } + + if len(a) == 0 { + return []NodePoolKubeletConfig{} + } + + items := make([]NodePoolKubeletConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolKubeletConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandNodePoolKubeletConfig expands an instance of NodePoolKubeletConfig into a JSON +// request object. +func expandNodePoolKubeletConfig(c *Client, f *NodePoolKubeletConfig, res *NodePool) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.CpuManagerPolicy; !dcl.IsEmptyValueIndirect(v) { + m["cpuManagerPolicy"] = v + } + if v := f.CpuCfsQuota; !dcl.IsEmptyValueIndirect(v) { + m["cpuCfsQuota"] = v + } + if v := f.CpuCfsQuotaPeriod; !dcl.IsEmptyValueIndirect(v) { + m["cpuCfsQuotaPeriod"] = v + } + if v := f.PodPidsLimit; !dcl.IsEmptyValueIndirect(v) { + m["podPidsLimit"] = v + } + + return m, nil +} + +// flattenNodePoolKubeletConfig flattens an instance of NodePoolKubeletConfig from a JSON +// response object. +func flattenNodePoolKubeletConfig(c *Client, i interface{}, res *NodePool) *NodePoolKubeletConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &NodePoolKubeletConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyNodePoolKubeletConfig + } + r.CpuManagerPolicy = flattenNodePoolKubeletConfigCpuManagerPolicyEnum(m["cpuManagerPolicy"]) + r.CpuCfsQuota = dcl.FlattenBool(m["cpuCfsQuota"]) + r.CpuCfsQuotaPeriod = dcl.FlattenString(m["cpuCfsQuotaPeriod"]) + r.PodPidsLimit = dcl.FlattenInteger(m["podPidsLimit"]) + + return r +} + +// expandNodePoolUpdateSettingsMap expands the contents of NodePoolUpdateSettings into a JSON +// request object. 
+func expandNodePoolUpdateSettingsMap(c *Client, f map[string]NodePoolUpdateSettings, res *NodePool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandNodePoolUpdateSettings(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandNodePoolUpdateSettingsSlice expands the contents of NodePoolUpdateSettings into a JSON +// request object. +func expandNodePoolUpdateSettingsSlice(c *Client, f []NodePoolUpdateSettings, res *NodePool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandNodePoolUpdateSettings(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenNodePoolUpdateSettingsMap flattens the contents of NodePoolUpdateSettings from a JSON +// response object. +func flattenNodePoolUpdateSettingsMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolUpdateSettings { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolUpdateSettings{} + } + + if len(a) == 0 { + return map[string]NodePoolUpdateSettings{} + } + + items := make(map[string]NodePoolUpdateSettings) + for k, item := range a { + items[k] = *flattenNodePoolUpdateSettings(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenNodePoolUpdateSettingsSlice flattens the contents of NodePoolUpdateSettings from a JSON +// response object. 
+func flattenNodePoolUpdateSettingsSlice(c *Client, i interface{}, res *NodePool) []NodePoolUpdateSettings { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolUpdateSettings{} + } + + if len(a) == 0 { + return []NodePoolUpdateSettings{} + } + + items := make([]NodePoolUpdateSettings, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolUpdateSettings(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandNodePoolUpdateSettings expands an instance of NodePoolUpdateSettings into a JSON +// request object. +func expandNodePoolUpdateSettings(c *Client, f *NodePoolUpdateSettings, res *NodePool) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v, err := expandNodePoolUpdateSettingsSurgeSettings(c, f.SurgeSettings, res); err != nil { + return nil, fmt.Errorf("error expanding SurgeSettings into surgeSettings: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["surgeSettings"] = v + } + + return m, nil +} + +// flattenNodePoolUpdateSettings flattens an instance of NodePoolUpdateSettings from a JSON +// response object. +func flattenNodePoolUpdateSettings(c *Client, i interface{}, res *NodePool) *NodePoolUpdateSettings { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &NodePoolUpdateSettings{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyNodePoolUpdateSettings + } + r.SurgeSettings = flattenNodePoolUpdateSettingsSurgeSettings(c, m["surgeSettings"], res) + + return r +} + +// expandNodePoolUpdateSettingsSurgeSettingsMap expands the contents of NodePoolUpdateSettingsSurgeSettings into a JSON +// request object. 
+func expandNodePoolUpdateSettingsSurgeSettingsMap(c *Client, f map[string]NodePoolUpdateSettingsSurgeSettings, res *NodePool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandNodePoolUpdateSettingsSurgeSettings(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandNodePoolUpdateSettingsSurgeSettingsSlice expands the contents of NodePoolUpdateSettingsSurgeSettings into a JSON +// request object. +func expandNodePoolUpdateSettingsSurgeSettingsSlice(c *Client, f []NodePoolUpdateSettingsSurgeSettings, res *NodePool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandNodePoolUpdateSettingsSurgeSettings(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenNodePoolUpdateSettingsSurgeSettingsMap flattens the contents of NodePoolUpdateSettingsSurgeSettings from a JSON +// response object. +func flattenNodePoolUpdateSettingsSurgeSettingsMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolUpdateSettingsSurgeSettings { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolUpdateSettingsSurgeSettings{} + } + + if len(a) == 0 { + return map[string]NodePoolUpdateSettingsSurgeSettings{} + } + + items := make(map[string]NodePoolUpdateSettingsSurgeSettings) + for k, item := range a { + items[k] = *flattenNodePoolUpdateSettingsSurgeSettings(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenNodePoolUpdateSettingsSurgeSettingsSlice flattens the contents of NodePoolUpdateSettingsSurgeSettings from a JSON +// response object. 
+func flattenNodePoolUpdateSettingsSurgeSettingsSlice(c *Client, i interface{}, res *NodePool) []NodePoolUpdateSettingsSurgeSettings { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolUpdateSettingsSurgeSettings{} + } + + if len(a) == 0 { + return []NodePoolUpdateSettingsSurgeSettings{} + } + + items := make([]NodePoolUpdateSettingsSurgeSettings, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolUpdateSettingsSurgeSettings(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandNodePoolUpdateSettingsSurgeSettings expands an instance of NodePoolUpdateSettingsSurgeSettings into a JSON +// request object. +func expandNodePoolUpdateSettingsSurgeSettings(c *Client, f *NodePoolUpdateSettingsSurgeSettings, res *NodePool) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.MaxSurge; !dcl.IsEmptyValueIndirect(v) { + m["maxSurge"] = v + } + if v := f.MaxUnavailable; !dcl.IsEmptyValueIndirect(v) { + m["maxUnavailable"] = v + } + + return m, nil +} + +// flattenNodePoolUpdateSettingsSurgeSettings flattens an instance of NodePoolUpdateSettingsSurgeSettings from a JSON +// response object. +func flattenNodePoolUpdateSettingsSurgeSettings(c *Client, i interface{}, res *NodePool) *NodePoolUpdateSettingsSurgeSettings { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &NodePoolUpdateSettingsSurgeSettings{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyNodePoolUpdateSettingsSurgeSettings + } + r.MaxSurge = dcl.FlattenInteger(m["maxSurge"]) + r.MaxUnavailable = dcl.FlattenInteger(m["maxUnavailable"]) + + return r +} + +// flattenNodePoolConfigRootVolumeVolumeTypeEnumMap flattens the contents of NodePoolConfigRootVolumeVolumeTypeEnum from a JSON +// response object. 
+func flattenNodePoolConfigRootVolumeVolumeTypeEnumMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolConfigRootVolumeVolumeTypeEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolConfigRootVolumeVolumeTypeEnum{} + } + + if len(a) == 0 { + return map[string]NodePoolConfigRootVolumeVolumeTypeEnum{} + } + + items := make(map[string]NodePoolConfigRootVolumeVolumeTypeEnum) + for k, item := range a { + items[k] = *flattenNodePoolConfigRootVolumeVolumeTypeEnum(item.(interface{})) + } + + return items +} + +// flattenNodePoolConfigRootVolumeVolumeTypeEnumSlice flattens the contents of NodePoolConfigRootVolumeVolumeTypeEnum from a JSON +// response object. +func flattenNodePoolConfigRootVolumeVolumeTypeEnumSlice(c *Client, i interface{}, res *NodePool) []NodePoolConfigRootVolumeVolumeTypeEnum { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolConfigRootVolumeVolumeTypeEnum{} + } + + if len(a) == 0 { + return []NodePoolConfigRootVolumeVolumeTypeEnum{} + } + + items := make([]NodePoolConfigRootVolumeVolumeTypeEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolConfigRootVolumeVolumeTypeEnum(item.(interface{}))) + } + + return items +} + +// flattenNodePoolConfigRootVolumeVolumeTypeEnum asserts that an interface is a string, and returns a +// pointer to a *NodePoolConfigRootVolumeVolumeTypeEnum with the same value as that string. +func flattenNodePoolConfigRootVolumeVolumeTypeEnum(i interface{}) *NodePoolConfigRootVolumeVolumeTypeEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return NodePoolConfigRootVolumeVolumeTypeEnumRef(s) +} + +// flattenNodePoolConfigTaintsEffectEnumMap flattens the contents of NodePoolConfigTaintsEffectEnum from a JSON +// response object. 
+func flattenNodePoolConfigTaintsEffectEnumMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolConfigTaintsEffectEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolConfigTaintsEffectEnum{} + } + + if len(a) == 0 { + return map[string]NodePoolConfigTaintsEffectEnum{} + } + + items := make(map[string]NodePoolConfigTaintsEffectEnum) + for k, item := range a { + items[k] = *flattenNodePoolConfigTaintsEffectEnum(item.(interface{})) + } + + return items +} + +// flattenNodePoolConfigTaintsEffectEnumSlice flattens the contents of NodePoolConfigTaintsEffectEnum from a JSON +// response object. +func flattenNodePoolConfigTaintsEffectEnumSlice(c *Client, i interface{}, res *NodePool) []NodePoolConfigTaintsEffectEnum { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolConfigTaintsEffectEnum{} + } + + if len(a) == 0 { + return []NodePoolConfigTaintsEffectEnum{} + } + + items := make([]NodePoolConfigTaintsEffectEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolConfigTaintsEffectEnum(item.(interface{}))) + } + + return items +} + +// flattenNodePoolConfigTaintsEffectEnum asserts that an interface is a string, and returns a +// pointer to a *NodePoolConfigTaintsEffectEnum with the same value as that string. +func flattenNodePoolConfigTaintsEffectEnum(i interface{}) *NodePoolConfigTaintsEffectEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return NodePoolConfigTaintsEffectEnumRef(s) +{{- if ne $.TargetVersionName "ga" }} +} + +// flattenNodePoolConfigInstancePlacementTenancyEnumMap flattens the contents of NodePoolConfigInstancePlacementTenancyEnum from a JSON +// response object. 
+func flattenNodePoolConfigInstancePlacementTenancyEnumMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolConfigInstancePlacementTenancyEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolConfigInstancePlacementTenancyEnum{} + } + + if len(a) == 0 { + return map[string]NodePoolConfigInstancePlacementTenancyEnum{} + } + + items := make(map[string]NodePoolConfigInstancePlacementTenancyEnum) + for k, item := range a { + items[k] = *flattenNodePoolConfigInstancePlacementTenancyEnum(item.(interface{})) + } + + return items +} + +// flattenNodePoolConfigInstancePlacementTenancyEnumSlice flattens the contents of NodePoolConfigInstancePlacementTenancyEnum from a JSON +// response object. +func flattenNodePoolConfigInstancePlacementTenancyEnumSlice(c *Client, i interface{}, res *NodePool) []NodePoolConfigInstancePlacementTenancyEnum { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolConfigInstancePlacementTenancyEnum{} + } + + if len(a) == 0 { + return []NodePoolConfigInstancePlacementTenancyEnum{} + } + + items := make([]NodePoolConfigInstancePlacementTenancyEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolConfigInstancePlacementTenancyEnum(item.(interface{}))) + } + + return items +} + +// flattenNodePoolConfigInstancePlacementTenancyEnum asserts that an interface is a string, and returns a +// pointer to a *NodePoolConfigInstancePlacementTenancyEnum with the same value as that string. +func flattenNodePoolConfigInstancePlacementTenancyEnum(i interface{}) *NodePoolConfigInstancePlacementTenancyEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return NodePoolConfigInstancePlacementTenancyEnumRef(s) +{{- end }} +} + +// flattenNodePoolStateEnumMap flattens the contents of NodePoolStateEnum from a JSON +// response object. 
+func flattenNodePoolStateEnumMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolStateEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolStateEnum{} + } + + if len(a) == 0 { + return map[string]NodePoolStateEnum{} + } + + items := make(map[string]NodePoolStateEnum) + for k, item := range a { + items[k] = *flattenNodePoolStateEnum(item.(interface{})) + } + + return items +} + +// flattenNodePoolStateEnumSlice flattens the contents of NodePoolStateEnum from a JSON +// response object. +func flattenNodePoolStateEnumSlice(c *Client, i interface{}, res *NodePool) []NodePoolStateEnum { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolStateEnum{} + } + + if len(a) == 0 { + return []NodePoolStateEnum{} + } + + items := make([]NodePoolStateEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolStateEnum(item.(interface{}))) + } + + return items +} + +// flattenNodePoolStateEnum asserts that an interface is a string, and returns a +// pointer to a *NodePoolStateEnum with the same value as that string. +func flattenNodePoolStateEnum(i interface{}) *NodePoolStateEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return NodePoolStateEnumRef(s) +} + +// flattenNodePoolKubeletConfigCpuManagerPolicyEnumMap flattens the contents of NodePoolKubeletConfigCpuManagerPolicyEnum from a JSON +// response object. 
+func flattenNodePoolKubeletConfigCpuManagerPolicyEnumMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolKubeletConfigCpuManagerPolicyEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolKubeletConfigCpuManagerPolicyEnum{} + } + + if len(a) == 0 { + return map[string]NodePoolKubeletConfigCpuManagerPolicyEnum{} + } + + items := make(map[string]NodePoolKubeletConfigCpuManagerPolicyEnum) + for k, item := range a { + items[k] = *flattenNodePoolKubeletConfigCpuManagerPolicyEnum(item.(interface{})) + } + + return items +} + +// flattenNodePoolKubeletConfigCpuManagerPolicyEnumSlice flattens the contents of NodePoolKubeletConfigCpuManagerPolicyEnum from a JSON +// response object. +func flattenNodePoolKubeletConfigCpuManagerPolicyEnumSlice(c *Client, i interface{}, res *NodePool) []NodePoolKubeletConfigCpuManagerPolicyEnum { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolKubeletConfigCpuManagerPolicyEnum{} + } + + if len(a) == 0 { + return []NodePoolKubeletConfigCpuManagerPolicyEnum{} + } + + items := make([]NodePoolKubeletConfigCpuManagerPolicyEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolKubeletConfigCpuManagerPolicyEnum(item.(interface{}))) + } + + return items +} + +// flattenNodePoolKubeletConfigCpuManagerPolicyEnum asserts that an interface is a string, and returns a +// pointer to a *NodePoolKubeletConfigCpuManagerPolicyEnum with the same value as that string. +func flattenNodePoolKubeletConfigCpuManagerPolicyEnum(i interface{}) *NodePoolKubeletConfigCpuManagerPolicyEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return NodePoolKubeletConfigCpuManagerPolicyEnumRef(s) +} + +// This function returns a matcher that checks whether a serialized resource matches this resource +// in its parameters (as defined by the fields in a Get, which definitionally define resource +// identity). This is useful in extracting the element from a List call. 
+func (r *NodePool) matcher(c *Client) func([]byte) bool { + return func(b []byte) bool { + cr, err := unmarshalNodePool(b, c, r) + if err != nil { + c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.") + return false + } + nr := r.urlNormalized() + ncr := cr.urlNormalized() + c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr) + + if nr.Project == nil && ncr.Project == nil { + c.Config.Logger.Info("Both Project fields null - considering equal.") + } else if nr.Project == nil || ncr.Project == nil { + c.Config.Logger.Info("Only one Project field is null - considering unequal.") + return false + } else if *nr.Project != *ncr.Project { + return false + } + if nr.Location == nil && ncr.Location == nil { + c.Config.Logger.Info("Both Location fields null - considering equal.") + } else if nr.Location == nil || ncr.Location == nil { + c.Config.Logger.Info("Only one Location field is null - considering unequal.") + return false + } else if *nr.Location != *ncr.Location { + return false + } + if nr.Cluster == nil && ncr.Cluster == nil { + c.Config.Logger.Info("Both Cluster fields null - considering equal.") + } else if nr.Cluster == nil || ncr.Cluster == nil { + c.Config.Logger.Info("Only one Cluster field is null - considering unequal.") + return false + } else if *nr.Cluster != *ncr.Cluster { + return false + } + if nr.Name == nil && ncr.Name == nil { + c.Config.Logger.Info("Both Name fields null - considering equal.") + } else if nr.Name == nil || ncr.Name == nil { + c.Config.Logger.Info("Only one Name field is null - considering unequal.") + return false + } else if *nr.Name != *ncr.Name { + return false + } + return true + } +} + +type nodePoolDiff struct { + // The diff should include one or the other of RequiresRecreate or UpdateOp. 
+ RequiresRecreate bool + UpdateOp nodePoolApiOperation + FieldName string // used for error logging +} + +func convertFieldDiffsToNodePoolDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]nodePoolDiff, error) { + opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff) + // Map each operation name to the field diffs associated with it. + for _, fd := range fds { + for _, ro := range fd.ResultingOperation { + if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok { + fieldDiffs = append(fieldDiffs, fd) + opNamesToFieldDiffs[ro] = fieldDiffs + } else { + config.Logger.Infof("%s required due to diff: %v", ro, fd) + opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd} + } + } + } + var diffs []nodePoolDiff + // For each operation name, create a nodePoolDiff which contains the operation. + for opName, fieldDiffs := range opNamesToFieldDiffs { + // Use the first field diff's field name for logging required recreate error. + diff := nodePoolDiff{FieldName: fieldDiffs[0].FieldName} + if opName == "Recreate" { + diff.RequiresRecreate = true + } else { + apiOp, err := convertOpNameToNodePoolApiOperation(opName, fieldDiffs, opts...) + if err != nil { + return diffs, err + } + diff.UpdateOp = apiOp + } + diffs = append(diffs, diff) + } + return diffs, nil +} + +func convertOpNameToNodePoolApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (nodePoolApiOperation, error) { + switch opName { + + case "updateNodePoolUpdateAwsNodePoolOperation": + return &updateNodePoolUpdateAwsNodePoolOperation{FieldDiffs: fieldDiffs}, nil + + default: + return nil, fmt.Errorf("no such operation with name: %v", opName) + } +} + +func extractNodePoolFields(r *NodePool) error { + vConfig := r.Config + if vConfig == nil { + // note: explicitly not the empty object. 
+ vConfig = &NodePoolConfig{} + } + if err := extractNodePoolConfigFields(r, vConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vConfig) { + r.Config = vConfig + } + vAutoscaling := r.Autoscaling + if vAutoscaling == nil { + // note: explicitly not the empty object. + vAutoscaling = &NodePoolAutoscaling{} + } + if err := extractNodePoolAutoscalingFields(r, vAutoscaling); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAutoscaling) { + r.Autoscaling = vAutoscaling + } + vMaxPodsConstraint := r.MaxPodsConstraint + if vMaxPodsConstraint == nil { + // note: explicitly not the empty object. + vMaxPodsConstraint = &NodePoolMaxPodsConstraint{} + } + if err := extractNodePoolMaxPodsConstraintFields(r, vMaxPodsConstraint); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMaxPodsConstraint) { + r.MaxPodsConstraint = vMaxPodsConstraint + } + vManagement := r.Management + if vManagement == nil { + // note: explicitly not the empty object. + vManagement = &NodePoolManagement{} + } + if err := extractNodePoolManagementFields(r, vManagement); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vManagement) { + r.Management = vManagement + } + vKubeletConfig := r.KubeletConfig + if vKubeletConfig == nil { + // note: explicitly not the empty object. + vKubeletConfig = &NodePoolKubeletConfig{} + } + if err := extractNodePoolKubeletConfigFields(r, vKubeletConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vKubeletConfig) { + r.KubeletConfig = vKubeletConfig + } + vUpdateSettings := r.UpdateSettings + if vUpdateSettings == nil { + // note: explicitly not the empty object. 
+ vUpdateSettings = &NodePoolUpdateSettings{} + } + if err := extractNodePoolUpdateSettingsFields(r, vUpdateSettings); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vUpdateSettings) { + r.UpdateSettings = vUpdateSettings + } + return nil +} +func extractNodePoolConfigFields(r *NodePool, o *NodePoolConfig) error { + vRootVolume := o.RootVolume + if vRootVolume == nil { + // note: explicitly not the empty object. + vRootVolume = &NodePoolConfigRootVolume{} + } + if err := extractNodePoolConfigRootVolumeFields(r, vRootVolume); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vRootVolume) { + o.RootVolume = vRootVolume + } + vConfigEncryption := o.ConfigEncryption + if vConfigEncryption == nil { + // note: explicitly not the empty object. + vConfigEncryption = &NodePoolConfigConfigEncryption{} + } + if err := extractNodePoolConfigConfigEncryptionFields(r, vConfigEncryption); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vConfigEncryption) { + o.ConfigEncryption = vConfigEncryption + } + vSshConfig := o.SshConfig + if vSshConfig == nil { + // note: explicitly not the empty object. + vSshConfig = &NodePoolConfigSshConfig{} + } + if err := extractNodePoolConfigSshConfigFields(r, vSshConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSshConfig) { + o.SshConfig = vSshConfig + } +{{- if ne $.TargetVersionName "ga" }} + vSpotConfig := o.SpotConfig + if vSpotConfig == nil { + // note: explicitly not the empty object. + vSpotConfig = &NodePoolConfigSpotConfig{} + } + if err := extractNodePoolConfigSpotConfigFields(r, vSpotConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSpotConfig) { + o.SpotConfig = vSpotConfig + } +{{- end }} + vProxyConfig := o.ProxyConfig + if vProxyConfig == nil { + // note: explicitly not the empty object. 
+ vProxyConfig = &NodePoolConfigProxyConfig{} + } + if err := extractNodePoolConfigProxyConfigFields(r, vProxyConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vProxyConfig) { + o.ProxyConfig = vProxyConfig + } +{{- if ne $.TargetVersionName "ga" }} + vInstancePlacement := o.InstancePlacement + if vInstancePlacement == nil { + // note: explicitly not the empty object. + vInstancePlacement = &NodePoolConfigInstancePlacement{} + } + if err := extractNodePoolConfigInstancePlacementFields(r, vInstancePlacement); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vInstancePlacement) { + o.InstancePlacement = vInstancePlacement + } +{{- end }} + vAutoscalingMetricsCollection := o.AutoscalingMetricsCollection + if vAutoscalingMetricsCollection == nil { + // note: explicitly not the empty object. + vAutoscalingMetricsCollection = &NodePoolConfigAutoscalingMetricsCollection{} + } + if err := extractNodePoolConfigAutoscalingMetricsCollectionFields(r, vAutoscalingMetricsCollection); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAutoscalingMetricsCollection) { + o.AutoscalingMetricsCollection = vAutoscalingMetricsCollection + } + return nil +} +func extractNodePoolConfigRootVolumeFields(r *NodePool, o *NodePoolConfigRootVolume) error { + return nil +} +func extractNodePoolConfigTaintsFields(r *NodePool, o *NodePoolConfigTaints) error { + return nil +} +func extractNodePoolConfigConfigEncryptionFields(r *NodePool, o *NodePoolConfigConfigEncryption) error { + return nil +} +func extractNodePoolConfigSshConfigFields(r *NodePool, o *NodePoolConfigSshConfig) error { + return nil +} +{{- if ne $.TargetVersionName "ga" }} +func extractNodePoolConfigSpotConfigFields(r *NodePool, o *NodePoolConfigSpotConfig) error { + return nil +} +{{- end }} +func extractNodePoolConfigProxyConfigFields(r *NodePool, o *NodePoolConfigProxyConfig) error { +{{- if ne $.TargetVersionName "ga" }} + return nil +} +func 
extractNodePoolConfigInstancePlacementFields(r *NodePool, o *NodePoolConfigInstancePlacement) error { +{{- end }} + return nil +} +func extractNodePoolConfigAutoscalingMetricsCollectionFields(r *NodePool, o *NodePoolConfigAutoscalingMetricsCollection) error { + return nil +} +func extractNodePoolAutoscalingFields(r *NodePool, o *NodePoolAutoscaling) error { + return nil +} +func extractNodePoolMaxPodsConstraintFields(r *NodePool, o *NodePoolMaxPodsConstraint) error { + return nil +} +func extractNodePoolManagementFields(r *NodePool, o *NodePoolManagement) error { + return nil +} +func extractNodePoolKubeletConfigFields(r *NodePool, o *NodePoolKubeletConfig) error { + return nil +} +func extractNodePoolUpdateSettingsFields(r *NodePool, o *NodePoolUpdateSettings) error { + vSurgeSettings := o.SurgeSettings + if vSurgeSettings == nil { + // note: explicitly not the empty object. + vSurgeSettings = &NodePoolUpdateSettingsSurgeSettings{} + } + if err := extractNodePoolUpdateSettingsSurgeSettingsFields(r, vSurgeSettings); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSurgeSettings) { + o.SurgeSettings = vSurgeSettings + } + return nil +} +func extractNodePoolUpdateSettingsSurgeSettingsFields(r *NodePool, o *NodePoolUpdateSettingsSurgeSettings) error { + return nil +} + +func postReadExtractNodePoolFields(r *NodePool) error { + vConfig := r.Config + if vConfig == nil { + // note: explicitly not the empty object. + vConfig = &NodePoolConfig{} + } + if err := postReadExtractNodePoolConfigFields(r, vConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vConfig) { + r.Config = vConfig + } + vAutoscaling := r.Autoscaling + if vAutoscaling == nil { + // note: explicitly not the empty object. 
+ vAutoscaling = &NodePoolAutoscaling{} + } + if err := postReadExtractNodePoolAutoscalingFields(r, vAutoscaling); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAutoscaling) { + r.Autoscaling = vAutoscaling + } + vMaxPodsConstraint := r.MaxPodsConstraint + if vMaxPodsConstraint == nil { + // note: explicitly not the empty object. + vMaxPodsConstraint = &NodePoolMaxPodsConstraint{} + } + if err := postReadExtractNodePoolMaxPodsConstraintFields(r, vMaxPodsConstraint); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMaxPodsConstraint) { + r.MaxPodsConstraint = vMaxPodsConstraint + } + vManagement := r.Management + if vManagement == nil { + // note: explicitly not the empty object. + vManagement = &NodePoolManagement{} + } + if err := postReadExtractNodePoolManagementFields(r, vManagement); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vManagement) { + r.Management = vManagement + } + vKubeletConfig := r.KubeletConfig + if vKubeletConfig == nil { + // note: explicitly not the empty object. + vKubeletConfig = &NodePoolKubeletConfig{} + } + if err := postReadExtractNodePoolKubeletConfigFields(r, vKubeletConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vKubeletConfig) { + r.KubeletConfig = vKubeletConfig + } + vUpdateSettings := r.UpdateSettings + if vUpdateSettings == nil { + // note: explicitly not the empty object. + vUpdateSettings = &NodePoolUpdateSettings{} + } + if err := postReadExtractNodePoolUpdateSettingsFields(r, vUpdateSettings); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vUpdateSettings) { + r.UpdateSettings = vUpdateSettings + } + return nil +} +func postReadExtractNodePoolConfigFields(r *NodePool, o *NodePoolConfig) error { + vRootVolume := o.RootVolume + if vRootVolume == nil { + // note: explicitly not the empty object. 
+ vRootVolume = &NodePoolConfigRootVolume{} + } + if err := extractNodePoolConfigRootVolumeFields(r, vRootVolume); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vRootVolume) { + o.RootVolume = vRootVolume + } + vConfigEncryption := o.ConfigEncryption + if vConfigEncryption == nil { + // note: explicitly not the empty object. + vConfigEncryption = &NodePoolConfigConfigEncryption{} + } + if err := extractNodePoolConfigConfigEncryptionFields(r, vConfigEncryption); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vConfigEncryption) { + o.ConfigEncryption = vConfigEncryption + } + vSshConfig := o.SshConfig + if vSshConfig == nil { + // note: explicitly not the empty object. + vSshConfig = &NodePoolConfigSshConfig{} + } + if err := extractNodePoolConfigSshConfigFields(r, vSshConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSshConfig) { + o.SshConfig = vSshConfig + } +{{- if ne $.TargetVersionName "ga" }} + vSpotConfig := o.SpotConfig + if vSpotConfig == nil { + // note: explicitly not the empty object. + vSpotConfig = &NodePoolConfigSpotConfig{} + } + if err := extractNodePoolConfigSpotConfigFields(r, vSpotConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSpotConfig) { + o.SpotConfig = vSpotConfig + } +{{- end }} + vProxyConfig := o.ProxyConfig + if vProxyConfig == nil { + // note: explicitly not the empty object. + vProxyConfig = &NodePoolConfigProxyConfig{} + } + if err := extractNodePoolConfigProxyConfigFields(r, vProxyConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vProxyConfig) { + o.ProxyConfig = vProxyConfig + } +{{- if ne $.TargetVersionName "ga" }} + vInstancePlacement := o.InstancePlacement + if vInstancePlacement == nil { + // note: explicitly not the empty object. 
+ vInstancePlacement = &NodePoolConfigInstancePlacement{} + } + if err := extractNodePoolConfigInstancePlacementFields(r, vInstancePlacement); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vInstancePlacement) { + o.InstancePlacement = vInstancePlacement + } +{{- end }} + vAutoscalingMetricsCollection := o.AutoscalingMetricsCollection + if vAutoscalingMetricsCollection == nil { + // note: explicitly not the empty object. + vAutoscalingMetricsCollection = &NodePoolConfigAutoscalingMetricsCollection{} + } + if err := extractNodePoolConfigAutoscalingMetricsCollectionFields(r, vAutoscalingMetricsCollection); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAutoscalingMetricsCollection) { + o.AutoscalingMetricsCollection = vAutoscalingMetricsCollection + } + return nil +} +func postReadExtractNodePoolConfigRootVolumeFields(r *NodePool, o *NodePoolConfigRootVolume) error { + return nil +} +func postReadExtractNodePoolConfigTaintsFields(r *NodePool, o *NodePoolConfigTaints) error { + return nil +} +func postReadExtractNodePoolConfigConfigEncryptionFields(r *NodePool, o *NodePoolConfigConfigEncryption) error { + return nil +} +func postReadExtractNodePoolConfigSshConfigFields(r *NodePool, o *NodePoolConfigSshConfig) error { + return nil +} +{{- if ne $.TargetVersionName "ga" }} +func postReadExtractNodePoolConfigSpotConfigFields(r *NodePool, o *NodePoolConfigSpotConfig) error { + return nil +} +{{- end }} +func postReadExtractNodePoolConfigProxyConfigFields(r *NodePool, o *NodePoolConfigProxyConfig) error { +{{- if ne $.TargetVersionName "ga" }} + return nil +} +func postReadExtractNodePoolConfigInstancePlacementFields(r *NodePool, o *NodePoolConfigInstancePlacement) error { +{{- end }} + return nil +} +func postReadExtractNodePoolConfigAutoscalingMetricsCollectionFields(r *NodePool, o *NodePoolConfigAutoscalingMetricsCollection) error { + return nil +} +func postReadExtractNodePoolAutoscalingFields(r *NodePool, o *NodePoolAutoscaling) error { + 
return nil +} +func postReadExtractNodePoolMaxPodsConstraintFields(r *NodePool, o *NodePoolMaxPodsConstraint) error { + return nil +} +func postReadExtractNodePoolManagementFields(r *NodePool, o *NodePoolManagement) error { + return nil +} +func postReadExtractNodePoolKubeletConfigFields(r *NodePool, o *NodePoolKubeletConfig) error { + return nil +} +func postReadExtractNodePoolUpdateSettingsFields(r *NodePool, o *NodePoolUpdateSettings) error { + vSurgeSettings := o.SurgeSettings + if vSurgeSettings == nil { + // note: explicitly not the empty object. + vSurgeSettings = &NodePoolUpdateSettingsSurgeSettings{} + } + if err := extractNodePoolUpdateSettingsSurgeSettingsFields(r, vSurgeSettings); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSurgeSettings) { + o.SurgeSettings = vSurgeSettings + } + return nil +} +func postReadExtractNodePoolUpdateSettingsSurgeSettingsFields(r *NodePool, o *NodePoolUpdateSettingsSurgeSettings) error { + return nil +} diff --git a/mmv1/third_party/terraform/services/containeraws/node_pool_schema.go.tmpl b/mmv1/third_party/terraform/services/containeraws/node_pool_schema.go.tmpl new file mode 100644 index 000000000000..8e8a935379cc --- /dev/null +++ b/mmv1/third_party/terraform/services/containeraws/node_pool_schema.go.tmpl @@ -0,0 +1,661 @@ +package containeraws + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +func DCLNodePoolSchema() *dcl.Schema { + return &dcl.Schema{ + Info: &dcl.Info{ + Title: "ContainerAws/NodePool", + Description: "An Anthos node pool running on AWS.", + StructName: "NodePool", + Reference: &dcl.Link{ + Text: "API reference", + URL: "https://cloud.google.com/kubernetes-engine/multi-cloud/docs/reference/rest/v1/projects.locations.awsClusters.awsNodePools", + }, + Guides: []*dcl.Link{ + &dcl.Link{ + Text: "Multicloud overview", + URL: "https://cloud.google.com/kubernetes-engine/multi-cloud/docs", + }, + }, + }, + Paths: &dcl.Paths{ + Get: &dcl.Path{ + 
Description: "The function used to get information about a NodePool", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "nodePool", + Required: true, + Description: "A full instance of a NodePool", + }, + }, + }, + Apply: &dcl.Path{ + Description: "The function used to apply information about a NodePool", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "nodePool", + Required: true, + Description: "A full instance of a NodePool", + }, + }, + }, + Delete: &dcl.Path{ + Description: "The function used to delete a NodePool", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "nodePool", + Required: true, + Description: "A full instance of a NodePool", + }, + }, + }, + DeleteAll: &dcl.Path{ + Description: "The function used to delete all NodePool", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "project", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + dcl.PathParameters{ + Name: "location", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + dcl.PathParameters{ + Name: "cluster", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + }, + }, + List: &dcl.Path{ + Description: "The function used to list information about many NodePool", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "project", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + dcl.PathParameters{ + Name: "location", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + dcl.PathParameters{ + Name: "cluster", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + }, + }, + }, + Components: &dcl.Components{ + Schemas: map[string]*dcl.Component{ + "NodePool": &dcl.Component{ + Title: "NodePool", + ID: "projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/awsClusters/{{ "{{" }}cluster{{ "}}" }}/awsNodePools/{{ 
"{{" }}name{{ "}}" }}", + ParentContainer: "project", + HasCreate: true, + SchemaProperty: dcl.Property{ + Type: "object", + Required: []string{ + "name", + "version", + "config", + "autoscaling", + "subnetId", + "maxPodsConstraint", + "project", + "location", + "cluster", + }, + Properties: map[string]*dcl.Property{ + "annotations": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "Annotations", + Description: "Optional. Annotations on the node pool. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. Key can have 2 segments: prefix (optional) and name (required), separated by a slash (/). Prefix must be a DNS subdomain. Name must be 63 characters or less, begin and end with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics between.", + }, + "autoscaling": &dcl.Property{ + Type: "object", + GoName: "Autoscaling", + GoType: "NodePoolAutoscaling", + Description: "Autoscaler configuration for this node pool.", + Required: []string{ + "minNodeCount", + "maxNodeCount", + }, + Properties: map[string]*dcl.Property{ + "maxNodeCount": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "MaxNodeCount", + Description: "Maximum number of nodes in the NodePool. Must be >= min_node_count.", + }, + "minNodeCount": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "MinNodeCount", + Description: "Minimum number of nodes in the NodePool. 
Must be >= 1 and <= max_node_count.", + }, + }, + }, + "cluster": &dcl.Property{ + Type: "string", + GoName: "Cluster", + Description: "The awsCluster for the resource", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Gkemulticloud/Cluster", + Field: "name", + Parent: true, + }, + }, + Parameter: true, + }, + "config": &dcl.Property{ + Type: "object", + GoName: "Config", + GoType: "NodePoolConfig", + Description: "The configuration of the node pool.", + Required: []string{ + "iamInstanceProfile", + "configEncryption", + }, + Properties: map[string]*dcl.Property{ + "autoscalingMetricsCollection": &dcl.Property{ + Type: "object", + GoName: "AutoscalingMetricsCollection", + GoType: "NodePoolConfigAutoscalingMetricsCollection", + Description: "Optional. Configuration related to CloudWatch metrics collection on the Auto Scaling group of the node pool. When unspecified, metrics collection is disabled.", + Required: []string{ + "granularity", + }, + Properties: map[string]*dcl.Property{ + "granularity": &dcl.Property{ + Type: "string", + GoName: "Granularity", + Description: "The frequency at which EC2 Auto Scaling sends aggregated data to AWS CloudWatch. The only valid value is \"1Minute\".", + }, + "metrics": &dcl.Property{ + Type: "array", + GoName: "Metrics", + Description: "The metrics to enable. For a list of valid metrics, see https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_EnableMetricsCollection.html. 
If you specify granularity and don't specify any metrics, all metrics are enabled.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + }, + }, + "configEncryption": &dcl.Property{ + Type: "object", + GoName: "ConfigEncryption", + GoType: "NodePoolConfigConfigEncryption", + Description: "The ARN of the AWS KMS key used to encrypt node pool configuration.", + Required: []string{ + "kmsKeyArn", + }, + Properties: map[string]*dcl.Property{ + "kmsKeyArn": &dcl.Property{ + Type: "string", + GoName: "KmsKeyArn", + Description: "The ARN of the AWS KMS key used to encrypt node pool configuration.", + }, + }, + }, + "iamInstanceProfile": &dcl.Property{ + Type: "string", + GoName: "IamInstanceProfile", + Description: "The name of the AWS IAM role assigned to nodes in the pool.", + }, +{{- if ne $.TargetVersionName "ga" }} + "imageType": &dcl.Property{ + Type: "string", + GoName: "ImageType", + Description: "The OS image type to use on node pool instances.", + Immutable: true, + ServerDefault: true, + }, + "instancePlacement": &dcl.Property{ + Type: "object", + GoName: "InstancePlacement", + GoType: "NodePoolConfigInstancePlacement", + Description: "Details of placement information for an instance.", + Immutable: true, + ServerDefault: true, + Properties: map[string]*dcl.Property{ + "tenancy": &dcl.Property{ + Type: "string", + GoName: "Tenancy", + GoType: "NodePoolConfigInstancePlacementTenancyEnum", + Description: "The tenancy for the instance. Possible values: TENANCY_UNSPECIFIED, DEFAULT, DEDICATED, HOST", + Immutable: true, + ServerDefault: true, + Enum: []string{ + "TENANCY_UNSPECIFIED", + "DEFAULT", + "DEDICATED", + "HOST", + }, + }, + }, + }, +{{- end }} + "instanceType": &dcl.Property{ + Type: "string", + GoName: "InstanceType", + Description: "Optional. The AWS instance type. 
When unspecified, it defaults to `m5.large`.", + ServerDefault: true, + }, + "labels": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "Labels", + Description: "Optional. The initial labels assigned to nodes of this node pool. An object containing a list of \"key\": value pairs. Example: { \"name\": \"wrench\", \"mass\": \"1.3kg\", \"count\": \"3\" }.", + }, + "proxyConfig": &dcl.Property{ + Type: "object", + GoName: "ProxyConfig", + GoType: "NodePoolConfigProxyConfig", + Description: "Proxy configuration for outbound HTTP(S) traffic.", + Required: []string{ + "secretArn", + "secretVersion", + }, + Properties: map[string]*dcl.Property{ + "secretArn": &dcl.Property{ + Type: "string", + GoName: "SecretArn", + Description: "The ARN of the AWS Secret Manager secret that contains the HTTP(S) proxy configuration.", + }, + "secretVersion": &dcl.Property{ + Type: "string", + GoName: "SecretVersion", + Description: "The version string of the AWS Secret Manager secret that contains the HTTP(S) proxy configuration.", + }, + }, + }, + "rootVolume": &dcl.Property{ + Type: "object", + GoName: "RootVolume", + GoType: "NodePoolConfigRootVolume", + Description: "Optional. Template for the root volume provisioned for node pool nodes. Volumes will be provisioned in the availability zone assigned to the node pool subnet. When unspecified, it defaults to 32 GiB with the GP2 volume type.", + ServerDefault: true, + Properties: map[string]*dcl.Property{ + "iops": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "Iops", + Description: "Optional. The number of I/O operations per second (IOPS) to provision for GP3 volume.", + ServerDefault: true, + }, + "kmsKeyArn": &dcl.Property{ + Type: "string", + GoName: "KmsKeyArn", + Description: "Optional. The Amazon Resource Name (ARN) of the Customer Managed Key (CMK) used to encrypt AWS EBS volumes. 
If not specified, the default Amazon managed key associated to the AWS region where this cluster runs will be used.", + }, + "sizeGib": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "SizeGib", + Description: "Optional. The size of the volume, in GiBs. When unspecified, a default value is provided. See the specific reference in the parent resource.", + ServerDefault: true, + }, + "throughput": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "Throughput", + Description: "Optional. The throughput to provision for the volume, in MiB/s. Only valid if the volume type is GP3. If volume type is gp3 and throughput is not specified, the throughput will defaults to 125.", + ServerDefault: true, + }, + "volumeType": &dcl.Property{ + Type: "string", + GoName: "VolumeType", + GoType: "NodePoolConfigRootVolumeVolumeTypeEnum", + Description: "Optional. Type of the EBS volume. When unspecified, it defaults to GP2 volume. Possible values: VOLUME_TYPE_UNSPECIFIED, GP2, GP3", + ServerDefault: true, + Enum: []string{ + "VOLUME_TYPE_UNSPECIFIED", + "GP2", + "GP3", + }, + }, + }, + }, + "securityGroupIds": &dcl.Property{ + Type: "array", + GoName: "SecurityGroupIds", + Description: "Optional. The IDs of additional security groups to add to nodes in this pool. The manager will automatically create security groups with minimum rules needed for a functioning cluster.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", +{{- if ne $.TargetVersionName "ga" }} + }, + }, + "spotConfig": &dcl.Property{ + Type: "object", + GoName: "SpotConfig", + GoType: "NodePoolConfigSpotConfig", + Description: "Optional. When specified, the node pool will provision Spot instances from the set of spot_config.instance_types. 
This field is mutually exclusive with `instance_type`", + Immutable: true, + Required: []string{ + "instanceTypes", + }, + Properties: map[string]*dcl.Property{ + "instanceTypes": &dcl.Property{ + Type: "array", + GoName: "InstanceTypes", + Description: "List of AWS EC2 instance types for creating a spot node pool's nodes. The specified instance types must have the same number of CPUs and memory. You can use the Amazon EC2 Instance Selector tool (https://github.com/aws/amazon-ec2-instance-selector) to choose instance types with matching CPU and memory", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, +{{- end }} + }, + }, + "sshConfig": &dcl.Property{ + Type: "object", + GoName: "SshConfig", + GoType: "NodePoolConfigSshConfig", + Description: "Optional. The SSH configuration.", + Required: []string{ + "ec2KeyPair", + }, + Properties: map[string]*dcl.Property{ + "ec2KeyPair": &dcl.Property{ + Type: "string", + GoName: "Ec2KeyPair", + Description: "The name of the EC2 key pair used to login into cluster machines.", + }, + }, + }, + "tags": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "Tags", + Description: "Optional. Key/value metadata to assign to each underlying AWS resource. Specify at most 50 pairs containing alphanumerics, spaces, and symbols (.+-=_:@/). Keys can be up to 127 Unicode characters. Values can be up to 255 Unicode characters.", + }, + "taints": &dcl.Property{ + Type: "array", + GoName: "Taints", + Description: "Optional. 
The initial taints assigned to nodes of this node pool.", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "object", + GoType: "NodePoolConfigTaints", + Required: []string{ + "key", + "value", + "effect", + }, + Properties: map[string]*dcl.Property{ + "effect": &dcl.Property{ + Type: "string", + GoName: "Effect", + GoType: "NodePoolConfigTaintsEffectEnum", + Description: "The taint effect. Possible values: EFFECT_UNSPECIFIED, NO_SCHEDULE, PREFER_NO_SCHEDULE, NO_EXECUTE", + Immutable: true, + Enum: []string{ + "EFFECT_UNSPECIFIED", + "NO_SCHEDULE", + "PREFER_NO_SCHEDULE", + "NO_EXECUTE", + }, + }, + "key": &dcl.Property{ + Type: "string", + GoName: "Key", + Description: "Key for the taint.", + Immutable: true, + }, + "value": &dcl.Property{ + Type: "string", + GoName: "Value", + Description: "Value for the taint.", + Immutable: true, + }, + }, + }, + }, + }, + }, + "createTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "CreateTime", + ReadOnly: true, + Description: "Output only. The time at which this node pool was created.", + Immutable: true, + }, + "etag": &dcl.Property{ + Type: "string", + GoName: "Etag", + ReadOnly: true, + Description: "Allows clients to perform consistent read-modify-writes through optimistic concurrency control. May be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.", + Immutable: true, + }, + "kubeletConfig": &dcl.Property{ + Type: "object", + GoName: "KubeletConfig", + GoType: "NodePoolKubeletConfig", + Description: "The kubelet configuration for the node pool.", + Immutable: true, + ServerDefault: true, + Properties: map[string]*dcl.Property{ + "cpuCfsQuota": &dcl.Property{ + Type: "boolean", + GoName: "CpuCfsQuota", + Description: "Whether or not to enable CPU CFS quota. 
Defaults to true.", + Immutable: true, + ServerDefault: true, + }, + "cpuCfsQuotaPeriod": &dcl.Property{ + Type: "string", + GoName: "CpuCfsQuotaPeriod", + Description: "Optional. The CPU CFS quota period to use for the node. Defaults to \"100ms\".", + Immutable: true, + }, + "cpuManagerPolicy": &dcl.Property{ + Type: "string", + GoName: "CpuManagerPolicy", + GoType: "NodePoolKubeletConfigCpuManagerPolicyEnum", + Description: "The CpuManagerPolicy to use for the node. Defaults to \"none\".", + Immutable: true, + ServerDefault: true, + Enum: []string{ + "none", + "static", + }, + }, + "podPidsLimit": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "PodPidsLimit", + Description: "Optional. The maximum number of PIDs in each pod running on the node. The limit scales automatically based on underlying machine size if left unset.", + Immutable: true, + }, + }, + }, + "location": &dcl.Property{ + Type: "string", + GoName: "Location", + Description: "The location for the resource", + Immutable: true, + Parameter: true, + }, + "management": &dcl.Property{ + Type: "object", + GoName: "Management", + GoType: "NodePoolManagement", + Description: "The Management configuration for this node pool.", + Properties: map[string]*dcl.Property{ + "autoRepair": &dcl.Property{ + Type: "boolean", + GoName: "AutoRepair", + Description: "Optional. 
Whether or not the nodes will be automatically repaired.", + }, + }, + }, + "maxPodsConstraint": &dcl.Property{ + Type: "object", + GoName: "MaxPodsConstraint", + GoType: "NodePoolMaxPodsConstraint", + Description: "The constraint on the maximum number of pods that can be run simultaneously on a node in the node pool.", + Immutable: true, + Required: []string{ + "maxPodsPerNode", + }, + Properties: map[string]*dcl.Property{ + "maxPodsPerNode": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "MaxPodsPerNode", + Description: "The maximum number of pods to schedule on a single node.", + Immutable: true, + }, + }, + }, + "name": &dcl.Property{ + Type: "string", + GoName: "Name", + Description: "The name of this resource.", + Immutable: true, + HasLongForm: true, + }, + "project": &dcl.Property{ + Type: "string", + GoName: "Project", + Description: "The project for the resource", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Cloudresourcemanager/Project", + Field: "name", + Parent: true, + }, + }, + Parameter: true, + }, + "reconciling": &dcl.Property{ + Type: "boolean", + GoName: "Reconciling", + ReadOnly: true, + Description: "Output only. If set, there are currently changes in flight to the node pool.", + Immutable: true, + }, + "state": &dcl.Property{ + Type: "string", + GoName: "State", + GoType: "NodePoolStateEnum", + ReadOnly: true, + Description: "Output only. The lifecycle state of the node pool. 
Possible values: STATE_UNSPECIFIED, PROVISIONING, RUNNING, RECONCILING, STOPPING, ERROR, DEGRADED", + Immutable: true, + Enum: []string{ + "STATE_UNSPECIFIED", + "PROVISIONING", + "RUNNING", + "RECONCILING", + "STOPPING", + "ERROR", + "DEGRADED", + }, + }, + "subnetId": &dcl.Property{ + Type: "string", + GoName: "SubnetId", + Description: "The subnet where the node pool node run.", + Immutable: true, + }, + "uid": &dcl.Property{ + Type: "string", + GoName: "Uid", + ReadOnly: true, + Description: "Output only. A globally unique identifier for the node pool.", + Immutable: true, + }, + "updateSettings": &dcl.Property{ + Type: "object", + GoName: "UpdateSettings", + GoType: "NodePoolUpdateSettings", + Description: "Optional. Update settings control the speed and disruption of the node pool update.", + ServerDefault: true, + Properties: map[string]*dcl.Property{ + "surgeSettings": &dcl.Property{ + Type: "object", + GoName: "SurgeSettings", + GoType: "NodePoolUpdateSettingsSurgeSettings", + Description: "Optional. Settings for surge update.", + ServerDefault: true, + Properties: map[string]*dcl.Property{ + "maxSurge": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "MaxSurge", + Description: "Optional. The maximum number of nodes that can be created beyond the current size of the node pool during the update process.", + ServerDefault: true, + }, + "maxUnavailable": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "MaxUnavailable", + Description: "Optional. The maximum number of nodes that can be simultaneously unavailable during the update process. A node is considered unavailable if its status is not Ready.", + ServerDefault: true, + }, + }, + }, + }, + }, + "updateTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "UpdateTime", + ReadOnly: true, + Description: "Output only. 
The time at which this node pool was last updated.", + Immutable: true, + }, + "version": &dcl.Property{ + Type: "string", + GoName: "Version", + Description: "The Kubernetes version to run on this node pool (e.g. `1.19.10-gke.1000`). You can list all supported versions on a given Google Cloud region by calling GetAwsServerConfig.", + }, + }, + }, + }, + }, + }, + } +} diff --git a/mmv1/third_party/terraform/services/containeraws/provider_dcl_client_creation.go b/mmv1/third_party/terraform/services/containeraws/provider_dcl_client_creation.go new file mode 100644 index 000000000000..699256d6e026 --- /dev/null +++ b/mmv1/third_party/terraform/services/containeraws/provider_dcl_client_creation.go @@ -0,0 +1,30 @@ +package containeraws + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "time" +) + +func NewDCLContainerAwsClient(config *transport_tpg.Config, userAgent, billingProject string, timeout time.Duration) *Client { + configOptions := []dcl.ConfigOption{ + dcl.WithHTTPClient(config.Client), + dcl.WithUserAgent(userAgent), + dcl.WithLogger(dcl.DCLLogger{}), + dcl.WithBasePath(config.ContainerAwsBasePath), + } + + if timeout != 0 { + configOptions = append(configOptions, dcl.WithTimeout(timeout)) + } + + if config.UserProjectOverride { + configOptions = append(configOptions, dcl.WithUserProjectOverride()) + if billingProject != "" { + configOptions = append(configOptions, dcl.WithBillingProject(billingProject)) + } + } + + dclConfig := dcl.NewConfig(configOptions...) 
+ return NewClient(dclConfig) +} diff --git a/mmv1/third_party/terraform/services/containeraws/resource_container_aws_cluster.go.tmpl b/mmv1/third_party/terraform/services/containeraws/resource_container_aws_cluster.go.tmpl new file mode 100644 index 000000000000..0541d20bc1af --- /dev/null +++ b/mmv1/third_party/terraform/services/containeraws/resource_container_aws_cluster.go.tmpl @@ -0,0 +1,1579 @@ +package containeraws + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" +{{- if eq $.TargetVersionName "ga" }} + containeraws "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws" +{{- else }} + containeraws "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/beta" +{{- end }} + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceContainerAwsCluster() *schema.Resource { + return &schema.Resource{ + Create: resourceContainerAwsClusterCreate, + Read: resourceContainerAwsClusterRead, + Update: resourceContainerAwsClusterUpdate, + Delete: resourceContainerAwsClusterDelete, + + Importer: &schema.ResourceImporter{ + State: resourceContainerAwsClusterImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + tpgresource.SetAnnotationsDiff, + ), + + Schema: map[string]*schema.Schema{ + "authorization": { + Type: schema.TypeList, + Required: true, + Description: 
"Configuration related to the cluster RBAC settings.", + MaxItems: 1, + Elem: ContainerAwsClusterAuthorizationSchema(), + }, + + "aws_region": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The AWS region where the cluster runs. Each Google Cloud region supports a subset of nearby AWS regions. You can call to list all supported AWS regions within a given Google Cloud region.", + }, + + "control_plane": { + Type: schema.TypeList, + Required: true, + Description: "Configuration related to the cluster control plane.", + MaxItems: 1, + Elem: ContainerAwsClusterControlPlaneSchema(), + }, + + "fleet": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Fleet configuration.", + MaxItems: 1, + Elem: ContainerAwsClusterFleetSchema(), + }, + + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The location for the resource", + }, + + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The name of this resource.", + }, + + "networking": { + Type: schema.TypeList, + Required: true, + Description: "Cluster-wide networking configuration.", + MaxItems: 1, + Elem: ContainerAwsClusterNetworkingSchema(), + }, + + "binary_authorization": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: "Configuration options for the Binary Authorization feature.", + MaxItems: 1, + Elem: ContainerAwsClusterBinaryAuthorizationSchema(), + }, + + "description": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. A human readable description of this cluster. 
Cannot be longer than 255 UTF-8 encoded bytes.", + }, + + "effective_annotations": { + Type: schema.TypeMap, + Computed: true, + Description: "All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services.", +{{- if ne $.TargetVersionName "ga" }} + }, + + "logging_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: "Logging configuration.", + MaxItems: 1, + Elem: ContainerAwsClusterLoggingConfigSchema(), +{{- end }} + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The project for the resource", + }, + + "annotations": { + Type: schema.TypeMap, + Optional: true, + Description: "Optional. Annotations on the cluster. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. Key can have 2 segments: prefix (optional) and name (required), separated by a slash (/). Prefix must be a DNS subdomain. Name must be 63 characters or less, begin and end with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics between.\n\n**Note**: This field is non-authoritative, and will only manage the annotations present in your configuration.\nPlease refer to the field `effective_annotations` for all of the annotations present on the resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time at which this cluster was created.", + }, + + "endpoint": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. 
The endpoint of the cluster's API server.", + }, + + "etag": { + Type: schema.TypeString, + Computed: true, + Description: "Allows clients to perform consistent read-modify-writes through optimistic concurrency control. May be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.", + }, + + "reconciling": { + Type: schema.TypeBool, + Computed: true, + Description: "Output only. If set, there are currently changes in flight to the cluster.", + }, + + "state": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The current state of the cluster. Possible values: STATE_UNSPECIFIED, PROVISIONING, RUNNING, RECONCILING, STOPPING, ERROR, DEGRADED", + }, + + "uid": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. A globally unique identifier for the cluster.", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time at which this cluster was last updated.", + }, + + "workload_identity_config": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. Workload Identity settings.", + Elem: ContainerAwsClusterWorkloadIdentityConfigSchema(), + }, + }, + } +} + +func ContainerAwsClusterAuthorizationSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "admin_users": { + Type: schema.TypeList, + Required: true, + Description: "Users to perform operations as a cluster admin. A managed ClusterRoleBinding will be created to grant the `cluster-admin` ClusterRole to the users. Up to ten admin users can be provided. For more info on RBAC, see https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles", + Elem: ContainerAwsClusterAuthorizationAdminUsersSchema(), + }, + + "admin_groups": { + Type: schema.TypeList, + Optional: true, + Description: "Groups of users that can perform operations as a cluster admin. 
A managed ClusterRoleBinding will be created to grant the `cluster-admin` ClusterRole to the groups. Up to ten admin groups can be provided. For more info on RBAC, see https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles", + Elem: ContainerAwsClusterAuthorizationAdminGroupsSchema(), + }, + }, + } +} + +func ContainerAwsClusterAuthorizationAdminUsersSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "username": { + Type: schema.TypeString, + Required: true, + Description: "The name of the user, e.g. `my-gcp-id@gmail.com`.", + }, + }, + } +} + +func ContainerAwsClusterAuthorizationAdminGroupsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "group": { + Type: schema.TypeString, + Required: true, + Description: "The name of the group, e.g. `my-group@domain.com`.", + }, + }, + } +} + +func ContainerAwsClusterControlPlaneSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "aws_services_authentication": { + Type: schema.TypeList, + Required: true, + Description: "Authentication configuration for management of AWS resources.", + MaxItems: 1, + Elem: ContainerAwsClusterControlPlaneAwsServicesAuthenticationSchema(), + }, + + "config_encryption": { + Type: schema.TypeList, + Required: true, + Description: "The ARN of the AWS KMS key used to encrypt cluster configuration.", + MaxItems: 1, + Elem: ContainerAwsClusterControlPlaneConfigEncryptionSchema(), + }, + + "database_encryption": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "The ARN of the AWS KMS key used to encrypt cluster secrets.", + MaxItems: 1, + Elem: ContainerAwsClusterControlPlaneDatabaseEncryptionSchema(), + }, + + "iam_instance_profile": { + Type: schema.TypeString, + Required: true, + Description: "The name of the AWS IAM instance profile to assign to each control plane replica.", + }, + + "subnet_ids": { + Type:
schema.TypeList, + Required: true, + ForceNew: true, + Description: "The list of subnets where control plane replicas will run. A replica will be provisioned on each subnet and up to three values can be provided. Each subnet must be in a different AWS Availability Zone (AZ).", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "version": { + Type: schema.TypeString, + Required: true, + Description: "The Kubernetes version to run on control plane replicas (e.g. `1.19.10-gke.1000`). You can list all supported versions on a given Google Cloud region by calling .", + }, + +{{- if ne $.TargetVersionName "ga" }} + "instance_placement": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Details of placement information for an instance.", + MaxItems: 1, + Elem: ContainerAwsClusterControlPlaneInstancePlacementSchema(), + }, + +{{- end }} + "instance_type": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: "Optional. The AWS instance type. When unspecified, it defaults to `m5.large`.", + }, + + "main_volume": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. Configuration related to the main volume provisioned for each control plane replica. The main volume is in charge of storing all of the cluster's etcd state. Volumes will be provisioned in the availability zone associated with the corresponding subnet. When unspecified, it defaults to 8 GiB with the GP2 volume type.", + MaxItems: 1, + Elem: ContainerAwsClusterControlPlaneMainVolumeSchema(), + }, + + "proxy_config": { + Type: schema.TypeList, + Optional: true, + Description: "Proxy configuration for outbound HTTP(S) traffic.", + MaxItems: 1, + Elem: ContainerAwsClusterControlPlaneProxyConfigSchema(), + }, + + "root_volume": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: "Optional. 
Configuration related to the root volume provisioned for each control plane replica. Volumes will be provisioned in the availability zone associated with the corresponding subnet. When unspecified, it defaults to 32 GiB with the GP2 volume type.", + MaxItems: 1, + Elem: ContainerAwsClusterControlPlaneRootVolumeSchema(), + }, + + "security_group_ids": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. The IDs of additional security groups to add to control plane replicas. The Anthos Multi-Cloud API will automatically create and manage security groups with the minimum rules needed for a functioning cluster.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "ssh_config": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. SSH configuration for how to access the underlying control plane machines.", + MaxItems: 1, + Elem: ContainerAwsClusterControlPlaneSshConfigSchema(), + }, + + "tags": { + Type: schema.TypeMap, + Optional: true, + Description: "Optional. A set of AWS resource tags to propagate to all underlying managed AWS resources. Specify at most 50 pairs containing alphanumerics, spaces, and symbols (.+-=_:@/). Keys can be up to 127 Unicode characters. Values can be up to 255 Unicode characters.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func ContainerAwsClusterControlPlaneAwsServicesAuthenticationSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "role_arn": { + Type: schema.TypeString, + Required: true, + Description: "The Amazon Resource Name (ARN) of the role that the Anthos Multi-Cloud API will assume when managing AWS resources on your account.", + }, + + "role_session_name": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: "Optional. An identifier for the assumed role session. 
When unspecified, it defaults to `multicloud-service-agent`.", + }, + }, + } +} + +func ContainerAwsClusterControlPlaneConfigEncryptionSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key_arn": { + Type: schema.TypeString, + Required: true, + Description: "The ARN of the AWS KMS key used to encrypt cluster configuration.", + }, + }, + } +} + +func ContainerAwsClusterControlPlaneDatabaseEncryptionSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key_arn": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The ARN of the AWS KMS key used to encrypt cluster secrets.", +{{- if ne $.TargetVersionName "ga" }} + }, + }, + } +} + +func ContainerAwsClusterControlPlaneInstancePlacementSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "tenancy": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CaseDiffSuppress, + Description: "The tenancy for the instance. Possible values: TENANCY_UNSPECIFIED, DEFAULT, DEDICATED, HOST", +{{- end }} + }, + }, + } +} + +func ContainerAwsClusterControlPlaneMainVolumeSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "iops": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The number of I/O operations per second (IOPS) to provision for GP3 volume.", + }, + + "kms_key_arn": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The Amazon Resource Name (ARN) of the Customer Managed Key (CMK) used to encrypt AWS EBS volumes. If not specified, the default Amazon managed key associated to the AWS region where this cluster runs will be used.", + }, + + "size_gib": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. 
The size of the volume, in GiBs. When unspecified, a default value is provided. See the specific reference in the parent resource.", + }, + + "throughput": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The throughput to provision for the volume, in MiB/s. Only valid if the volume type is GP3. If volume type is gp3 and throughput is not specified, the throughput will defaults to 125.", + }, + + "volume_type": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CaseDiffSuppress, + Description: "Optional. Type of the EBS volume. When unspecified, it defaults to GP2 volume. Possible values: VOLUME_TYPE_UNSPECIFIED, GP2, GP3", + }, + }, + } +} + +func ContainerAwsClusterControlPlaneProxyConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "secret_arn": { + Type: schema.TypeString, + Required: true, + Description: "The ARN of the AWS Secret Manager secret that contains the HTTP(S) proxy configuration.", + }, + + "secret_version": { + Type: schema.TypeString, + Required: true, + Description: "The version string of the AWS Secret Manager secret that contains the HTTP(S) proxy configuration.", + }, + }, + } +} + +func ContainerAwsClusterControlPlaneRootVolumeSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "iops": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: "Optional. The number of I/O operations per second (IOPS) to provision for GP3 volume.", + }, + + "kms_key_arn": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. The Amazon Resource Name (ARN) of the Customer Managed Key (CMK) used to encrypt AWS EBS volumes. 
If not specified, the default Amazon managed key associated to the AWS region where this cluster runs will be used.", + }, + + "size_gib": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: "Optional. The size of the volume, in GiBs. When unspecified, a default value is provided. See the specific reference in the parent resource.", + }, + + "throughput": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: "Optional. The throughput to provision for the volume, in MiB/s. Only valid if the volume type is GP3. If volume type is gp3 and throughput is not specified, the throughput will defaults to 125.", + }, + + "volume_type": { + Type: schema.TypeString, + Computed: true, + Optional: true, + DiffSuppressFunc: tpgresource.CaseDiffSuppress, + Description: "Optional. Type of the EBS volume. When unspecified, it defaults to GP2 volume. Possible values: VOLUME_TYPE_UNSPECIFIED, GP2, GP3", + }, + }, + } +} + +func ContainerAwsClusterControlPlaneSshConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ec2_key_pair": { + Type: schema.TypeString, + Required: true, + Description: "The name of the EC2 key pair used to login into cluster machines.", + }, + }, + } +} + +func ContainerAwsClusterFleetSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The number of the Fleet host project where this cluster will be registered.", + }, + + "membership": { + Type: schema.TypeString, + Computed: true, + Description: "The name of the managed Hub Membership resource associated to this cluster. 
Membership names are formatted as projects//locations/global/membership/.", + }, + }, + } +} + +func ContainerAwsClusterNetworkingSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "pod_address_cidr_blocks": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "All pods in the cluster are assigned an RFC1918 IPv4 address from these ranges. Only a single range is supported. This field cannot be changed after creation.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "service_address_cidr_blocks": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "All services in the cluster are assigned an RFC1918 IPv4 address from these ranges. Only a single range is supported. This field cannot be changed after creation.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "vpc_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The VPC associated with the cluster. All component clusters (i.e. control plane and node pools) run on a single VPC. This field cannot be changed after creation.", + }, + + "per_node_pool_sg_rules_disabled": { + Type: schema.TypeBool, + Optional: true, + Description: "Disable the per node pool subnet security group rules on the control plane security group. When set to true, you must also provide one or more security groups that ensure node pools are able to send requests to the control plane on TCP/443 and TCP/8132. Failure to do so may result in unavailable node pools.", + }, + }, + } +} + +func ContainerAwsClusterBinaryAuthorizationSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "evaluation_mode": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: "Mode of operation for Binary Authorization policy evaluation. 
Possible values: DISABLED, PROJECT_SINGLETON_POLICY_ENFORCE", +{{- if ne $.TargetVersionName "ga" }} + }, + }, + } +} + +func ContainerAwsClusterLoggingConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "component_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: "Configuration of the logging components.", + MaxItems: 1, + Elem: ContainerAwsClusterLoggingConfigComponentConfigSchema(), + }, + }, + } +} + +func ContainerAwsClusterLoggingConfigComponentConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable_components": { + Type: schema.TypeList, + Computed: true, + Optional: true, + DiffSuppressFunc: tpgresource.CaseDiffSuppress, + Description: "Components of the logging configuration to be enabled.", + Elem: &schema.Schema{Type: schema.TypeString}, +{{- end }} + }, + }, + } +} + +func ContainerAwsClusterWorkloadIdentityConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "identity_provider": { + Type: schema.TypeString, + Computed: true, + Description: "The ID of the OIDC Identity Provider (IdP) associated to the Workload Identity Pool.", + }, + + "issuer_uri": { + Type: schema.TypeString, + Computed: true, + Description: "The OIDC issuer URL for this cluster.", + }, + + "workload_pool": { + Type: schema.TypeString, + Computed: true, + Description: "The Workload Identity Pool associated to the cluster.", + }, + }, + } +} + +func resourceContainerAwsClusterCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &containeraws.Cluster{ + Authorization: expandContainerAwsClusterAuthorization(d.Get("authorization")), + AwsRegion: dcl.String(d.Get("aws_region").(string)), + ControlPlane: expandContainerAwsClusterControlPlane(d.Get("control_plane")), + 
Fleet: expandContainerAwsClusterFleet(d.Get("fleet")), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Networking: expandContainerAwsClusterNetworking(d.Get("networking")), + BinaryAuthorization: expandContainerAwsClusterBinaryAuthorization(d.Get("binary_authorization")), + Description: dcl.String(d.Get("description").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), +{{- if ne $.TargetVersionName "ga" }} + LoggingConfig: expandContainerAwsClusterLoggingConfig(d.Get("logging_config")), +{{- end }} + Project: dcl.String(project), + } + + id, err := obj.ID() + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := transport_tpg.NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyCluster(context.Background(), obj, directive...) 
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating Cluster: %s", err) + } + + log.Printf("[DEBUG] Finished creating Cluster %q: %#v", d.Id(), res) + + return resourceContainerAwsClusterRead(d, meta) +} + +func resourceContainerAwsClusterRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &containeraws.Cluster{ + Authorization: expandContainerAwsClusterAuthorization(d.Get("authorization")), + AwsRegion: dcl.String(d.Get("aws_region").(string)), + ControlPlane: expandContainerAwsClusterControlPlane(d.Get("control_plane")), + Fleet: expandContainerAwsClusterFleet(d.Get("fleet")), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Networking: expandContainerAwsClusterNetworking(d.Get("networking")), + BinaryAuthorization: expandContainerAwsClusterBinaryAuthorization(d.Get("binary_authorization")), + Description: dcl.String(d.Get("description").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), +{{- if ne $.TargetVersionName "ga" }} + LoggingConfig: expandContainerAwsClusterLoggingConfig(d.Get("logging_config")), +{{- end }} + Project: dcl.String(project), + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := transport_tpg.NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + 
d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetCluster(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("ContainerAwsCluster %q", d.Id()) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("authorization", flattenContainerAwsClusterAuthorization(res.Authorization)); err != nil { + return fmt.Errorf("error setting authorization in state: %s", err) + } + if err = d.Set("aws_region", res.AwsRegion); err != nil { + return fmt.Errorf("error setting aws_region in state: %s", err) + } + if err = d.Set("control_plane", flattenContainerAwsClusterControlPlane(res.ControlPlane)); err != nil { + return fmt.Errorf("error setting control_plane in state: %s", err) + } + if err = d.Set("fleet", flattenContainerAwsClusterFleet(res.Fleet)); err != nil { + return fmt.Errorf("error setting fleet in state: %s", err) + } + if err = d.Set("location", res.Location); err != nil { + return fmt.Errorf("error setting location in state: %s", err) + } + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + if err = d.Set("networking", flattenContainerAwsClusterNetworking(res.Networking)); err != nil { + return fmt.Errorf("error setting networking in state: %s", err) + } + if err = d.Set("binary_authorization", flattenContainerAwsClusterBinaryAuthorization(res.BinaryAuthorization)); err != nil { + return fmt.Errorf("error setting binary_authorization in state: %s", err) + } + if err = d.Set("description", res.Description); err != nil { + return fmt.Errorf("error setting description in state: %s", err) + } + if err = d.Set("effective_annotations", res.Annotations); err != nil { + return fmt.Errorf("error setting effective_annotations in state: %s", err) +{{- if ne $.TargetVersionName "ga" }} + } + if err = d.Set("logging_config", 
flattenContainerAwsClusterLoggingConfig(res.LoggingConfig)); err != nil { + return fmt.Errorf("error setting logging_config in state: %s", err) +{{- end }} + } + if err = d.Set("project", res.Project); err != nil { + return fmt.Errorf("error setting project in state: %s", err) + } + if err = d.Set("annotations", flattenContainerAwsClusterAnnotations(res.Annotations, d)); err != nil { + return fmt.Errorf("error setting annotations in state: %s", err) + } + if err = d.Set("create_time", res.CreateTime); err != nil { + return fmt.Errorf("error setting create_time in state: %s", err) + } + if err = d.Set("endpoint", res.Endpoint); err != nil { + return fmt.Errorf("error setting endpoint in state: %s", err) + } + if err = d.Set("etag", res.Etag); err != nil { + return fmt.Errorf("error setting etag in state: %s", err) + } + if err = d.Set("reconciling", res.Reconciling); err != nil { + return fmt.Errorf("error setting reconciling in state: %s", err) + } + if err = d.Set("state", res.State); err != nil { + return fmt.Errorf("error setting state in state: %s", err) + } + if err = d.Set("uid", res.Uid); err != nil { + return fmt.Errorf("error setting uid in state: %s", err) + } + if err = d.Set("update_time", res.UpdateTime); err != nil { + return fmt.Errorf("error setting update_time in state: %s", err) + } + if err = d.Set("workload_identity_config", flattenContainerAwsClusterWorkloadIdentityConfig(res.WorkloadIdentityConfig)); err != nil { + return fmt.Errorf("error setting workload_identity_config in state: %s", err) + } + + return nil +} +func resourceContainerAwsClusterUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &containeraws.Cluster{ + Authorization: expandContainerAwsClusterAuthorization(d.Get("authorization")), + AwsRegion: dcl.String(d.Get("aws_region").(string)), + ControlPlane: 
expandContainerAwsClusterControlPlane(d.Get("control_plane")), + Fleet: expandContainerAwsClusterFleet(d.Get("fleet")), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Networking: expandContainerAwsClusterNetworking(d.Get("networking")), + BinaryAuthorization: expandContainerAwsClusterBinaryAuthorization(d.Get("binary_authorization")), + Description: dcl.String(d.Get("description").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), +{{- if ne $.TargetVersionName "ga" }} + LoggingConfig: expandContainerAwsClusterLoggingConfig(d.Get("logging_config")), +{{- end }} + Project: dcl.String(project), + } + directive := tpgdclresource.UpdateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := transport_tpg.NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyCluster(context.Background(), obj, directive...) 
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error updating Cluster: %s", err) + } + + log.Printf("[DEBUG] Finished creating Cluster %q: %#v", d.Id(), res) + + return resourceContainerAwsClusterRead(d, meta) +} + +func resourceContainerAwsClusterDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &containeraws.Cluster{ + Authorization: expandContainerAwsClusterAuthorization(d.Get("authorization")), + AwsRegion: dcl.String(d.Get("aws_region").(string)), + ControlPlane: expandContainerAwsClusterControlPlane(d.Get("control_plane")), + Fleet: expandContainerAwsClusterFleet(d.Get("fleet")), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Networking: expandContainerAwsClusterNetworking(d.Get("networking")), + BinaryAuthorization: expandContainerAwsClusterBinaryAuthorization(d.Get("binary_authorization")), + Description: dcl.String(d.Get("description").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), +{{- if ne $.TargetVersionName "ga" }} + LoggingConfig: expandContainerAwsClusterLoggingConfig(d.Get("logging_config")), +{{- end }} + Project: dcl.String(project), + } + + log.Printf("[DEBUG] Deleting Cluster %q", d.Id()) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := transport_tpg.NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := 
tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + if err := client.DeleteCluster(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting Cluster: %s", err) + } + + log.Printf("[DEBUG] Finished deleting Cluster %q", d.Id()) + return nil +} + +func resourceContainerAwsClusterImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/awsClusters/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/awsClusters/{{ "{{" }}name{{ "}}" }}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func expandContainerAwsClusterAuthorization(o interface{}) *containeraws.ClusterAuthorization { + if o == nil { + return containeraws.EmptyClusterAuthorization + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return containeraws.EmptyClusterAuthorization + } + obj := objArr[0].(map[string]interface{}) + return &containeraws.ClusterAuthorization{ + AdminUsers: expandContainerAwsClusterAuthorizationAdminUsersArray(obj["admin_users"]), + AdminGroups: expandContainerAwsClusterAuthorizationAdminGroupsArray(obj["admin_groups"]), + } +} + +func flattenContainerAwsClusterAuthorization(obj *containeraws.ClusterAuthorization) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "admin_users": 
flattenContainerAwsClusterAuthorizationAdminUsersArray(obj.AdminUsers), + "admin_groups": flattenContainerAwsClusterAuthorizationAdminGroupsArray(obj.AdminGroups), + } + + return []interface{}{transformed} + +} +func expandContainerAwsClusterAuthorizationAdminUsersArray(o interface{}) []containeraws.ClusterAuthorizationAdminUsers { + if o == nil { + return make([]containeraws.ClusterAuthorizationAdminUsers, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return make([]containeraws.ClusterAuthorizationAdminUsers, 0) + } + + items := make([]containeraws.ClusterAuthorizationAdminUsers, 0, len(objs)) + for _, item := range objs { + i := expandContainerAwsClusterAuthorizationAdminUsers(item) + items = append(items, *i) + } + + return items +} + +func expandContainerAwsClusterAuthorizationAdminUsers(o interface{}) *containeraws.ClusterAuthorizationAdminUsers { + if o == nil { + return containeraws.EmptyClusterAuthorizationAdminUsers + } + + obj := o.(map[string]interface{}) + return &containeraws.ClusterAuthorizationAdminUsers{ + Username: dcl.String(obj["username"].(string)), + } +} + +func flattenContainerAwsClusterAuthorizationAdminUsersArray(objs []containeraws.ClusterAuthorizationAdminUsers) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenContainerAwsClusterAuthorizationAdminUsers(&item) + items = append(items, i) + } + + return items +} + +func flattenContainerAwsClusterAuthorizationAdminUsers(obj *containeraws.ClusterAuthorizationAdminUsers) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "username": obj.Username, + } + + return transformed + +} +func expandContainerAwsClusterAuthorizationAdminGroupsArray(o interface{}) []containeraws.ClusterAuthorizationAdminGroups { + if o == nil { + return make([]containeraws.ClusterAuthorizationAdminGroups, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 
|| objs[0] == nil { + return make([]containeraws.ClusterAuthorizationAdminGroups, 0) + } + + items := make([]containeraws.ClusterAuthorizationAdminGroups, 0, len(objs)) + for _, item := range objs { + i := expandContainerAwsClusterAuthorizationAdminGroups(item) + items = append(items, *i) + } + + return items +} + +func expandContainerAwsClusterAuthorizationAdminGroups(o interface{}) *containeraws.ClusterAuthorizationAdminGroups { + if o == nil { + return containeraws.EmptyClusterAuthorizationAdminGroups + } + + obj := o.(map[string]interface{}) + return &containeraws.ClusterAuthorizationAdminGroups{ + Group: dcl.String(obj["group"].(string)), + } +} + +func flattenContainerAwsClusterAuthorizationAdminGroupsArray(objs []containeraws.ClusterAuthorizationAdminGroups) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenContainerAwsClusterAuthorizationAdminGroups(&item) + items = append(items, i) + } + + return items +} + +func flattenContainerAwsClusterAuthorizationAdminGroups(obj *containeraws.ClusterAuthorizationAdminGroups) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "group": obj.Group, + } + + return transformed + +} + +func expandContainerAwsClusterControlPlane(o interface{}) *containeraws.ClusterControlPlane { + if o == nil { + return containeraws.EmptyClusterControlPlane + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return containeraws.EmptyClusterControlPlane + } + obj := objArr[0].(map[string]interface{}) + return &containeraws.ClusterControlPlane{ + AwsServicesAuthentication: expandContainerAwsClusterControlPlaneAwsServicesAuthentication(obj["aws_services_authentication"]), + ConfigEncryption: expandContainerAwsClusterControlPlaneConfigEncryption(obj["config_encryption"]), + DatabaseEncryption: expandContainerAwsClusterControlPlaneDatabaseEncryption(obj["database_encryption"]), + 
IamInstanceProfile: dcl.String(obj["iam_instance_profile"].(string)), + SubnetIds: tpgdclresource.ExpandStringArray(obj["subnet_ids"]), + Version: dcl.String(obj["version"].(string)), +{{- if ne $.TargetVersionName "ga" }} + InstancePlacement: expandContainerAwsClusterControlPlaneInstancePlacement(obj["instance_placement"]), +{{- end }} + InstanceType: dcl.StringOrNil(obj["instance_type"].(string)), + MainVolume: expandContainerAwsClusterControlPlaneMainVolume(obj["main_volume"]), + ProxyConfig: expandContainerAwsClusterControlPlaneProxyConfig(obj["proxy_config"]), + RootVolume: expandContainerAwsClusterControlPlaneRootVolume(obj["root_volume"]), + SecurityGroupIds: tpgdclresource.ExpandStringArray(obj["security_group_ids"]), + SshConfig: expandContainerAwsClusterControlPlaneSshConfig(obj["ssh_config"]), + Tags: tpgresource.CheckStringMap(obj["tags"]), + } +} + +func flattenContainerAwsClusterControlPlane(obj *containeraws.ClusterControlPlane) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "aws_services_authentication": flattenContainerAwsClusterControlPlaneAwsServicesAuthentication(obj.AwsServicesAuthentication), + "config_encryption": flattenContainerAwsClusterControlPlaneConfigEncryption(obj.ConfigEncryption), + "database_encryption": flattenContainerAwsClusterControlPlaneDatabaseEncryption(obj.DatabaseEncryption), + "iam_instance_profile": obj.IamInstanceProfile, + "subnet_ids": obj.SubnetIds, + "version": obj.Version, +{{- if ne $.TargetVersionName "ga" }} + "instance_placement": flattenContainerAwsClusterControlPlaneInstancePlacement(obj.InstancePlacement), +{{- end }} + "instance_type": obj.InstanceType, + "main_volume": flattenContainerAwsClusterControlPlaneMainVolume(obj.MainVolume), + "proxy_config": flattenContainerAwsClusterControlPlaneProxyConfig(obj.ProxyConfig), + "root_volume": flattenContainerAwsClusterControlPlaneRootVolume(obj.RootVolume), + "security_group_ids": obj.SecurityGroupIds, + 
"ssh_config": flattenContainerAwsClusterControlPlaneSshConfig(obj.SshConfig), + "tags": obj.Tags, + } + + return []interface{}{transformed} + +} + +func expandContainerAwsClusterControlPlaneAwsServicesAuthentication(o interface{}) *containeraws.ClusterControlPlaneAwsServicesAuthentication { + if o == nil { + return containeraws.EmptyClusterControlPlaneAwsServicesAuthentication + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return containeraws.EmptyClusterControlPlaneAwsServicesAuthentication + } + obj := objArr[0].(map[string]interface{}) + return &containeraws.ClusterControlPlaneAwsServicesAuthentication{ + RoleArn: dcl.String(obj["role_arn"].(string)), + RoleSessionName: dcl.StringOrNil(obj["role_session_name"].(string)), + } +} + +func flattenContainerAwsClusterControlPlaneAwsServicesAuthentication(obj *containeraws.ClusterControlPlaneAwsServicesAuthentication) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "role_arn": obj.RoleArn, + "role_session_name": obj.RoleSessionName, + } + + return []interface{}{transformed} + +} + +func expandContainerAwsClusterControlPlaneConfigEncryption(o interface{}) *containeraws.ClusterControlPlaneConfigEncryption { + if o == nil { + return containeraws.EmptyClusterControlPlaneConfigEncryption + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return containeraws.EmptyClusterControlPlaneConfigEncryption + } + obj := objArr[0].(map[string]interface{}) + return &containeraws.ClusterControlPlaneConfigEncryption{ + KmsKeyArn: dcl.String(obj["kms_key_arn"].(string)), + } +} + +func flattenContainerAwsClusterControlPlaneConfigEncryption(obj *containeraws.ClusterControlPlaneConfigEncryption) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "kms_key_arn": obj.KmsKeyArn, + } + + return []interface{}{transformed} + +} + +func 
expandContainerAwsClusterControlPlaneDatabaseEncryption(o interface{}) *containeraws.ClusterControlPlaneDatabaseEncryption { + if o == nil { + return containeraws.EmptyClusterControlPlaneDatabaseEncryption + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return containeraws.EmptyClusterControlPlaneDatabaseEncryption + } + obj := objArr[0].(map[string]interface{}) + return &containeraws.ClusterControlPlaneDatabaseEncryption{ + KmsKeyArn: dcl.String(obj["kms_key_arn"].(string)), + } +} + +func flattenContainerAwsClusterControlPlaneDatabaseEncryption(obj *containeraws.ClusterControlPlaneDatabaseEncryption) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "kms_key_arn": obj.KmsKeyArn, + } + + return []interface{}{transformed} + +} + +{{- if ne $.TargetVersionName "ga" }} +func expandContainerAwsClusterControlPlaneInstancePlacement(o interface{}) *containeraws.ClusterControlPlaneInstancePlacement { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &containeraws.ClusterControlPlaneInstancePlacement{ + Tenancy: containeraws.ClusterControlPlaneInstancePlacementTenancyEnumRef(obj["tenancy"].(string)), + } +} + +func flattenContainerAwsClusterControlPlaneInstancePlacement(obj *containeraws.ClusterControlPlaneInstancePlacement) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "tenancy": obj.Tenancy, + } + + return []interface{}{transformed} + +} + +{{- end }} +func expandContainerAwsClusterControlPlaneMainVolume(o interface{}) *containeraws.ClusterControlPlaneMainVolume { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &containeraws.ClusterControlPlaneMainVolume{ + Iops: 
dcl.Int64OrNil(int64(obj["iops"].(int))), + KmsKeyArn: dcl.String(obj["kms_key_arn"].(string)), + SizeGib: dcl.Int64OrNil(int64(obj["size_gib"].(int))), + Throughput: dcl.Int64OrNil(int64(obj["throughput"].(int))), + VolumeType: containeraws.ClusterControlPlaneMainVolumeVolumeTypeEnumRef(obj["volume_type"].(string)), + } +} + +func flattenContainerAwsClusterControlPlaneMainVolume(obj *containeraws.ClusterControlPlaneMainVolume) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "iops": obj.Iops, + "kms_key_arn": obj.KmsKeyArn, + "size_gib": obj.SizeGib, + "throughput": obj.Throughput, + "volume_type": obj.VolumeType, + } + + return []interface{}{transformed} + +} + +func expandContainerAwsClusterControlPlaneProxyConfig(o interface{}) *containeraws.ClusterControlPlaneProxyConfig { + if o == nil { + return containeraws.EmptyClusterControlPlaneProxyConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return containeraws.EmptyClusterControlPlaneProxyConfig + } + obj := objArr[0].(map[string]interface{}) + return &containeraws.ClusterControlPlaneProxyConfig{ + SecretArn: dcl.String(obj["secret_arn"].(string)), + SecretVersion: dcl.String(obj["secret_version"].(string)), + } +} + +func flattenContainerAwsClusterControlPlaneProxyConfig(obj *containeraws.ClusterControlPlaneProxyConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "secret_arn": obj.SecretArn, + "secret_version": obj.SecretVersion, + } + + return []interface{}{transformed} + +} + +func expandContainerAwsClusterControlPlaneRootVolume(o interface{}) *containeraws.ClusterControlPlaneRootVolume { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &containeraws.ClusterControlPlaneRootVolume{ + Iops: dcl.Int64OrNil(int64(obj["iops"].(int))), 
+ KmsKeyArn: dcl.String(obj["kms_key_arn"].(string)), + SizeGib: dcl.Int64OrNil(int64(obj["size_gib"].(int))), + Throughput: dcl.Int64OrNil(int64(obj["throughput"].(int))), + VolumeType: containeraws.ClusterControlPlaneRootVolumeVolumeTypeEnumRef(obj["volume_type"].(string)), + } +} + +func flattenContainerAwsClusterControlPlaneRootVolume(obj *containeraws.ClusterControlPlaneRootVolume) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "iops": obj.Iops, + "kms_key_arn": obj.KmsKeyArn, + "size_gib": obj.SizeGib, + "throughput": obj.Throughput, + "volume_type": obj.VolumeType, + } + + return []interface{}{transformed} + +} + +func expandContainerAwsClusterControlPlaneSshConfig(o interface{}) *containeraws.ClusterControlPlaneSshConfig { + if o == nil { + return containeraws.EmptyClusterControlPlaneSshConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return containeraws.EmptyClusterControlPlaneSshConfig + } + obj := objArr[0].(map[string]interface{}) + return &containeraws.ClusterControlPlaneSshConfig{ + Ec2KeyPair: dcl.String(obj["ec2_key_pair"].(string)), + } +} + +func flattenContainerAwsClusterControlPlaneSshConfig(obj *containeraws.ClusterControlPlaneSshConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "ec2_key_pair": obj.Ec2KeyPair, + } + + return []interface{}{transformed} + +} + +func expandContainerAwsClusterFleet(o interface{}) *containeraws.ClusterFleet { + if o == nil { + return containeraws.EmptyClusterFleet + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return containeraws.EmptyClusterFleet + } + obj := objArr[0].(map[string]interface{}) + return &containeraws.ClusterFleet{ + Project: dcl.StringOrNil(obj["project"].(string)), + } +} + +func flattenContainerAwsClusterFleet(obj *containeraws.ClusterFleet) interface{} { + if obj == nil || obj.Empty() { + return nil + } 
+ transformed := map[string]interface{}{ + "project": obj.Project, + "membership": obj.Membership, + } + + return []interface{}{transformed} + +} + +func expandContainerAwsClusterNetworking(o interface{}) *containeraws.ClusterNetworking { + if o == nil { + return containeraws.EmptyClusterNetworking + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return containeraws.EmptyClusterNetworking + } + obj := objArr[0].(map[string]interface{}) + return &containeraws.ClusterNetworking{ + PodAddressCidrBlocks: tpgdclresource.ExpandStringArray(obj["pod_address_cidr_blocks"]), + ServiceAddressCidrBlocks: tpgdclresource.ExpandStringArray(obj["service_address_cidr_blocks"]), + VPCId: dcl.String(obj["vpc_id"].(string)), + PerNodePoolSgRulesDisabled: dcl.Bool(obj["per_node_pool_sg_rules_disabled"].(bool)), + } +} + +func flattenContainerAwsClusterNetworking(obj *containeraws.ClusterNetworking) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "pod_address_cidr_blocks": obj.PodAddressCidrBlocks, + "service_address_cidr_blocks": obj.ServiceAddressCidrBlocks, + "vpc_id": obj.VPCId, + "per_node_pool_sg_rules_disabled": obj.PerNodePoolSgRulesDisabled, + } + + return []interface{}{transformed} + +} + +func expandContainerAwsClusterBinaryAuthorization(o interface{}) *containeraws.ClusterBinaryAuthorization { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &containeraws.ClusterBinaryAuthorization{ + EvaluationMode: containeraws.ClusterBinaryAuthorizationEvaluationModeEnumRef(obj["evaluation_mode"].(string)), + } +} + +func flattenContainerAwsClusterBinaryAuthorization(obj *containeraws.ClusterBinaryAuthorization) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "evaluation_mode": obj.EvaluationMode, + } + + return 
[]interface{}{transformed} + +} + +{{- if ne $.TargetVersionName "ga" }} +func expandContainerAwsClusterLoggingConfig(o interface{}) *containeraws.ClusterLoggingConfig { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &containeraws.ClusterLoggingConfig{ + ComponentConfig: expandContainerAwsClusterLoggingConfigComponentConfig(obj["component_config"]), + } +} + +func flattenContainerAwsClusterLoggingConfig(obj *containeraws.ClusterLoggingConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "component_config": flattenContainerAwsClusterLoggingConfigComponentConfig(obj.ComponentConfig), + } + + return []interface{}{transformed} + +} + +func expandContainerAwsClusterLoggingConfigComponentConfig(o interface{}) *containeraws.ClusterLoggingConfigComponentConfig { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &containeraws.ClusterLoggingConfigComponentConfig{ + EnableComponents: expandContainerAwsClusterLoggingConfigComponentConfigEnableComponentsArray(obj["enable_components"]), + } +} + +func flattenContainerAwsClusterLoggingConfigComponentConfig(obj *containeraws.ClusterLoggingConfigComponentConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "enable_components": flattenContainerAwsClusterLoggingConfigComponentConfigEnableComponentsArray(obj.EnableComponents), + } + + return []interface{}{transformed} + +} + +{{- end }} +func flattenContainerAwsClusterWorkloadIdentityConfig(obj *containeraws.ClusterWorkloadIdentityConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "identity_provider": obj.IdentityProvider, + "issuer_uri": 
obj.IssuerUri, + "workload_pool": obj.WorkloadPool, + } + + return []interface{}{transformed} + +} + +func flattenContainerAwsClusterAnnotations(v map[string]string, d *schema.ResourceData) interface{} { + if v == nil { + return nil + } + + transformed := make(map[string]interface{}) + if l, ok := d.Get("annotations").(map[string]interface{}); ok { + for k := range l { + transformed[k] = v[k] + } + } + + return transformed +} +{{- if ne $.TargetVersionName "ga" }} + +func flattenContainerAwsClusterLoggingConfigComponentConfigEnableComponentsArray(obj []containeraws.ClusterLoggingConfigComponentConfigEnableComponentsEnum) interface{} { + if obj == nil { + return nil + } + items := []string{} + for _, item := range obj { + items = append(items, string(item)) + } + return items +} +func expandContainerAwsClusterLoggingConfigComponentConfigEnableComponentsArray(o interface{}) []containeraws.ClusterLoggingConfigComponentConfigEnableComponentsEnum { + objs := o.([]interface{}) + items := make([]containeraws.ClusterLoggingConfigComponentConfigEnableComponentsEnum, 0, len(objs)) + for _, item := range objs { + i := containeraws.ClusterLoggingConfigComponentConfigEnableComponentsEnumRef(item.(string)) + items = append(items, *i) + } + return items +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/containeraws/resource_container_aws_cluster_generated_test.go.tmpl b/mmv1/third_party/terraform/services/containeraws/resource_container_aws_cluster_generated_test.go.tmpl new file mode 100644 index 000000000000..6a75936b71e5 --- /dev/null +++ b/mmv1/third_party/terraform/services/containeraws/resource_container_aws_cluster_generated_test.go.tmpl @@ -0,0 +1,1026 @@ +package containeraws_test + +import ( + "context" + "fmt" + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" +{{- if eq $.TargetVersionName "ga" }} + containeraws "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws" +{{- else }} + 
containeraws "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/beta" +{{- end }} + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func TestAccContainerAwsCluster_BasicHandWritten(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "aws_acct_id": "111111111111", + "aws_db_key": "00000000-0000-0000-0000-17aad2f0f61f", + "aws_region": "us-west-2", + "aws_sg": "sg-0b3f63cb91b247628", + "aws_subnet": "subnet-0b3f63cb91b247628", + "aws_vol_key": "00000000-0000-0000-0000-17aad2f0f61f", + "aws_vpc": "vpc-0b3f63cb91b247628", + "byo_prefix": "mmv2", + "project_name": envvar.GetTestProjectFromEnv(), + "project_number": envvar.GetTestProjectNumberFromEnv(), + "service_acct": envvar.GetTestServiceAccountFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerAwsClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerAwsCluster_BasicHandWritten(context), + }, + { + ResourceName: "google_container_aws_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"fleet.0.project", "annotations"}, + }, + { + Config: testAccContainerAwsCluster_BasicHandWrittenUpdate0(context), + }, + { + ResourceName: "google_container_aws_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"fleet.0.project", "annotations"}, + }, + }, + }) +} +func TestAccContainerAwsCluster_BasicEnumHandWritten(t 
*testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "aws_acct_id": "111111111111", + "aws_db_key": "00000000-0000-0000-0000-17aad2f0f61f", + "aws_region": "us-west-2", + "aws_sg": "sg-0b3f63cb91b247628", + "aws_subnet": "subnet-0b3f63cb91b247628", + "aws_vol_key": "00000000-0000-0000-0000-17aad2f0f61f", + "aws_vpc": "vpc-0b3f63cb91b247628", + "byo_prefix": "mmv2", + "project_name": envvar.GetTestProjectFromEnv(), + "project_number": envvar.GetTestProjectNumberFromEnv(), + "service_acct": envvar.GetTestServiceAccountFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerAwsClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerAwsCluster_BasicEnumHandWritten(context), + }, + { + ResourceName: "google_container_aws_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"fleet.0.project", "annotations"}, + }, + { + Config: testAccContainerAwsCluster_BasicEnumHandWrittenUpdate0(context), + }, + { + ResourceName: "google_container_aws_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"fleet.0.project", "annotations"}, + }, + }, + }) +} +{{- if ne $.TargetVersionName "ga" }} +func TestAccContainerAwsCluster_BetaBasicHandWritten(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "aws_acct_id": "111111111111", + "aws_db_key": "00000000-0000-0000-0000-17aad2f0f61f", + "aws_region": "us-west-2", + "aws_sg": "sg-0b3f63cb91b247628", + "aws_subnet": "subnet-0b3f63cb91b247628", + "aws_vol_key": "00000000-0000-0000-0000-17aad2f0f61f", + "aws_vpc": "vpc-0b3f63cb91b247628", + "byo_prefix": "mmv2", + "project_name": envvar.GetTestProjectFromEnv(), + "project_number": envvar.GetTestProjectNumberFromEnv(), + 
"service_acct": envvar.GetTestServiceAccountFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckContainerAwsClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerAwsCluster_BetaBasicHandWritten(context), + }, + { + ResourceName: "google_container_aws_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"fleet.0.project", "annotations"}, + }, + { + Config: testAccContainerAwsCluster_BetaBasicHandWrittenUpdate0(context), + }, + { + ResourceName: "google_container_aws_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"fleet.0.project", "annotations"}, + }, + }, + }) +} +func TestAccContainerAwsCluster_BetaBasicEnumHandWritten(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "aws_acct_id": "111111111111", + "aws_db_key": "00000000-0000-0000-0000-17aad2f0f61f", + "aws_region": "us-west-2", + "aws_sg": "sg-0b3f63cb91b247628", + "aws_subnet": "subnet-0b3f63cb91b247628", + "aws_vol_key": "00000000-0000-0000-0000-17aad2f0f61f", + "aws_vpc": "vpc-0b3f63cb91b247628", + "byo_prefix": "mmv2", + "project_name": envvar.GetTestProjectFromEnv(), + "project_number": envvar.GetTestProjectNumberFromEnv(), + "service_acct": envvar.GetTestServiceAccountFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckContainerAwsClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerAwsCluster_BetaBasicEnumHandWritten(context), + }, + { + ResourceName: "google_container_aws_cluster.primary", + ImportState: true, + 
ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"fleet.0.project", "annotations"}, + }, + { + Config: testAccContainerAwsCluster_BetaBasicEnumHandWrittenUpdate0(context), + }, + { + ResourceName: "google_container_aws_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"fleet.0.project", "annotations"}, + }, + }, + }) +} +{{- end }} + +func testAccContainerAwsCluster_BasicHandWritten(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_container_aws_versions" "versions" { + project = "%{project_name}" + location = "us-west1" +} + +resource "google_container_aws_cluster" "primary" { + authorization { + admin_users { + username = "%{service_acct}" + } + admin_groups { + group = "group@domain.com" + } + } + + aws_region = "%{aws_region}" + + control_plane { + aws_services_authentication { + role_arn = "arn:aws:iam::%{aws_acct_id}:role/%{byo_prefix}-1p-dev-oneplatform" + role_session_name = "%{byo_prefix}-1p-dev-session" + } + + config_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + database_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + iam_instance_profile = "%{byo_prefix}-1p-dev-controlplane" + subnet_ids = ["%{aws_subnet}"] + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + instance_type = "t3.medium" + + main_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + security_group_ids = ["%{aws_sg}"] + + ssh_config 
{ + ec2_key_pair = "%{byo_prefix}-1p-dev-ssh" + } + + tags = { + owner = "%{service_acct}" + } + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.2.0.0/16"] + service_address_cidr_blocks = ["10.1.0.0/16"] + vpc_id = "%{aws_vpc}" + } + + annotations = { + label-one = "value-one" + } + + description = "A sample aws cluster" + project = "%{project_name}" +} + + +`, context) +} + +func testAccContainerAwsCluster_BasicHandWrittenUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_container_aws_versions" "versions" { + project = "%{project_name}" + location = "us-west1" +} + +resource "google_container_aws_cluster" "primary" { + authorization { + admin_users { + username = "%{service_acct}" + } + admin_groups { + group = "group@domain.com" + } + } + + aws_region = "%{aws_region}" + + control_plane { + aws_services_authentication { + role_arn = "arn:aws:iam::%{aws_acct_id}:role/%{byo_prefix}-1p-dev-oneplatform" + role_session_name = "%{byo_prefix}-1p-dev-session" + } + + config_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + database_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + iam_instance_profile = "%{byo_prefix}-1p-dev-controlplane" + subnet_ids = ["%{aws_subnet}"] + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + instance_type = "t3.medium" + + main_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + root_volume { + iops = 3000 + kms_key_arn = 
"arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + security_group_ids = ["%{aws_sg}"] + + ssh_config { + ec2_key_pair = "%{byo_prefix}-1p-dev-ssh" + } + + tags = { + owner = "%{service_acct}" + } + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.2.0.0/16"] + service_address_cidr_blocks = ["10.1.0.0/16"] + vpc_id = "%{aws_vpc}" + } + + annotations = { + label-two = "value-two" + } + + description = "An updated sample aws cluster" + project = "%{project_name}" +} + + +`, context) +} + +func testAccContainerAwsCluster_BasicEnumHandWritten(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_container_aws_versions" "versions" { + project = "%{project_name}" + location = "us-west1" +} + +resource "google_container_aws_cluster" "primary" { + authorization { + admin_users { + username = "%{service_acct}" + } + } + + aws_region = "%{aws_region}" + + control_plane { + aws_services_authentication { + role_arn = "arn:aws:iam::%{aws_acct_id}:role/%{byo_prefix}-1p-dev-oneplatform" + role_session_name = "%{byo_prefix}-1p-dev-session" + } + + config_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + database_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + iam_instance_profile = "%{byo_prefix}-1p-dev-controlplane" + subnet_ids = ["%{aws_subnet}"] + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + instance_type = "t3.medium" + + main_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "gp3" + } + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = 
"12345678-ABCD-EFGH-IJKL-987654321098" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "gp3" + } + + security_group_ids = ["%{aws_sg}"] + + ssh_config { + ec2_key_pair = "%{byo_prefix}-1p-dev-ssh" + } + + tags = { + owner = "%{service_acct}" + } + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.2.0.0/16"] + service_address_cidr_blocks = ["10.1.0.0/16"] + vpc_id = "%{aws_vpc}" + } + + annotations = { + label-one = "value-one" + } + + description = "A sample aws cluster" + project = "%{project_name}" +} + + +`, context) +} + +func testAccContainerAwsCluster_BasicEnumHandWrittenUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_container_aws_versions" "versions" { + project = "%{project_name}" + location = "us-west1" +} + +resource "google_container_aws_cluster" "primary" { + authorization { + admin_users { + username = "%{service_acct}" + } + } + + aws_region = "%{aws_region}" + + control_plane { + aws_services_authentication { + role_arn = "arn:aws:iam::%{aws_acct_id}:role/%{byo_prefix}-1p-dev-oneplatform" + role_session_name = "%{byo_prefix}-1p-dev-session" + } + + config_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + database_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + iam_instance_profile = "%{byo_prefix}-1p-dev-controlplane" + subnet_ids = ["%{aws_subnet}"] + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + instance_type = "t3.medium" + + main_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "gp3" + } + + proxy_config { + secret_arn = 
"arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "gp3" + } + + security_group_ids = ["%{aws_sg}"] + + ssh_config { + ec2_key_pair = "%{byo_prefix}-1p-dev-ssh" + } + + tags = { + owner = "%{service_acct}" + } + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.2.0.0/16"] + service_address_cidr_blocks = ["10.1.0.0/16"] + vpc_id = "%{aws_vpc}" + } + + annotations = { + label-two = "value-two" + } + + description = "An updated sample aws cluster" + project = "%{project_name}" +{{- if ne $.TargetVersionName "ga" }} +} + + +`, context) +} + +func testAccContainerAwsCluster_BetaBasicHandWritten(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_container_aws_versions" "versions" { + provider = google-beta + project = "%{project_name}" + location = "us-west1" +} + +resource "google_container_aws_cluster" "primary" { + provider = google-beta + authorization { + admin_users { + username = "%{service_acct}" + } + } + + aws_region = "%{aws_region}" + + control_plane { + aws_services_authentication { + role_arn = "arn:aws:iam::%{aws_acct_id}:role/%{byo_prefix}-1p-dev-oneplatform" + role_session_name = "%{byo_prefix}-1p-dev-session" + } + + config_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + database_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + iam_instance_profile = "%{byo_prefix}-1p-dev-controlplane" + subnet_ids = ["%{aws_subnet}"] + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + instance_type = "t3.medium" + + main_volume { + iops = 3000 + 
kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + security_group_ids = ["%{aws_sg}"] + + ssh_config { + ec2_key_pair = "%{byo_prefix}-1p-dev-ssh" + } + + tags = { + owner = "%{service_acct}" + } + + instance_placement { + tenancy = "DEDICATED" + } + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.2.0.0/16"] + service_address_cidr_blocks = ["10.1.0.0/16"] + vpc_id = "%{aws_vpc}" + } + + annotations = { + label-one = "value-one" + } + + description = "A sample aws cluster" + project = "%{project_name}" + + logging_config { + component_config { + enable_components = ["SYSTEM_COMPONENTS", "WORKLOADS"] + } + } + +} + + +`, context) +} + +func testAccContainerAwsCluster_BetaBasicHandWrittenUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_container_aws_versions" "versions" { + provider = google-beta + project = "%{project_name}" + location = "us-west1" +} + +resource "google_container_aws_cluster" "primary" { + provider = google-beta + authorization { + admin_users { + username = "%{service_acct}" + } + } + + aws_region = "%{aws_region}" + + control_plane { + aws_services_authentication { + role_arn = "arn:aws:iam::%{aws_acct_id}:role/%{byo_prefix}-1p-dev-oneplatform" + role_session_name = "%{byo_prefix}-1p-dev-session" + } + + config_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + database_encryption { + kms_key_arn = 
"arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + iam_instance_profile = "%{byo_prefix}-1p-dev-controlplane" + subnet_ids = ["%{aws_subnet}"] + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + instance_type = "t3.medium" + + main_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + security_group_ids = ["%{aws_sg}"] + + ssh_config { + ec2_key_pair = "%{byo_prefix}-1p-dev-ssh" + } + + tags = { + owner = "updated-%{service_acct}" + } + + instance_placement { + tenancy = "DEDICATED" + } + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.2.0.0/16"] + service_address_cidr_blocks = ["10.1.0.0/16"] + vpc_id = "%{aws_vpc}" + } + + annotations = { + label-two = "value-two" + } + + description = "An updated sample aws cluster" + project = "%{project_name}" + + logging_config { + component_config { + enable_components = ["SYSTEM_COMPONENTS", "WORKLOADS"] + } + } + +} + + +`, context) +} + +func testAccContainerAwsCluster_BetaBasicEnumHandWritten(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_container_aws_versions" "versions" { + provider = google-beta + project = "%{project_name}" + location = "us-west1" +} + +resource "google_container_aws_cluster" "primary" { + provider = google-beta + authorization { + admin_users { + username = "%{service_acct}" + } + } + + aws_region = "%{aws_region}" + + control_plane { + aws_services_authentication { + 
role_arn = "arn:aws:iam::%{aws_acct_id}:role/%{byo_prefix}-1p-dev-oneplatform" + role_session_name = "%{byo_prefix}-1p-dev-session" + } + + config_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + database_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + iam_instance_profile = "%{byo_prefix}-1p-dev-controlplane" + subnet_ids = ["%{aws_subnet}"] + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + instance_type = "t3.medium" + + main_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "gp3" + } + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "gp3" + } + + security_group_ids = ["%{aws_sg}"] + + ssh_config { + ec2_key_pair = "%{byo_prefix}-1p-dev-ssh" + } + + tags = { + owner = "%{service_acct}" + } + + instance_placement { + tenancy = "dedicated" + } + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.2.0.0/16"] + service_address_cidr_blocks = ["10.1.0.0/16"] + vpc_id = "%{aws_vpc}" + } + + annotations = { + label-one = "value-one" + } + + description = "A sample aws cluster" + project = "%{project_name}" + + logging_config { + component_config { + enable_components = ["system_components", "workloads"] + } + } + +} + +`, context) +} + +func testAccContainerAwsCluster_BetaBasicEnumHandWrittenUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_container_aws_versions" "versions" { + provider = google-beta + project = 
"%{project_name}" + location = "us-west1" +} + +resource "google_container_aws_cluster" "primary" { + provider = google-beta + authorization { + admin_users { + username = "%{service_acct}" + } + } + + aws_region = "%{aws_region}" + + binary_authorization { + evaluation_mode = "PROJECT_SINGLETON_POLICY_ENFORCE" + } + + control_plane { + aws_services_authentication { + role_arn = "arn:aws:iam::%{aws_acct_id}:role/%{byo_prefix}-1p-dev-oneplatform" + role_session_name = "%{byo_prefix}-1p-dev-session" + } + + config_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + database_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + iam_instance_profile = "%{byo_prefix}-1p-dev-controlplane" + subnet_ids = ["%{aws_subnet}"] + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + instance_type = "t3.medium" + + main_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "gp3" + } + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "gp3" + } + + security_group_ids = ["%{aws_sg}"] + + ssh_config { + ec2_key_pair = "%{byo_prefix}-1p-dev-ssh" + } + + tags = { + owner = "updated-%{service_acct}" + } + + instance_placement { + tenancy = "dedicated" + } + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.2.0.0/16"] + service_address_cidr_blocks = ["10.1.0.0/16"] + vpc_id = "%{aws_vpc}" + } + + annotations = { + label-two = "value-two" + } + + description = "An updated sample aws cluster" + project 
= "%{project_name}"
+
+  logging_config {
+    component_config {
+      enable_components = ["system_components", "workloads"]
+    }
+  }
+
+{{- end }}
+}
+
+
+`, context)
+}
+
+func testAccCheckContainerAwsClusterDestroyProducer(t *testing.T) func(s *terraform.State) error {
+	return func(s *terraform.State) error {
+		for name, rs := range s.RootModule().Resources {
+			// Only inspect managed google_container_aws_cluster resources.
+			// BUG FIX: the generated check compared against "rs.google_container_aws_cluster",
+			// a value rs.Type can never hold, so every resource was skipped and destroy was
+			// never actually verified.
+			if rs.Type != "google_container_aws_cluster" {
+				continue
+			}
+			if strings.HasPrefix(name, "data.") {
+				continue
+			}
+
+			config := acctest.GoogleProviderConfig(t)
+
+			billingProject := ""
+			if config.BillingProject != "" {
+				billingProject = config.BillingProject
+			}
+
+			// Rebuild the cluster identity from state so the DCL client can look it up.
+			obj := &containeraws.Cluster{
+				AwsRegion:   dcl.String(rs.Primary.Attributes["aws_region"]),
+				Location:    dcl.String(rs.Primary.Attributes["location"]),
+				Name:        dcl.String(rs.Primary.Attributes["name"]),
+				Description: dcl.String(rs.Primary.Attributes["description"]),
+				Project:     dcl.StringOrNil(rs.Primary.Attributes["project"]),
+				CreateTime:  dcl.StringOrNil(rs.Primary.Attributes["create_time"]),
+				Endpoint:    dcl.StringOrNil(rs.Primary.Attributes["endpoint"]),
+				Etag:        dcl.StringOrNil(rs.Primary.Attributes["etag"]),
+				Reconciling: dcl.Bool(rs.Primary.Attributes["reconciling"] == "true"),
+				State:       containeraws.ClusterStateEnumRef(rs.Primary.Attributes["state"]),
+				Uid:         dcl.StringOrNil(rs.Primary.Attributes["uid"]),
+				UpdateTime:  dcl.StringOrNil(rs.Primary.Attributes["update_time"]),
+			}
+
+			client := transport_tpg.NewDCLContainerAwsClient(config, config.UserAgent, billingProject, 0)
+			_, err := client.GetCluster(context.Background(), obj)
+			// A successful GET means the cluster survived destroy: fail the check.
+			if err == nil {
+				return fmt.Errorf("google_container_aws_cluster still exists %v", obj)
+			}
+		}
+		return nil
+	}
+}
diff --git a/mmv1/third_party/terraform/services/containeraws/resource_container_aws_node_pool.go.tmpl b/mmv1/third_party/terraform/services/containeraws/resource_container_aws_node_pool.go.tmpl
new file mode 100644
index 000000000000..25d433702171
--- /dev/null
+++ 
b/mmv1/third_party/terraform/services/containeraws/resource_container_aws_node_pool.go.tmpl @@ -0,0 +1,1384 @@ +package containeraws + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" +{{- if eq $.TargetVersionName "ga" }} + containeraws "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws" +{{- else }} + containeraws "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/beta" +{{- end }} + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceContainerAwsNodePool() *schema.Resource { + return &schema.Resource{ + Create: resourceContainerAwsNodePoolCreate, + Read: resourceContainerAwsNodePoolRead, + Update: resourceContainerAwsNodePoolUpdate, + Delete: resourceContainerAwsNodePoolDelete, + + Importer: &schema.ResourceImporter{ + State: resourceContainerAwsNodePoolImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, +{{- if ne $.TargetVersionName "ga" }} + tpgdclresource.ResourceContainerAwsNodePoolCustomizeDiffFunc, +{{- end }} + tpgresource.SetAnnotationsDiff, + ), + + Schema: map[string]*schema.Schema{ + "autoscaling": { + Type: schema.TypeList, + Required: true, + Description: "Autoscaler configuration for this node pool.", + MaxItems: 1, + Elem: ContainerAwsNodePoolAutoscalingSchema(), + }, + + "cluster": { + Type: schema.TypeString, + Required: true, + 
ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The awsCluster for the resource", + }, + + "config": { + Type: schema.TypeList, + Required: true, + Description: "The configuration of the node pool.", + MaxItems: 1, + Elem: ContainerAwsNodePoolConfigSchema(), + }, + + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The location for the resource", + }, + + "max_pods_constraint": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "The constraint on the maximum number of pods that can be run simultaneously on a node in the node pool.", + MaxItems: 1, + Elem: ContainerAwsNodePoolMaxPodsConstraintSchema(), + }, + + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The name of this resource.", + }, + + "subnet_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The subnet where the node pool node run.", + }, + + "version": { + Type: schema.TypeString, + Required: true, + Description: "The Kubernetes version to run on this node pool (e.g. `1.19.10-gke.1000`). 
You can list all supported versions on a given Google Cloud region by calling GetAwsServerConfig.", + }, + + "effective_annotations": { + Type: schema.TypeMap, + Computed: true, + Description: "All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services.", + }, + + "kubelet_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: "The kubelet configuration for the node pool.", + MaxItems: 1, + Elem: ContainerAwsNodePoolKubeletConfigSchema(), + }, + + "management": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: "The Management configuration for this node pool.", + MaxItems: 1, + Elem: ContainerAwsNodePoolManagementSchema(), + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The project for the resource", + }, + + "update_settings": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: "Optional. Update settings control the speed and disruption of the node pool update.", + MaxItems: 1, + Elem: ContainerAwsNodePoolUpdateSettingsSchema(), + }, + + "annotations": { + Type: schema.TypeMap, + Optional: true, + Description: "Optional. Annotations on the node pool. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. Key can have 2 segments: prefix (optional) and name (required), separated by a slash (/). Prefix must be a DNS subdomain. 
Name must be 63 characters or less, begin and end with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics between.\n\n**Note**: This field is non-authoritative, and will only manage the annotations present in your configuration.\nPlease refer to the field `effective_annotations` for all of the annotations present on the resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time at which this node pool was created.", + }, + + "etag": { + Type: schema.TypeString, + Computed: true, + Description: "Allows clients to perform consistent read-modify-writes through optimistic concurrency control. May be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.", + }, + + "reconciling": { + Type: schema.TypeBool, + Computed: true, + Description: "Output only. If set, there are currently changes in flight to the node pool.", + }, + + "state": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The lifecycle state of the node pool. Possible values: STATE_UNSPECIFIED, PROVISIONING, RUNNING, RECONCILING, STOPPING, ERROR, DEGRADED", + }, + + "uid": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. A globally unique identifier for the node pool.", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time at which this node pool was last updated.", + }, + }, + } +} + +func ContainerAwsNodePoolAutoscalingSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_node_count": { + Type: schema.TypeInt, + Required: true, + Description: "Maximum number of nodes in the NodePool. Must be >= min_node_count.", + }, + + "min_node_count": { + Type: schema.TypeInt, + Required: true, + Description: "Minimum number of nodes in the NodePool. 
Must be >= 1 and <= max_node_count.", + }, + }, + } +} + +func ContainerAwsNodePoolConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "config_encryption": { + Type: schema.TypeList, + Required: true, + Description: "The ARN of the AWS KMS key used to encrypt node pool configuration.", + MaxItems: 1, + Elem: ContainerAwsNodePoolConfigConfigEncryptionSchema(), + }, + + "iam_instance_profile": { + Type: schema.TypeString, + Required: true, + Description: "The name of the AWS IAM role assigned to nodes in the pool.", + }, + + "autoscaling_metrics_collection": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. Configuration related to CloudWatch metrics collection on the Auto Scaling group of the node pool. When unspecified, metrics collection is disabled.", + MaxItems: 1, + Elem: ContainerAwsNodePoolConfigAutoscalingMetricsCollectionSchema(), + }, + +{{- if ne $.TargetVersionName "ga" }} + "image_type": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: "The OS image type to use on node pool instances.", + }, + + "instance_placement": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Details of placement information for an instance.", + MaxItems: 1, + Elem: ContainerAwsNodePoolConfigInstancePlacementSchema(), + }, + +{{- end }} + "instance_type": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: "Optional. The AWS instance type. When unspecified, it defaults to `m5.large`.", + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: "Optional. The initial labels assigned to nodes of this node pool. An object containing a list of \"key\": value pairs. 
Example: { \"name\": \"wrench\", \"mass\": \"1.3kg\", \"count\": \"3\" }.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "proxy_config": { + Type: schema.TypeList, + Optional: true, + Description: "Proxy configuration for outbound HTTP(S) traffic.", + MaxItems: 1, + Elem: ContainerAwsNodePoolConfigProxyConfigSchema(), + }, + + "root_volume": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: "Optional. Template for the root volume provisioned for node pool nodes. Volumes will be provisioned in the availability zone assigned to the node pool subnet. When unspecified, it defaults to 32 GiB with the GP2 volume type.", + MaxItems: 1, + Elem: ContainerAwsNodePoolConfigRootVolumeSchema(), + }, + + "security_group_ids": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. The IDs of additional security groups to add to nodes in this pool. The manager will automatically create security groups with minimum rules needed for a functioning cluster.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + +{{- if ne $.TargetVersionName "ga" }} + "spot_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. When specified, the node pool will provision Spot instances from the set of spot_config.instance_types. This field is mutually exclusive with `instance_type`", + MaxItems: 1, + Elem: ContainerAwsNodePoolConfigSpotConfigSchema(), + }, + +{{- end }} + "ssh_config": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. The SSH configuration.", + MaxItems: 1, + Elem: ContainerAwsNodePoolConfigSshConfigSchema(), + }, + + "tags": { + Type: schema.TypeMap, + Optional: true, + Description: "Optional. Key/value metadata to assign to each underlying AWS resource. Specify at most 50 pairs containing alphanumerics, spaces, and symbols (.+-=_:@/). Keys can be up to 127 Unicode characters. 
Values can be up to 255 Unicode characters.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "taints": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The initial taints assigned to nodes of this node pool.", + Elem: ContainerAwsNodePoolConfigTaintsSchema(), + }, + }, + } +} + +func ContainerAwsNodePoolConfigConfigEncryptionSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key_arn": { + Type: schema.TypeString, + Required: true, + Description: "The ARN of the AWS KMS key used to encrypt node pool configuration.", + }, + }, + } +} + +func ContainerAwsNodePoolConfigAutoscalingMetricsCollectionSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "granularity": { + Type: schema.TypeString, + Required: true, + Description: "The frequency at which EC2 Auto Scaling sends aggregated data to AWS CloudWatch. The only valid value is \"1Minute\".", + }, + + "metrics": { + Type: schema.TypeList, + Optional: true, + Description: "The metrics to enable. For a list of valid metrics, see https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_EnableMetricsCollection.html. If you specify granularity and don't specify any metrics, all metrics are enabled.", + Elem: &schema.Schema{Type: schema.TypeString}, +{{- if ne $.TargetVersionName "ga" }} + }, + }, + } +} + +func ContainerAwsNodePoolConfigInstancePlacementSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "tenancy": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CaseDiffSuppress, + Description: "The tenancy for the instance. 
Possible values: TENANCY_UNSPECIFIED, DEFAULT, DEDICATED, HOST", +{{- end }} + }, + }, + } +} + +func ContainerAwsNodePoolConfigProxyConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "secret_arn": { + Type: schema.TypeString, + Required: true, + Description: "The ARN of the AWS Secret Manager secret that contains the HTTP(S) proxy configuration.", + }, + + "secret_version": { + Type: schema.TypeString, + Required: true, + Description: "The version string of the AWS Secret Manager secret that contains the HTTP(S) proxy configuration.", + }, + }, + } +} + +func ContainerAwsNodePoolConfigRootVolumeSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "iops": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: "Optional. The number of I/O operations per second (IOPS) to provision for GP3 volume.", + }, + + "kms_key_arn": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. The Amazon Resource Name (ARN) of the Customer Managed Key (CMK) used to encrypt AWS EBS volumes. If not specified, the default Amazon managed key associated to the AWS region where this cluster runs will be used.", + }, + + "size_gib": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: "Optional. The size of the volume, in GiBs. When unspecified, a default value is provided. See the specific reference in the parent resource.", + }, + + "throughput": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: "Optional. The throughput to provision for the volume, in MiB/s. Only valid if the volume type is GP3. If volume type is gp3 and throughput is not specified, the throughput will defaults to 125.", + }, + + "volume_type": { + Type: schema.TypeString, + Computed: true, + Optional: true, + DiffSuppressFunc: tpgresource.CaseDiffSuppress, + Description: "Optional. Type of the EBS volume. 
When unspecified, it defaults to GP2 volume. Possible values: VOLUME_TYPE_UNSPECIFIED, GP2, GP3", +{{- if ne $.TargetVersionName "ga" }} + }, + }, + } +} + +func ContainerAwsNodePoolConfigSpotConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "instance_types": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "List of AWS EC2 instance types for creating a spot node pool's nodes. The specified instance types must have the same number of CPUs and memory. You can use the Amazon EC2 Instance Selector tool (https://github.com/aws/amazon-ec2-instance-selector) to choose instance types with matching CPU and memory", + Elem: &schema.Schema{Type: schema.TypeString}, +{{- end }} + }, + }, + } +} + +func ContainerAwsNodePoolConfigSshConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ec2_key_pair": { + Type: schema.TypeString, + Required: true, + Description: "The name of the EC2 key pair used to login into cluster machines.", + }, + }, + } +} + +func ContainerAwsNodePoolConfigTaintsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "effect": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CaseDiffSuppress, + Description: "The taint effect. 
Possible values: EFFECT_UNSPECIFIED, NO_SCHEDULE, PREFER_NO_SCHEDULE, NO_EXECUTE", + }, + + "key": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Key for the taint.", + }, + + "value": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Value for the taint.", + }, + }, + } +} + +func ContainerAwsNodePoolMaxPodsConstraintSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_pods_per_node": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: "The maximum number of pods to schedule on a single node.", + }, + }, + } +} + +func ContainerAwsNodePoolKubeletConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cpu_cfs_quota": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Whether or not to enable CPU CFS quota. Defaults to true.", + }, + + "cpu_cfs_quota_period": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The CPU CFS quota period to use for the node. Defaults to \"100ms\".", + }, + + "cpu_manager_policy": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: "The CpuManagerPolicy to use for the node. Defaults to \"none\".", + }, + + "pod_pids_limit": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "Optional. The maximum number of PIDs in each pod running on the node. The limit scales automatically based on underlying machine size if left unset.", + }, + }, + } +} + +func ContainerAwsNodePoolManagementSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "auto_repair": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + Description: "Optional. 
Whether or not the nodes will be automatically repaired.", + }, + }, + } +} + +func ContainerAwsNodePoolUpdateSettingsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "surge_settings": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: "Optional. Settings for surge update.", + MaxItems: 1, + Elem: ContainerAwsNodePoolUpdateSettingsSurgeSettingsSchema(), + }, + }, + } +} + +func ContainerAwsNodePoolUpdateSettingsSurgeSettingsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_surge": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: "Optional. The maximum number of nodes that can be created beyond the current size of the node pool during the update process.", + }, + + "max_unavailable": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: "Optional. The maximum number of nodes that can be simultaneously unavailable during the update process. 
A node is considered unavailable if its status is not Ready.", + }, + }, + } +} + +func resourceContainerAwsNodePoolCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &containeraws.NodePool{ + Autoscaling: expandContainerAwsNodePoolAutoscaling(d.Get("autoscaling")), + Cluster: dcl.String(d.Get("cluster").(string)), + Config: expandContainerAwsNodePoolConfig(d.Get("config")), + Location: dcl.String(d.Get("location").(string)), + MaxPodsConstraint: expandContainerAwsNodePoolMaxPodsConstraint(d.Get("max_pods_constraint")), + Name: dcl.String(d.Get("name").(string)), + SubnetId: dcl.String(d.Get("subnet_id").(string)), + Version: dcl.String(d.Get("version").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), + KubeletConfig: expandContainerAwsNodePoolKubeletConfig(d.Get("kubelet_config")), + Management: expandContainerAwsNodePoolManagement(d.Get("management")), + Project: dcl.String(project), + UpdateSettings: expandContainerAwsNodePoolUpdateSettings(d.Get("update_settings")), + } + + id, err := obj.ID() + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := transport_tpg.NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := 
client.ApplyNodePool(context.Background(), obj, directive...) + + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating NodePool: %s", err) + } + + log.Printf("[DEBUG] Finished creating NodePool %q: %#v", d.Id(), res) + + return resourceContainerAwsNodePoolRead(d, meta) +} + +func resourceContainerAwsNodePoolRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &containeraws.NodePool{ + Autoscaling: expandContainerAwsNodePoolAutoscaling(d.Get("autoscaling")), + Cluster: dcl.String(d.Get("cluster").(string)), + Config: expandContainerAwsNodePoolConfig(d.Get("config")), + Location: dcl.String(d.Get("location").(string)), + MaxPodsConstraint: expandContainerAwsNodePoolMaxPodsConstraint(d.Get("max_pods_constraint")), + Name: dcl.String(d.Get("name").(string)), + SubnetId: dcl.String(d.Get("subnet_id").(string)), + Version: dcl.String(d.Get("version").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), + KubeletConfig: expandContainerAwsNodePoolKubeletConfig(d.Get("kubelet_config")), + Management: expandContainerAwsNodePoolManagement(d.Get("management")), + Project: dcl.String(project), + UpdateSettings: expandContainerAwsNodePoolUpdateSettings(d.Get("update_settings")), + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := transport_tpg.NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := 
tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetNodePool(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("ContainerAwsNodePool %q", d.Id()) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("autoscaling", flattenContainerAwsNodePoolAutoscaling(res.Autoscaling)); err != nil { + return fmt.Errorf("error setting autoscaling in state: %s", err) + } + if err = d.Set("cluster", res.Cluster); err != nil { + return fmt.Errorf("error setting cluster in state: %s", err) + } + if err = d.Set("config", flattenContainerAwsNodePoolConfig(res.Config)); err != nil { + return fmt.Errorf("error setting config in state: %s", err) + } + if err = d.Set("location", res.Location); err != nil { + return fmt.Errorf("error setting location in state: %s", err) + } + if err = d.Set("max_pods_constraint", flattenContainerAwsNodePoolMaxPodsConstraint(res.MaxPodsConstraint)); err != nil { + return fmt.Errorf("error setting max_pods_constraint in state: %s", err) + } + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + if err = d.Set("subnet_id", res.SubnetId); err != nil { + return fmt.Errorf("error setting subnet_id in state: %s", err) + } + if err = d.Set("version", res.Version); err != nil { + return fmt.Errorf("error setting version in state: %s", err) + } + if err = d.Set("effective_annotations", res.Annotations); err != nil { + return fmt.Errorf("error setting effective_annotations in state: %s", err) + } + if err = d.Set("kubelet_config", flattenContainerAwsNodePoolKubeletConfig(res.KubeletConfig)); err != nil { + return fmt.Errorf("error setting kubelet_config in state: %s", err) + } + if err = d.Set("management", tpgresource.FlattenContainerAwsNodePoolManagement(res.Management, 
d, config)); err != nil { + return fmt.Errorf("error setting management in state: %s", err) + } + if err = d.Set("project", res.Project); err != nil { + return fmt.Errorf("error setting project in state: %s", err) + } + if err = d.Set("update_settings", flattenContainerAwsNodePoolUpdateSettings(res.UpdateSettings)); err != nil { + return fmt.Errorf("error setting update_settings in state: %s", err) + } + if err = d.Set("annotations", flattenContainerAwsNodePoolAnnotations(res.Annotations, d)); err != nil { + return fmt.Errorf("error setting annotations in state: %s", err) + } + if err = d.Set("create_time", res.CreateTime); err != nil { + return fmt.Errorf("error setting create_time in state: %s", err) + } + if err = d.Set("etag", res.Etag); err != nil { + return fmt.Errorf("error setting etag in state: %s", err) + } + if err = d.Set("reconciling", res.Reconciling); err != nil { + return fmt.Errorf("error setting reconciling in state: %s", err) + } + if err = d.Set("state", res.State); err != nil { + return fmt.Errorf("error setting state in state: %s", err) + } + if err = d.Set("uid", res.Uid); err != nil { + return fmt.Errorf("error setting uid in state: %s", err) + } + if err = d.Set("update_time", res.UpdateTime); err != nil { + return fmt.Errorf("error setting update_time in state: %s", err) + } + + return nil +} +func resourceContainerAwsNodePoolUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &containeraws.NodePool{ + Autoscaling: expandContainerAwsNodePoolAutoscaling(d.Get("autoscaling")), + Cluster: dcl.String(d.Get("cluster").(string)), + Config: expandContainerAwsNodePoolConfig(d.Get("config")), + Location: dcl.String(d.Get("location").(string)), + MaxPodsConstraint: expandContainerAwsNodePoolMaxPodsConstraint(d.Get("max_pods_constraint")), + Name: dcl.String(d.Get("name").(string)), + SubnetId: 
dcl.String(d.Get("subnet_id").(string)), + Version: dcl.String(d.Get("version").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), + KubeletConfig: expandContainerAwsNodePoolKubeletConfig(d.Get("kubelet_config")), + Management: expandContainerAwsNodePoolManagement(d.Get("management")), + Project: dcl.String(project), + UpdateSettings: expandContainerAwsNodePoolUpdateSettings(d.Get("update_settings")), + } + directive := tpgdclresource.UpdateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := transport_tpg.NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyNodePool(context.Background(), obj, directive...) 
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error updating NodePool: %s", err) + } + + log.Printf("[DEBUG] Finished creating NodePool %q: %#v", d.Id(), res) + + return resourceContainerAwsNodePoolRead(d, meta) +} + +func resourceContainerAwsNodePoolDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &containeraws.NodePool{ + Autoscaling: expandContainerAwsNodePoolAutoscaling(d.Get("autoscaling")), + Cluster: dcl.String(d.Get("cluster").(string)), + Config: expandContainerAwsNodePoolConfig(d.Get("config")), + Location: dcl.String(d.Get("location").(string)), + MaxPodsConstraint: expandContainerAwsNodePoolMaxPodsConstraint(d.Get("max_pods_constraint")), + Name: dcl.String(d.Get("name").(string)), + SubnetId: dcl.String(d.Get("subnet_id").(string)), + Version: dcl.String(d.Get("version").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), + KubeletConfig: expandContainerAwsNodePoolKubeletConfig(d.Get("kubelet_config")), + Management: expandContainerAwsNodePoolManagement(d.Get("management")), + Project: dcl.String(project), + UpdateSettings: expandContainerAwsNodePoolUpdateSettings(d.Get("update_settings")), + } + + log.Printf("[DEBUG] Deleting NodePool %q", d.Id()) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := transport_tpg.NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := 
tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil {
+		d.SetId("")
+		return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err)
+	} else {
+		client.Config.BasePath = bp
+	}
+	if err := client.DeleteNodePool(context.Background(), obj); err != nil {
+		return fmt.Errorf("Error deleting NodePool: %s", err)
+	}
+
+	log.Printf("[DEBUG] Finished deleting NodePool %q", d.Id())
+	return nil
+}
+
+func resourceContainerAwsNodePoolImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
+	config := meta.(*transport_tpg.Config)
+
+	if err := tpgresource.ParseImportId([]string{
+		"projects/(?P<project>[^/]+)/locations/(?P<location>[^/]+)/awsClusters/(?P<cluster>[^/]+)/awsNodePools/(?P<name>[^/]+)",
+		"(?P<project>[^/]+)/(?P<location>[^/]+)/(?P<cluster>[^/]+)/(?P<name>[^/]+)",
+		"(?P<location>[^/]+)/(?P<cluster>[^/]+)/(?P<name>[^/]+)",
+	}, d, config); err != nil {
+		return nil, err
+	}
+
+	// Replace import id for the resource id
+	id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/awsClusters/{{ "{{" }}cluster{{ "}}" }}/awsNodePools/{{ "{{" }}name{{ "}}" }}")
+	if err != nil {
+		return nil, fmt.Errorf("Error constructing id: %s", err)
+	}
+	d.SetId(id)
+
+	return []*schema.ResourceData{d}, nil
+}
+
+func expandContainerAwsNodePoolAutoscaling(o interface{}) *containeraws.NodePoolAutoscaling {
+	if o == nil {
+		return containeraws.EmptyNodePoolAutoscaling
+	}
+	objArr := o.([]interface{})
+	if len(objArr) == 0 || objArr[0] == nil {
+		return containeraws.EmptyNodePoolAutoscaling
+	}
+	obj := objArr[0].(map[string]interface{})
+	return &containeraws.NodePoolAutoscaling{
+		MaxNodeCount: dcl.Int64(int64(obj["max_node_count"].(int))),
+		MinNodeCount: dcl.Int64(int64(obj["min_node_count"].(int))),
+	}
+}
+
+func flattenContainerAwsNodePoolAutoscaling(obj *containeraws.NodePoolAutoscaling) interface{} {
+	if obj == nil || obj.Empty() {
+		return nil
+	}
+	transformed := map[string]interface{}{
+		"max_node_count": obj.MaxNodeCount,
+		
"min_node_count": obj.MinNodeCount, + } + + return []interface{}{transformed} + +} + +func expandContainerAwsNodePoolConfig(o interface{}) *containeraws.NodePoolConfig { + if o == nil { + return containeraws.EmptyNodePoolConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return containeraws.EmptyNodePoolConfig + } + obj := objArr[0].(map[string]interface{}) + return &containeraws.NodePoolConfig{ + ConfigEncryption: expandContainerAwsNodePoolConfigConfigEncryption(obj["config_encryption"]), + IamInstanceProfile: dcl.String(obj["iam_instance_profile"].(string)), + AutoscalingMetricsCollection: expandContainerAwsNodePoolConfigAutoscalingMetricsCollection(obj["autoscaling_metrics_collection"]), +{{- if ne $.TargetVersionName "ga" }} + ImageType: dcl.StringOrNil(obj["image_type"].(string)), + InstancePlacement: expandContainerAwsNodePoolConfigInstancePlacement(obj["instance_placement"]), +{{- end }} + InstanceType: dcl.StringOrNil(obj["instance_type"].(string)), + Labels: tpgresource.CheckStringMap(obj["labels"]), + ProxyConfig: expandContainerAwsNodePoolConfigProxyConfig(obj["proxy_config"]), + RootVolume: expandContainerAwsNodePoolConfigRootVolume(obj["root_volume"]), + SecurityGroupIds: tpgdclresource.ExpandStringArray(obj["security_group_ids"]), +{{- if ne $.TargetVersionName "ga" }} + SpotConfig: expandContainerAwsNodePoolConfigSpotConfig(obj["spot_config"]), +{{- end }} + SshConfig: expandContainerAwsNodePoolConfigSshConfig(obj["ssh_config"]), + Tags: tpgresource.CheckStringMap(obj["tags"]), + Taints: expandContainerAwsNodePoolConfigTaintsArray(obj["taints"]), + } +} + +func flattenContainerAwsNodePoolConfig(obj *containeraws.NodePoolConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "config_encryption": flattenContainerAwsNodePoolConfigConfigEncryption(obj.ConfigEncryption), + "iam_instance_profile": obj.IamInstanceProfile, + "autoscaling_metrics_collection": 
flattenContainerAwsNodePoolConfigAutoscalingMetricsCollection(obj.AutoscalingMetricsCollection), +{{- if ne $.TargetVersionName "ga" }} + "image_type": obj.ImageType, + "instance_placement": flattenContainerAwsNodePoolConfigInstancePlacement(obj.InstancePlacement), +{{- end }} + "instance_type": obj.InstanceType, + "labels": obj.Labels, + "proxy_config": flattenContainerAwsNodePoolConfigProxyConfig(obj.ProxyConfig), + "root_volume": flattenContainerAwsNodePoolConfigRootVolume(obj.RootVolume), + "security_group_ids": obj.SecurityGroupIds, +{{- if ne $.TargetVersionName "ga" }} + "spot_config": flattenContainerAwsNodePoolConfigSpotConfig(obj.SpotConfig), +{{- end }} + "ssh_config": flattenContainerAwsNodePoolConfigSshConfig(obj.SshConfig), + "tags": obj.Tags, + "taints": flattenContainerAwsNodePoolConfigTaintsArray(obj.Taints), + } + + return []interface{}{transformed} + +} + +func expandContainerAwsNodePoolConfigConfigEncryption(o interface{}) *containeraws.NodePoolConfigConfigEncryption { + if o == nil { + return containeraws.EmptyNodePoolConfigConfigEncryption + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return containeraws.EmptyNodePoolConfigConfigEncryption + } + obj := objArr[0].(map[string]interface{}) + return &containeraws.NodePoolConfigConfigEncryption{ + KmsKeyArn: dcl.String(obj["kms_key_arn"].(string)), + } +} + +func flattenContainerAwsNodePoolConfigConfigEncryption(obj *containeraws.NodePoolConfigConfigEncryption) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "kms_key_arn": obj.KmsKeyArn, + } + + return []interface{}{transformed} + +} + +func expandContainerAwsNodePoolConfigAutoscalingMetricsCollection(o interface{}) *containeraws.NodePoolConfigAutoscalingMetricsCollection { + if o == nil { + return containeraws.EmptyNodePoolConfigAutoscalingMetricsCollection + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return 
containeraws.EmptyNodePoolConfigAutoscalingMetricsCollection + } + obj := objArr[0].(map[string]interface{}) + return &containeraws.NodePoolConfigAutoscalingMetricsCollection{ + Granularity: dcl.String(obj["granularity"].(string)), + Metrics: tpgdclresource.ExpandStringArray(obj["metrics"]), + } +} + +func flattenContainerAwsNodePoolConfigAutoscalingMetricsCollection(obj *containeraws.NodePoolConfigAutoscalingMetricsCollection) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "granularity": obj.Granularity, + "metrics": obj.Metrics, +{{- if ne $.TargetVersionName "ga" }} + } + + return []interface{}{transformed} + +} + +func expandContainerAwsNodePoolConfigInstancePlacement(o interface{}) *containeraws.NodePoolConfigInstancePlacement { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &containeraws.NodePoolConfigInstancePlacement{ + Tenancy: containeraws.NodePoolConfigInstancePlacementTenancyEnumRef(obj["tenancy"].(string)), + } +} + +func flattenContainerAwsNodePoolConfigInstancePlacement(obj *containeraws.NodePoolConfigInstancePlacement) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "tenancy": obj.Tenancy, +{{- end }} + } + + return []interface{}{transformed} + +} + +func expandContainerAwsNodePoolConfigProxyConfig(o interface{}) *containeraws.NodePoolConfigProxyConfig { + if o == nil { + return containeraws.EmptyNodePoolConfigProxyConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return containeraws.EmptyNodePoolConfigProxyConfig + } + obj := objArr[0].(map[string]interface{}) + return &containeraws.NodePoolConfigProxyConfig{ + SecretArn: dcl.String(obj["secret_arn"].(string)), + SecretVersion: dcl.String(obj["secret_version"].(string)), + } +} + +func 
flattenContainerAwsNodePoolConfigProxyConfig(obj *containeraws.NodePoolConfigProxyConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "secret_arn": obj.SecretArn, + "secret_version": obj.SecretVersion, + } + + return []interface{}{transformed} + +} + +func expandContainerAwsNodePoolConfigRootVolume(o interface{}) *containeraws.NodePoolConfigRootVolume { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &containeraws.NodePoolConfigRootVolume{ + Iops: dcl.Int64OrNil(int64(obj["iops"].(int))), + KmsKeyArn: dcl.String(obj["kms_key_arn"].(string)), + SizeGib: dcl.Int64OrNil(int64(obj["size_gib"].(int))), + Throughput: dcl.Int64OrNil(int64(obj["throughput"].(int))), + VolumeType: containeraws.NodePoolConfigRootVolumeVolumeTypeEnumRef(obj["volume_type"].(string)), + } +} + +func flattenContainerAwsNodePoolConfigRootVolume(obj *containeraws.NodePoolConfigRootVolume) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "iops": obj.Iops, + "kms_key_arn": obj.KmsKeyArn, + "size_gib": obj.SizeGib, + "throughput": obj.Throughput, + "volume_type": obj.VolumeType, + } + + return []interface{}{transformed} + +} + +{{- if ne $.TargetVersionName "ga" }} +func expandContainerAwsNodePoolConfigSpotConfig(o interface{}) *containeraws.NodePoolConfigSpotConfig { + if o == nil { + return containeraws.EmptyNodePoolConfigSpotConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return containeraws.EmptyNodePoolConfigSpotConfig + } + obj := objArr[0].(map[string]interface{}) + return &containeraws.NodePoolConfigSpotConfig{ + InstanceTypes: tpgdclresource.ExpandStringArray(obj["instance_types"]), + } +} + +func flattenContainerAwsNodePoolConfigSpotConfig(obj *containeraws.NodePoolConfigSpotConfig) interface{} { + if obj 
== nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "instance_types": obj.InstanceTypes, + } + + return []interface{}{transformed} + +} + +{{- end }} +func expandContainerAwsNodePoolConfigSshConfig(o interface{}) *containeraws.NodePoolConfigSshConfig { + if o == nil { + return containeraws.EmptyNodePoolConfigSshConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return containeraws.EmptyNodePoolConfigSshConfig + } + obj := objArr[0].(map[string]interface{}) + return &containeraws.NodePoolConfigSshConfig{ + Ec2KeyPair: dcl.String(obj["ec2_key_pair"].(string)), + } +} + +func flattenContainerAwsNodePoolConfigSshConfig(obj *containeraws.NodePoolConfigSshConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "ec2_key_pair": obj.Ec2KeyPair, + } + + return []interface{}{transformed} + +} +func expandContainerAwsNodePoolConfigTaintsArray(o interface{}) []containeraws.NodePoolConfigTaints { + if o == nil { + return make([]containeraws.NodePoolConfigTaints, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return make([]containeraws.NodePoolConfigTaints, 0) + } + + items := make([]containeraws.NodePoolConfigTaints, 0, len(objs)) + for _, item := range objs { + i := expandContainerAwsNodePoolConfigTaints(item) + items = append(items, *i) + } + + return items +} + +func expandContainerAwsNodePoolConfigTaints(o interface{}) *containeraws.NodePoolConfigTaints { + if o == nil { + return containeraws.EmptyNodePoolConfigTaints + } + + obj := o.(map[string]interface{}) + return &containeraws.NodePoolConfigTaints{ + Effect: containeraws.NodePoolConfigTaintsEffectEnumRef(obj["effect"].(string)), + Key: dcl.String(obj["key"].(string)), + Value: dcl.String(obj["value"].(string)), + } +} + +func flattenContainerAwsNodePoolConfigTaintsArray(objs []containeraws.NodePoolConfigTaints) []interface{} { + if objs == nil { + return nil + } + 
+ items := []interface{}{} + for _, item := range objs { + i := flattenContainerAwsNodePoolConfigTaints(&item) + items = append(items, i) + } + + return items +} + +func flattenContainerAwsNodePoolConfigTaints(obj *containeraws.NodePoolConfigTaints) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "effect": obj.Effect, + "key": obj.Key, + "value": obj.Value, + } + + return transformed + +} + +func expandContainerAwsNodePoolMaxPodsConstraint(o interface{}) *containeraws.NodePoolMaxPodsConstraint { + if o == nil { + return containeraws.EmptyNodePoolMaxPodsConstraint + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return containeraws.EmptyNodePoolMaxPodsConstraint + } + obj := objArr[0].(map[string]interface{}) + return &containeraws.NodePoolMaxPodsConstraint{ + MaxPodsPerNode: dcl.Int64(int64(obj["max_pods_per_node"].(int))), + } +} + +func flattenContainerAwsNodePoolMaxPodsConstraint(obj *containeraws.NodePoolMaxPodsConstraint) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "max_pods_per_node": obj.MaxPodsPerNode, + } + + return []interface{}{transformed} + +} + +func expandContainerAwsNodePoolKubeletConfig(o interface{}) *containeraws.NodePoolKubeletConfig { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &containeraws.NodePoolKubeletConfig{ + CpuCfsQuota: dcl.Bool(obj["cpu_cfs_quota"].(bool)), + CpuCfsQuotaPeriod: dcl.String(obj["cpu_cfs_quota_period"].(string)), + CpuManagerPolicy: containeraws.NodePoolKubeletConfigCpuManagerPolicyEnumRef(obj["cpu_manager_policy"].(string)), + PodPidsLimit: dcl.Int64(int64(obj["pod_pids_limit"].(int))), + } +} + +func flattenContainerAwsNodePoolKubeletConfig(obj *containeraws.NodePoolKubeletConfig) interface{} { + if obj == nil || obj.Empty() { + return 
nil + } + transformed := map[string]interface{}{ + "cpu_cfs_quota": obj.CpuCfsQuota, + "cpu_cfs_quota_period": obj.CpuCfsQuotaPeriod, + "cpu_manager_policy": obj.CpuManagerPolicy, + "pod_pids_limit": obj.PodPidsLimit, + } + + return []interface{}{transformed} + +} + +func expandContainerAwsNodePoolManagement(o interface{}) *containeraws.NodePoolManagement { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &containeraws.NodePoolManagement{ + AutoRepair: dcl.Bool(obj["auto_repair"].(bool)), + } +} + +func flattenContainerAwsNodePoolManagement(obj *containeraws.NodePoolManagement) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "auto_repair": obj.AutoRepair, + } + + return []interface{}{transformed} + +} + +func expandContainerAwsNodePoolUpdateSettings(o interface{}) *containeraws.NodePoolUpdateSettings { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &containeraws.NodePoolUpdateSettings{ + SurgeSettings: expandContainerAwsNodePoolUpdateSettingsSurgeSettings(obj["surge_settings"]), + } +} + +func flattenContainerAwsNodePoolUpdateSettings(obj *containeraws.NodePoolUpdateSettings) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "surge_settings": flattenContainerAwsNodePoolUpdateSettingsSurgeSettings(obj.SurgeSettings), + } + + return []interface{}{transformed} + +} + +func expandContainerAwsNodePoolUpdateSettingsSurgeSettings(o interface{}) *containeraws.NodePoolUpdateSettingsSurgeSettings { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return 
&containeraws.NodePoolUpdateSettingsSurgeSettings{ + MaxSurge: dcl.Int64OrNil(int64(obj["max_surge"].(int))), + MaxUnavailable: dcl.Int64OrNil(int64(obj["max_unavailable"].(int))), + } +} + +func flattenContainerAwsNodePoolUpdateSettingsSurgeSettings(obj *containeraws.NodePoolUpdateSettingsSurgeSettings) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "max_surge": obj.MaxSurge, + "max_unavailable": obj.MaxUnavailable, + } + + return []interface{}{transformed} + +} + +func flattenContainerAwsNodePoolAnnotations(v map[string]string, d *schema.ResourceData) interface{} { + if v == nil { + return nil + } + + transformed := make(map[string]interface{}) + if l, ok := d.Get("annotations").(map[string]interface{}); ok { + for k := range l { + transformed[k] = v[k] + } + } + + return transformed +} diff --git a/mmv1/third_party/terraform/services/containeraws/resource_container_aws_node_pool_generated_test.go.tmpl b/mmv1/third_party/terraform/services/containeraws/resource_container_aws_node_pool_generated_test.go.tmpl new file mode 100644 index 000000000000..8c06b68622d3 --- /dev/null +++ b/mmv1/third_party/terraform/services/containeraws/resource_container_aws_node_pool_generated_test.go.tmpl @@ -0,0 +1,1587 @@ +package containeraws_test + +import ( + "context" + "fmt" + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" +{{- if eq $.TargetVersionName "ga" }} + containeraws "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws" +{{- else }} + containeraws "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/beta" +{{- end }} + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" 
+ transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func TestAccContainerAwsNodePool_BasicHandWritten(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "aws_acct_id": "111111111111", + "aws_db_key": "00000000-0000-0000-0000-17aad2f0f61f", + "aws_region": "us-west-2", + "aws_sg": "sg-0b3f63cb91b247628", + "aws_subnet": "subnet-0b3f63cb91b247628", + "aws_vol_key": "00000000-0000-0000-0000-17aad2f0f61f", + "aws_vpc": "vpc-0b3f63cb91b247628", + "byo_prefix": "mmv2", + "project_name": envvar.GetTestProjectFromEnv(), + "project_number": envvar.GetTestProjectNumberFromEnv(), + "service_acct": envvar.GetTestServiceAccountFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerAwsNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerAwsNodePool_BasicHandWritten(context), + }, + { + ResourceName: "google_container_aws_node_pool.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"fleet.0.project", "management.#", "management.0.%", "management.0.auto_repair", "annotations"}, + }, + { + Config: testAccContainerAwsNodePool_BasicHandWrittenUpdate0(context), + }, + { + ResourceName: "google_container_aws_node_pool.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"fleet.0.project", "management.#", "management.0.%", "management.0.auto_repair", "annotations"}, + }, + }, + }) +} +func TestAccContainerAwsNodePool_BasicEnumHandWritten(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "aws_acct_id": "111111111111", + "aws_db_key": "00000000-0000-0000-0000-17aad2f0f61f", + "aws_region": "us-west-2", + "aws_sg": "sg-0b3f63cb91b247628", + "aws_subnet": "subnet-0b3f63cb91b247628", + 
"aws_vol_key": "00000000-0000-0000-0000-17aad2f0f61f", + "aws_vpc": "vpc-0b3f63cb91b247628", + "byo_prefix": "mmv2", + "project_name": envvar.GetTestProjectFromEnv(), + "project_number": envvar.GetTestProjectNumberFromEnv(), + "service_acct": envvar.GetTestServiceAccountFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerAwsNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerAwsNodePool_BasicEnumHandWritten(context), + }, + { + ResourceName: "google_container_aws_node_pool.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"fleet.0.project", "management.#", "management.0.%", "management.0.auto_repair", "annotations"}, + }, + { + Config: testAccContainerAwsNodePool_BasicEnumHandWrittenUpdate0(context), + }, + { + ResourceName: "google_container_aws_node_pool.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"fleet.0.project", "management.#", "management.0.%", "management.0.auto_repair", "annotations"}, + }, + }, + }) +} +{{- if ne $.TargetVersionName "ga" }} +func TestAccContainerAwsNodePool_BetaBasicHandWritten(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "aws_acct_id": "111111111111", + "aws_db_key": "00000000-0000-0000-0000-17aad2f0f61f", + "aws_region": "us-west-2", + "aws_sg": "sg-0b3f63cb91b247628", + "aws_subnet": "subnet-0b3f63cb91b247628", + "aws_vol_key": "00000000-0000-0000-0000-17aad2f0f61f", + "aws_vpc": "vpc-0b3f63cb91b247628", + "byo_prefix": "mmv2", + "project_name": envvar.GetTestProjectFromEnv(), + "project_number": envvar.GetTestProjectNumberFromEnv(), + "service_acct": envvar.GetTestServiceAccountFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ 
+ PreCheck: func() { acctest.AccTestPreCheck(t) }, + + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckContainerAwsNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerAwsNodePool_BetaBasicHandWritten(context), + }, + { + ResourceName: "google_container_aws_node_pool.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"fleet.0.project", "management.#", "management.0.%", "management.0.auto_repair", "annotations"}, + }, + { + Config: testAccContainerAwsNodePool_BetaBasicHandWrittenUpdate0(context), + }, + { + ResourceName: "google_container_aws_node_pool.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"fleet.0.project", "management.#", "management.0.%", "management.0.auto_repair", "annotations"}, + }, + }, + }) +} +func TestAccContainerAwsNodePool_BetaBasicEnumHandWritten(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "aws_acct_id": "111111111111", + "aws_db_key": "00000000-0000-0000-0000-17aad2f0f61f", + "aws_region": "us-west-2", + "aws_sg": "sg-0b3f63cb91b247628", + "aws_subnet": "subnet-0b3f63cb91b247628", + "aws_vol_key": "00000000-0000-0000-0000-17aad2f0f61f", + "aws_vpc": "vpc-0b3f63cb91b247628", + "byo_prefix": "mmv2", + "project_name": envvar.GetTestProjectFromEnv(), + "project_number": envvar.GetTestProjectNumberFromEnv(), + "service_acct": envvar.GetTestServiceAccountFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckContainerAwsNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerAwsNodePool_BetaBasicEnumHandWritten(context), + }, + { + ResourceName: "google_container_aws_node_pool.primary", + ImportState: true, + 
ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"fleet.0.project", "management.#", "management.0.%", "management.0.auto_repair", "annotations"}, + }, + { + Config: testAccContainerAwsNodePool_BetaBasicEnumHandWrittenUpdate0(context), + }, + { + ResourceName: "google_container_aws_node_pool.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"fleet.0.project", "management.#", "management.0.%", "management.0.auto_repair", "annotations"}, + }, + }, + }) +} +{{- end }} + +func testAccContainerAwsNodePool_BasicHandWritten(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_container_aws_versions" "versions" { + project = "%{project_name}" + location = "us-west1" +} + +resource "google_container_aws_cluster" "primary" { + authorization { + admin_users { + username = "%{service_acct}" + } + } + + aws_region = "%{aws_region}" + + control_plane { + aws_services_authentication { + role_arn = "arn:aws:iam::%{aws_acct_id}:role/%{byo_prefix}-1p-dev-oneplatform" + role_session_name = "%{byo_prefix}-1p-dev-session" + } + + config_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + database_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + iam_instance_profile = "%{byo_prefix}-1p-dev-controlplane" + subnet_ids = ["%{aws_subnet}"] + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + instance_type = "t3.medium" + + main_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + 
volume_type = "GP3" + } + + security_group_ids = ["%{aws_sg}"] + + ssh_config { + ec2_key_pair = "%{byo_prefix}-1p-dev-ssh" + } + + tags = { + owner = "%{service_acct}" + } + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.2.0.0/16"] + service_address_cidr_blocks = ["10.1.0.0/16"] + vpc_id = "%{aws_vpc}" + } + + annotations = { + label-one = "value-one" + } + + description = "A sample aws cluster" + project = "%{project_name}" +} + + +resource "google_container_aws_node_pool" "primary" { + autoscaling { + max_node_count = 5 + min_node_count = 1 + } + + cluster = google_container_aws_cluster.primary.name + + config { + config_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + iam_instance_profile = "%{byo_prefix}-1p-dev-nodepool" + instance_type = "t3.medium" + + labels = { + label-one = "value-one" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + security_group_ids = ["%{aws_sg}"] + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + ssh_config { + ec2_key_pair = "%{byo_prefix}-1p-dev-ssh" + } + + tags = { + tag-one = "value-one" + } + + taints { + effect = "PREFER_NO_SCHEDULE" + key = "taint-key" + value = "taint-value" + } + } + + location = "us-west1" + + max_pods_constraint { + max_pods_per_node = 110 + } + + name = "tf-test-node-pool-name%{random_suffix}" + subnet_id = "%{aws_subnet}" + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + + annotations = { + label-one = "value-one" + } + + management { + auto_repair = true + } + + kubelet_config { + cpu_manager_policy = "none" + cpu_cfs_quota = true + 
cpu_cfs_quota_period = "100ms" + pod_pids_limit = 1024 + } + + project = "%{project_name}" +} + +`, context) +} + +func testAccContainerAwsNodePool_BasicHandWrittenUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_container_aws_versions" "versions" { + project = "%{project_name}" + location = "us-west1" +} + +resource "google_container_aws_cluster" "primary" { + authorization { + admin_users { + username = "%{service_acct}" + } + } + + aws_region = "%{aws_region}" + + control_plane { + aws_services_authentication { + role_arn = "arn:aws:iam::%{aws_acct_id}:role/%{byo_prefix}-1p-dev-oneplatform" + role_session_name = "%{byo_prefix}-1p-dev-session" + } + + config_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + database_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + iam_instance_profile = "%{byo_prefix}-1p-dev-controlplane" + subnet_ids = ["%{aws_subnet}"] + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + instance_type = "t3.medium" + + main_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + security_group_ids = ["%{aws_sg}"] + + ssh_config { + ec2_key_pair = "%{byo_prefix}-1p-dev-ssh" + } + + tags = { + owner = "%{service_acct}" + } + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.2.0.0/16"] + service_address_cidr_blocks = ["10.1.0.0/16"] + 
vpc_id = "%{aws_vpc}" + } + + annotations = { + label-one = "value-one" + } + + description = "A sample aws cluster" + project = "%{project_name}" +} + +resource "google_container_aws_node_pool" "primary" { + autoscaling { + max_node_count = 5 + min_node_count = 1 + } + + cluster = google_container_aws_cluster.primary.name + + config { + config_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + iam_instance_profile = "%{byo_prefix}-1p-dev-nodepool" + instance_type = "t3.large" + + labels = { + label-one = "value-one" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + security_group_ids = ["%{aws_sg}"] + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + ssh_config { + ec2_key_pair = "%{byo_prefix}-1p-dev-ssh" + } + + tags = { + tag-one = "value-one" + } + + taints { + effect = "PREFER_NO_SCHEDULE" + key = "taint-key" + value = "taint-value" + } + } + + location = "us-west1" + + max_pods_constraint { + max_pods_per_node = 110 + } + + name = "tf-test-node-pool-name%{random_suffix}" + subnet_id = "%{aws_subnet}" + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + + annotations = { + label-two = "value-two" + } + + management { + auto_repair = false + } + + kubelet_config { + cpu_manager_policy = "none" + cpu_cfs_quota = true + cpu_cfs_quota_period = "100ms" + pod_pids_limit = 1024 + } + + project = "%{project_name}" +} + +`, context) +} + +func testAccContainerAwsNodePool_BasicEnumHandWritten(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_container_aws_versions" "versions" { + project = "%{project_name}" + location = "us-west1" +} + +resource "google_container_aws_cluster" "primary" { + authorization { + 
admin_users { + username = "%{service_acct}" + } + } + + aws_region = "%{aws_region}" + + control_plane { + aws_services_authentication { + role_arn = "arn:aws:iam::%{aws_acct_id}:role/%{byo_prefix}-1p-dev-oneplatform" + role_session_name = "%{byo_prefix}-1p-dev-session" + } + + config_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + database_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + iam_instance_profile = "%{byo_prefix}-1p-dev-controlplane" + subnet_ids = ["%{aws_subnet}"] + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + instance_type = "t3.medium" + + main_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + security_group_ids = ["%{aws_sg}"] + + ssh_config { + ec2_key_pair = "%{byo_prefix}-1p-dev-ssh" + } + + tags = { + owner = "%{service_acct}" + } + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.2.0.0/16"] + service_address_cidr_blocks = ["10.1.0.0/16"] + vpc_id = "%{aws_vpc}" + } + + annotations = { + label-one = "value-one" + } + + description = "A sample aws cluster" + project = "%{project_name}" +} + + +resource "google_container_aws_node_pool" "primary" { + autoscaling { + max_node_count = 5 + min_node_count = 1 + } + + cluster = google_container_aws_cluster.primary.name + + config { + config_encryption { + kms_key_arn = 
"arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + iam_instance_profile = "%{byo_prefix}-1p-dev-nodepool" + instance_type = "t3.medium" + + labels = { + label-one = "value-one" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "gp3" + } + + security_group_ids = ["%{aws_sg}"] + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + ssh_config { + ec2_key_pair = "%{byo_prefix}-1p-dev-ssh" + } + + tags = { + tag-one = "value-one" + } + + taints { + effect = "prefer_no_schedule" + key = "taint-key" + value = "taint-value" + } + } + + location = "us-west1" + + max_pods_constraint { + max_pods_per_node = 110 + } + + name = "tf-test-node-pool-name%{random_suffix}" + subnet_id = "%{aws_subnet}" + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + + annotations = { + label-one = "value-one" + } + + project = "%{project_name}" +} + + +`, context) +} + +func testAccContainerAwsNodePool_BasicEnumHandWrittenUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_container_aws_versions" "versions" { + project = "%{project_name}" + location = "us-west1" +} + +resource "google_container_aws_cluster" "primary" { + authorization { + admin_users { + username = "%{service_acct}" + } + } + + aws_region = "%{aws_region}" + + control_plane { + aws_services_authentication { + role_arn = "arn:aws:iam::%{aws_acct_id}:role/%{byo_prefix}-1p-dev-oneplatform" + role_session_name = "%{byo_prefix}-1p-dev-session" + } + + config_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + database_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + iam_instance_profile = 
"%{byo_prefix}-1p-dev-controlplane" + subnet_ids = ["%{aws_subnet}"] + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + instance_type = "t3.medium" + + main_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + security_group_ids = ["%{aws_sg}"] + + ssh_config { + ec2_key_pair = "%{byo_prefix}-1p-dev-ssh" + } + + tags = { + owner = "%{service_acct}" + } + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.2.0.0/16"] + service_address_cidr_blocks = ["10.1.0.0/16"] + vpc_id = "%{aws_vpc}" + } + + annotations = { + label-one = "value-one" + } + + description = "A sample aws cluster" + project = "%{project_name}" +} + +resource "google_container_aws_node_pool" "primary" { + autoscaling { + max_node_count = 5 + min_node_count = 1 + } + + cluster = google_container_aws_cluster.primary.name + + config { + config_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + iam_instance_profile = "%{byo_prefix}-1p-dev-nodepool" + instance_type = "t3.large" + + labels = { + label-one = "value-one" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "gp3" + } + + security_group_ids = ["%{aws_sg}"] + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = 
"12345678-ABCD-EFGH-IJKL-987654321098" + } + + ssh_config { + ec2_key_pair = "%{byo_prefix}-1p-dev-ssh" + } + + tags = { + tag-one = "value-one" + } + + taints { + effect = "prefer_no_schedule" + key = "taint-key" + value = "taint-value" + } +{{- if ne $.TargetVersionName "ga" }} + } + + location = "us-west1" + + max_pods_constraint { + max_pods_per_node = 110 + } + + name = "tf-test-node-pool-name%{random_suffix}" + subnet_id = "%{aws_subnet}" + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + + annotations = { + label-two = "value-two" + } + + project = "%{project_name}" +} + + +`, context) +} + +func testAccContainerAwsNodePool_BetaBasicHandWritten(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_container_aws_versions" "versions" { + provider = google-beta + project = "%{project_name}" + location = "us-west1" +} + +resource "google_container_aws_cluster" "primary" { + provider = google-beta + authorization { + admin_users { + username = "%{service_acct}" + } + } + + aws_region = "%{aws_region}" + + control_plane { + aws_services_authentication { + role_arn = "arn:aws:iam::%{aws_acct_id}:role/%{byo_prefix}-1p-dev-oneplatform" + role_session_name = "%{byo_prefix}-1p-dev-session" + } + + config_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + database_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + iam_instance_profile = "%{byo_prefix}-1p-dev-controlplane" + subnet_ids = ["%{aws_subnet}"] + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + instance_type = "t3.medium" + + main_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = 
"12345678-ABCD-EFGH-IJKL-987654321098" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + security_group_ids = ["%{aws_sg}"] + + ssh_config { + ec2_key_pair = "%{byo_prefix}-1p-dev-ssh" + } + + tags = { + owner = "%{service_acct}" + } + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.2.0.0/16"] + service_address_cidr_blocks = ["10.1.0.0/16"] + vpc_id = "%{aws_vpc}" + } + + annotations = { + label-one = "value-one" + } + + description = "A sample aws cluster" + project = "%{project_name}" +} + + +resource "google_container_aws_node_pool" "primary" { + provider = google-beta + autoscaling { + max_node_count = 5 + min_node_count = 1 + } + + cluster = google_container_aws_cluster.primary.name + + config { + config_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + iam_instance_profile = "%{byo_prefix}-1p-dev-nodepool" + instance_type = "t3.medium" + + labels = { + label-one = "value-one" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + security_group_ids = ["%{aws_sg}"] + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + ssh_config { + ec2_key_pair = "%{byo_prefix}-1p-dev-ssh" + } + + tags = { + tag-one = "value-one" + } + + taints { + effect = "PREFER_NO_SCHEDULE" + key = "taint-key" + value = "taint-value" + } + + instance_placement { + tenancy = "DEDICATED" + } + + image_type = "ubuntu" + } + + location = "us-west1" + + max_pods_constraint { + max_pods_per_node = 110 + } + + name = "tf-test-node-pool-name%{random_suffix}" + subnet_id = 
"%{aws_subnet}" + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + + management { + auto_repair = true + } + + kubelet_config { + cpu_manager_policy = "none" + cpu_cfs_quota = true + cpu_cfs_quota_period = "100ms" + pod_pids_limit = 1024 + } + + annotations = { + label-one = "value-one" + } + + update_settings { + surge_settings { + max_surge = 1 + max_unavailable = 0 + } + } + + project = "%{project_name}" +} + +`, context) +} + +func testAccContainerAwsNodePool_BetaBasicHandWrittenUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_container_aws_versions" "versions" { + provider = google-beta + project = "%{project_name}" + location = "us-west1" +} + +resource "google_container_aws_cluster" "primary" { + provider = google-beta + authorization { + admin_users { + username = "%{service_acct}" + } + } + + aws_region = "%{aws_region}" + + control_plane { + aws_services_authentication { + role_arn = "arn:aws:iam::%{aws_acct_id}:role/%{byo_prefix}-1p-dev-oneplatform" + role_session_name = "%{byo_prefix}-1p-dev-session" + } + + config_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + database_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + iam_instance_profile = "%{byo_prefix}-1p-dev-controlplane" + subnet_ids = ["%{aws_subnet}"] + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + instance_type = "t3.medium" + + main_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + 
volume_type = "GP3" + } + + security_group_ids = ["%{aws_sg}"] + + ssh_config { + ec2_key_pair = "%{byo_prefix}-1p-dev-ssh" + } + + tags = { + owner = "%{service_acct}" + } + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.2.0.0/16"] + service_address_cidr_blocks = ["10.1.0.0/16"] + vpc_id = "%{aws_vpc}" + } + + annotations = { + label-one = "value-one" + } + + description = "A sample aws cluster" + project = "%{project_name}" +} + +resource "google_container_aws_node_pool" "primary" { + provider = google-beta + autoscaling { + max_node_count = 5 + min_node_count = 1 + } + + cluster = google_container_aws_cluster.primary.name + + config { + config_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + iam_instance_profile = "%{byo_prefix}-1p-dev-nodepool" + instance_type = "t3.large" + + labels = { + label-one = "value-one" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + security_group_ids = ["%{aws_sg}"] + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + ssh_config { + ec2_key_pair = "%{byo_prefix}-1p-dev-ssh" + } + + tags = { + tag-one = "value-one" + } + + taints { + effect = "PREFER_NO_SCHEDULE" + key = "taint-key" + value = "taint-value" + } + + instance_placement { + tenancy = "DEDICATED" + } + + image_type = "ubuntu" + } + + location = "us-west1" + + max_pods_constraint { + max_pods_per_node = 110 + } + + name = "tf-test-node-pool-name%{random_suffix}" + subnet_id = "%{aws_subnet}" + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + + management { + auto_repair = false + } + + kubelet_config { + cpu_manager_policy 
= "none" + cpu_cfs_quota = true + cpu_cfs_quota_period = "100ms" + pod_pids_limit = 1024 + } + + annotations = { + label-two = "value-two" + } + + update_settings { + surge_settings { + max_surge = 1 + max_unavailable = 0 + } + } + + project = "%{project_name}" +} + +`, context) +} + +func testAccContainerAwsNodePool_BetaBasicEnumHandWritten(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_container_aws_versions" "versions" { + provider = google-beta + project = "%{project_name}" + location = "us-west1" +} + +resource "google_container_aws_cluster" "primary" { + provider = google-beta + authorization { + admin_users { + username = "%{service_acct}" + } + } + + aws_region = "%{aws_region}" + + control_plane { + aws_services_authentication { + role_arn = "arn:aws:iam::%{aws_acct_id}:role/%{byo_prefix}-1p-dev-oneplatform" + role_session_name = "%{byo_prefix}-1p-dev-session" + } + + config_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + database_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + iam_instance_profile = "%{byo_prefix}-1p-dev-controlplane" + subnet_ids = ["%{aws_subnet}"] + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + instance_type = "t3.medium" + + main_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + security_group_ids = ["%{aws_sg}"] + + ssh_config { + ec2_key_pair = "%{byo_prefix}-1p-dev-ssh" + } + + tags = { + owner = "%{service_acct}" + } + } + + 
fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.2.0.0/16"] + service_address_cidr_blocks = ["10.1.0.0/16"] + vpc_id = "%{aws_vpc}" + } + + annotations = { + label-one = "value-one" + } + + description = "A sample aws cluster" + project = "%{project_name}" +} + + +resource "google_container_aws_node_pool" "primary" { + provider = google-beta + autoscaling { + max_node_count = 5 + min_node_count = 1 + } + + cluster = google_container_aws_cluster.primary.name + + config { + config_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + iam_instance_profile = "%{byo_prefix}-1p-dev-nodepool" + instance_type = "t3.medium" + + labels = { + label-one = "value-one" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "gp3" + } + + security_group_ids = ["%{aws_sg}"] + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + ssh_config { + ec2_key_pair = "%{byo_prefix}-1p-dev-ssh" + } + + tags = { + tag-one = "value-one" + } + + taints { + effect = "prefer_no_schedule" + key = "taint-key" + value = "taint-value" + } + + instance_placement { + tenancy = "dedicated" + } + + image_type = "ubuntu" + } + + location = "us-west1" + + max_pods_constraint { + max_pods_per_node = 110 + } + + name = "tf-test-node-pool-name%{random_suffix}" + subnet_id = "%{aws_subnet}" + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + + annotations = { + label-one = "value-one" + } + + project = "%{project_name}" +} + + +`, context) +} + +func testAccContainerAwsNodePool_BetaBasicEnumHandWrittenUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +data 
"google_container_aws_versions" "versions" { + provider = google-beta + project = "%{project_name}" + location = "us-west1" +} + +resource "google_container_aws_cluster" "primary" { + provider = google-beta + authorization { + admin_users { + username = "%{service_acct}" + } + } + + aws_region = "%{aws_region}" + + control_plane { + aws_services_authentication { + role_arn = "arn:aws:iam::%{aws_acct_id}:role/%{byo_prefix}-1p-dev-oneplatform" + role_session_name = "%{byo_prefix}-1p-dev-session" + } + + config_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + database_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + iam_instance_profile = "%{byo_prefix}-1p-dev-controlplane" + subnet_ids = ["%{aws_subnet}"] + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + instance_type = "t3.medium" + + main_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + security_group_ids = ["%{aws_sg}"] + + ssh_config { + ec2_key_pair = "%{byo_prefix}-1p-dev-ssh" + } + + tags = { + owner = "%{service_acct}" + } + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.2.0.0/16"] + service_address_cidr_blocks = ["10.1.0.0/16"] + vpc_id = "%{aws_vpc}" + } + + annotations = { + label-one = "value-one" + } + + description = "A sample aws cluster" + project = "%{project_name}" +} + +resource "google_container_aws_node_pool" 
"primary" { + provider = google-beta + autoscaling { + max_node_count = 5 + min_node_count = 1 + } + + cluster = google_container_aws_cluster.primary.name + + config { + config_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + iam_instance_profile = "%{byo_prefix}-1p-dev-nodepool" + instance_type = "t3.large" + + labels = { + label-one = "value-one" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "gp3" + } + + security_group_ids = ["%{aws_sg}"] + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + ssh_config { + ec2_key_pair = "%{byo_prefix}-1p-dev-ssh" + } + + tags = { + tag-one = "value-one" + } + + taints { + effect = "prefer_no_schedule" + key = "taint-key" + value = "taint-value" + } + + instance_placement { + tenancy = "dedicated" + } + + image_type = "ubuntu" +{{- end }} + } + + location = "us-west1" + + max_pods_constraint { + max_pods_per_node = 110 + } + + name = "tf-test-node-pool-name%{random_suffix}" + subnet_id = "%{aws_subnet}" + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + + annotations = { + label-two = "value-two" + } + + project = "%{project_name}" +} + + +`, context) +} + +func testAccCheckContainerAwsNodePoolDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "rs.google_container_aws_node_pool" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + billingProject := "" + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + obj := &containeraws.NodePool{ + Cluster: dcl.String(rs.Primary.Attributes["cluster"]), + 
Location: dcl.String(rs.Primary.Attributes["location"]), + Name: dcl.String(rs.Primary.Attributes["name"]), + SubnetId: dcl.String(rs.Primary.Attributes["subnet_id"]), + Version: dcl.String(rs.Primary.Attributes["version"]), + Project: dcl.StringOrNil(rs.Primary.Attributes["project"]), + CreateTime: dcl.StringOrNil(rs.Primary.Attributes["create_time"]), + Etag: dcl.StringOrNil(rs.Primary.Attributes["etag"]), + Reconciling: dcl.Bool(rs.Primary.Attributes["reconciling"] == "true"), + State: containeraws.NodePoolStateEnumRef(rs.Primary.Attributes["state"]), + Uid: dcl.StringOrNil(rs.Primary.Attributes["uid"]), + UpdateTime: dcl.StringOrNil(rs.Primary.Attributes["update_time"]), + } + + client := transport_tpg.NewDCLContainerAwsClient(config, config.UserAgent, billingProject, 0) + _, err := client.GetNodePool(context.Background(), obj) + if err == nil { + return fmt.Errorf("google_container_aws_node_pool still exists %v", obj) + } + } + return nil + } +} diff --git a/mmv1/third_party/terraform/services/containerazure/azure_client.go.tmpl b/mmv1/third_party/terraform/services/containerazure/azure_client.go.tmpl new file mode 100644 index 000000000000..0b2059bcf98f --- /dev/null +++ b/mmv1/third_party/terraform/services/containerazure/azure_client.go.tmpl @@ -0,0 +1,371 @@ +package containerazure + +import ( + "context" + "fmt" + "time" + +{{- if ne $.TargetVersionName "ga" }} + "google.golang.org/api/googleapi" +{{- end }} + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +{{- if eq $.TargetVersionName "ga" }} + "google.golang.org/api/googleapi" +{{- end }} +) + +type AzureClient struct { + Name *string `json:"name"` + TenantId *string `json:"tenantId"` + ApplicationId *string `json:"applicationId"` + Certificate *string `json:"certificate"` + Uid *string `json:"uid"` + CreateTime *string `json:"createTime"` + Project *string `json:"project"` + Location *string `json:"location"` +} + +func (r *AzureClient) String() string { + return 
dcl.SprintResource(r) +} + +// Describe returns a simple description of this resource to ensure that automated tools +// can identify it. +func (r *AzureClient) Describe() dcl.ServiceTypeVersion { + return dcl.ServiceTypeVersion{ + Service: "container_azure", + Type: "Client", +{{- if ne $.TargetVersionName "ga" }} + Version: "beta", +{{- else }} + Version: "containerazure", +{{- end }} + } +} + +func (r *AzureClient) ID() (string, error) { + if err := extractClientFields(r); err != nil { + return "", err + } + nr := r.urlNormalized() + params := map[string]interface{}{ + "name": dcl.ValueOrEmptyString(nr.Name), + "tenant_id": dcl.ValueOrEmptyString(nr.TenantId), + "application_id": dcl.ValueOrEmptyString(nr.ApplicationId), + "certificate": dcl.ValueOrEmptyString(nr.Certificate), + "uid": dcl.ValueOrEmptyString(nr.Uid), + "create_time": dcl.ValueOrEmptyString(nr.CreateTime), + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + } + return dcl.Nprintf("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/azureClients/{{ "{{" }}name{{ "}}" }}", params), nil +} + +const ClientMaxPage = -1 + +type ClientList struct { + Items []*AzureClient + + nextToken string + + pageSize int32 + + resource *AzureClient +} + +func (l *ClientList) HasNext() bool { + return l.nextToken != "" +} + +func (l *ClientList) Next(ctx context.Context, c *Client) error { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if !l.HasNext() { + return fmt.Errorf("no next page") + } + items, token, err := c.listClient(ctx, l.resource, l.nextToken, l.pageSize) + if err != nil { + return err + } + l.Items = items + l.nextToken = token + return err +} + +func (c *Client) ListClient(ctx context.Context, project, location string) (*ClientList, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + 
return c.ListClientWithMaxResults(ctx, project, location, ClientMaxPage)
+
+}
+
+func (c *Client) ListClientWithMaxResults(ctx context.Context, project, location string, pageSize int32) (*ClientList, error) {
+	ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second))
+	defer cancel()
+
+	// Create a resource object so that we can use proper url normalization methods.
+	r := &AzureClient{
+		Project:  &project,
+		Location: &location,
+	}
+	items, token, err := c.listClient(ctx, r, "", pageSize)
+	if err != nil {
+		return nil, err
+	}
+	return &ClientList{
+		Items:     items,
+		nextToken: token,
+		pageSize:  pageSize,
+		resource:  r,
+	}, nil
+}
+
+func (c *Client) GetClient(ctx context.Context, r *AzureClient) (*AzureClient, error) {
+	ctx = dcl.ContextWithRequestID(ctx)
+	ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second))
+	defer cancel()
+
+	// This is *purposefully* suppressing errors.
+	// This function is used with url-normalized values + not URL normalized values.
+	// URL Normalized values will throw unintentional errors, since those values are not of the proper parent form.
+ extractClientFields(r) + + b, err := c.getClientRaw(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + return nil, &googleapi.Error{ + Code: 404, + Message: err.Error(), + } + } + return nil, err + } + result, err := unmarshalClient(b, c, r) + if err != nil { + return nil, err + } + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name + + c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) + result, err = canonicalizeClientNewState(c, result, r) + if err != nil { + return nil, err + } + if err := postReadExtractClientFields(result); err != nil { + return result, err + } + c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result) + + return result, nil +} + +func (c *Client) DeleteClient(ctx context.Context, r *AzureClient) error { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if r == nil { + return fmt.Errorf("Client resource is nil") + } + c.Config.Logger.InfoWithContext(ctx, "Deleting Client...") + deleteOp := deleteClientOperation{} + return deleteOp.do(ctx, r, c) +} + +// DeleteAllClient deletes all resources that the filter functions returns true on. 
+func (c *Client) DeleteAllClient(ctx context.Context, project, location string, filter func(*AzureClient) bool) error { + listObj, err := c.ListClient(ctx, project, location) + if err != nil { + return err + } + + err = c.deleteAllClient(ctx, filter, listObj.Items) + if err != nil { + return err + } + for listObj.HasNext() { + err = listObj.Next(ctx, c) + if err != nil { + return nil + } + err = c.deleteAllClient(ctx, filter, listObj.Items) + if err != nil { + return err + } + } + return nil +} + +func (c *Client) ApplyClient(ctx context.Context, rawDesired *AzureClient, opts ...dcl.ApplyOption) (*AzureClient, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + ctx = dcl.ContextWithRequestID(ctx) + var resultNewState *AzureClient + err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + newState, err := applyClientHelper(c, ctx, rawDesired, opts...) + resultNewState = newState + if err != nil { + // If the error is 409, there is conflict in resource update. + // Here we want to apply changes based on latest state. + if dcl.IsConflictError(err) { + return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} + } + return nil, err + } + return nil, nil + }, c.Config.RetryProvider) + return resultNewState, err +} + +func applyClientHelper(c *Client, ctx context.Context, rawDesired *AzureClient, opts ...dcl.ApplyOption) (*AzureClient, error) { + c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyClient...") + c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) + + // 1.1: Validation of user-specified fields in desired state. + if err := rawDesired.validate(); err != nil { + return nil, err + } + + if err := extractClientFields(rawDesired); err != nil { + return nil, err + } + + initial, desired, fieldDiffs, err := c.clientDiffsForRawDesired(ctx, rawDesired, opts...) 
+ if err != nil { + return nil, fmt.Errorf("failed to create a diff: %w", err) + } + + diffs, err := convertFieldDiffsToClientDiffs(c.Config, fieldDiffs, opts) + if err != nil { + return nil, err + } + + // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). + + // 2.3: Lifecycle Directive Check + var create bool + lp := dcl.FetchLifecycleParams(opts) + if initial == nil { + if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} + } + create = true + } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), + } + } else { + for _, d := range diffs { + if d.RequiresRecreate { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), + } + } + if dcl.HasLifecycleParam(lp, dcl.BlockModification) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} + } + } + } + + // 2.4 Imperative Request Planning + var ops []clientApiOperation + if create { + ops = append(ops, &createClientOperation{}) + } else { + for _, d := range diffs { + ops = append(ops, d.UpdateOp) + } + } + c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) + + // 2.5 Request Actuation + for _, op := range ops { + c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) + if err := op.do(ctx, desired, c); err != nil { + c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", op, op, err) + return nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) + } + return applyClientDiff(c, ctx, desired, rawDesired, ops, opts...) 
+} + +func applyClientDiff(c *Client, ctx context.Context, desired *AzureClient, rawDesired *AzureClient, ops []clientApiOperation, opts ...dcl.ApplyOption) (*AzureClient, error) { + // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") + rawNew, err := c.GetClient(ctx, desired) + if err != nil { + return nil, err + } + // Get additional values from the first response. + // These values should be merged into the newState above. + if len(ops) > 0 { + lastOp := ops[len(ops)-1] + if o, ok := lastOp.(*createClientOperation); ok { + if r, hasR := o.FirstResponse(); hasR { + + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") + + fullResp, err := unmarshalMapClient(r, c, rawDesired) + if err != nil { + return nil, err + } + + rawNew, err = canonicalizeClientNewState(c, rawNew, fullResp) + if err != nil { + return nil, err + } + } + } + } + + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) + // 3.2b Canonicalization of raw new state using raw desired state + newState, err := canonicalizeClientNewState(c, rawNew, rawDesired) + if err != nil { + return rawNew, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) + // 3.3 Comparison of the new state and raw desired state. + // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE + newDesired, err := canonicalizeClientDesiredState(rawDesired, newState) + if err != nil { + return newState, err + } + + if err := postReadExtractClientFields(newState); err != nil { + return newState, err + } + + // Need to ensure any transformations made here match acceptably in differ. 
+ if err := postReadExtractClientFields(newDesired); err != nil { + return newState, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) + newDiffs, err := diffClient(c, newDesired, newState) + if err != nil { + return newState, err + } + + if len(newDiffs) == 0 { + c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") + } else { + c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) + diffMessages := make([]string, len(newDiffs)) + for i, d := range newDiffs { + diffMessages[i] = fmt.Sprintf("%v", d) + } + return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} + } + c.Config.Logger.InfoWithContext(ctx, "Done Apply.") + return newState, nil +} diff --git a/mmv1/third_party/terraform/services/containerazure/azure_client_internal.go.tmpl b/mmv1/third_party/terraform/services/containerazure/azure_client_internal.go.tmpl new file mode 100644 index 000000000000..cfc4a2249bad --- /dev/null +++ b/mmv1/third_party/terraform/services/containerazure/azure_client_internal.go.tmpl @@ -0,0 +1,715 @@ +package containerazure + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "strings" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource/operations" +) + +func (r *AzureClient) validate() error { + + if err := dcl.Required(r, "name"); err != nil { + return err + } + if err := dcl.Required(r, "tenantId"); err != nil { + return err + } + if err := dcl.Required(r, "applicationId"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Project, "Project"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Location, "Location"); err != nil { + return err + } + return nil +} +func (r *AzureClient) basePath() string { + params := map[string]interface{}{ + "location": dcl.ValueOrEmptyString(r.Location), + } + return dcl.Nprintf("https://{{ 
"{{" }}location{{ "}}" }}-gkemulticloud.googleapis.com/v1", params) +} + +func (r *AzureClient) getURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/azureClients/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +func (r *AzureClient) listURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/azureClients", nr.basePath(), userBasePath, params), nil + +} + +func (r *AzureClient) createURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/azureClients?azureClientId={{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil + +} + +func (r *AzureClient) deleteURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/azureClients/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +// clientApiOperation represents a mutable operation in the underlying REST +// API such as Create, Update, or Delete. 
+type clientApiOperation interface {
+	do(context.Context, *AzureClient, *Client) error
+}
+
+// listClientRaw issues a single List request (optionally with a page token /
+// page size) and returns the raw response body bytes.
+func (c *Client) listClientRaw(ctx context.Context, r *AzureClient, pageToken string, pageSize int32) ([]byte, error) {
+	u, err := r.urlNormalized().listURL(c.Config.BasePath)
+	if err != nil {
+		return nil, err
+	}
+
+	m := make(map[string]string)
+	if pageToken != "" {
+		m["pageToken"] = pageToken
+	}
+
+	// Only send pageSize when the caller asked for something other than the default.
+	if pageSize != ClientMaxPage {
+		m["pageSize"] = fmt.Sprintf("%v", pageSize)
+	}
+
+	u, err = dcl.AddQueryParams(u, m)
+	if err != nil {
+		return nil, err
+	}
+	resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Response.Body.Close()
+	// NOTE(review): ioutil.ReadAll is deprecated since Go 1.16; io.ReadAll is the
+	// modern equivalent (behavior is identical).
+	return ioutil.ReadAll(resp.Response.Body)
+}
+
+// listClientOperation is the JSON shape of one List response page.
+type listClientOperation struct {
+	AzureClients []map[string]interface{} `json:"azureClients"`
+	Token        string                   `json:"nextPageToken"`
+}
+
+// listClient fetches one page of AzureClient resources and returns the decoded
+// resources plus the next-page token ("" when there are no more pages).
+func (c *Client) listClient(ctx context.Context, r *AzureClient, pageToken string, pageSize int32) ([]*AzureClient, string, error) {
+	b, err := c.listClientRaw(ctx, r, pageToken, pageSize)
+	if err != nil {
+		return nil, "", err
+	}
+
+	var m listClientOperation
+	if err := json.Unmarshal(b, &m); err != nil {
+		return nil, "", err
+	}
+
+	var l []*AzureClient
+	for _, v := range m.AzureClients {
+		res, err := unmarshalMapClient(v, c, r)
+		if err != nil {
+			return nil, m.Token, err
+		}
+		// The List response does not echo parent fields; carry them over from the request.
+		res.Project = r.Project
+		res.Location = r.Location
+		l = append(l, res)
+	}
+
+	return l, m.Token, nil
+}
+
+// deleteAllClient deletes every resource in resources accepted by filter f,
+// collecting (rather than short-circuiting on) individual deletion errors.
+func (c *Client) deleteAllClient(ctx context.Context, f func(*AzureClient) bool, resources []*AzureClient) error {
+	var errors []string
+	for _, res := range resources {
+		if f(res) {
+			// We do not want deleteAll to fail on a deletion or else it will stop deleting other resources.
+			err := c.DeleteClient(ctx, res)
+			if err != nil {
+				errors = append(errors, err.Error())
+			}
+		}
+	}
+	if len(errors) > 0 {
+		return fmt.Errorf("%v", strings.Join(errors, "\n"))
+	} else {
+		return nil
+	}
+}
+
+// deleteClientOperation implements clientApiOperation for DELETE.
+type deleteClientOperation struct{}
+
+// do deletes the AzureClient, waits for the long-running operation, then polls
+// until a subsequent Get reports NotFound. A delete of an already-absent
+// resource succeeds silently.
+func (op *deleteClientOperation) do(ctx context.Context, r *AzureClient, c *Client) error {
+	r, err := c.GetClient(ctx, r)
+	if err != nil {
+		if dcl.IsNotFound(err) {
+			c.Config.Logger.InfoWithContextf(ctx, "Client not found, returning. Original error: %v", err)
+			return nil
+		}
+		c.Config.Logger.WarningWithContextf(ctx, "GetClient checking for existence. error: %v", err)
+		return err
+	}
+
+	u, err := r.deleteURL(c.Config.BasePath)
+	if err != nil {
+		return err
+	}
+
+	// Delete should never have a body
+	body := &bytes.Buffer{}
+	resp, err := dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider)
+	if err != nil {
+		return err
+	}
+
+	// wait for object to be deleted.
+	var o operations.StandardGCPOperation
+	if err := dcl.ParseResponse(resp.Response, &o); err != nil {
+		return err
+	}
+	if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil {
+		return err
+	}
+
+	// We saw a race condition where for some successful delete operation, the Get calls returned resources for a short duration.
+	// This is the reason we are adding retry to handle that case.
+	retriesRemaining := 10
+	// NOTE(review): the error returned by dcl.Do (e.g. dcl.NotDeletedError after
+	// the retries are exhausted) is discarded, so this method reports success
+	// even if the resource is still readable — confirm this is intentional.
+	dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) {
+		_, err := c.GetClient(ctx, r)
+		if dcl.IsNotFound(err) {
+			return nil, nil
+		}
+		if retriesRemaining > 0 {
+			retriesRemaining--
+			return &dcl.RetryDetails{}, dcl.OperationNotDone{}
+		}
+		return nil, dcl.NotDeletedError{ExistingResource: r}
+	}, c.Config.RetryProvider)
+	return nil
+}
+
+// Create operations are similar to Update operations, although they do not have
+// specific request objects. The Create request object is the json encoding of
+// the resource, which is modified by res.marshal to form the base request body.
+type createClientOperation struct {
+	response map[string]interface{}
+}
+
+// FirstResponse returns the operation's first response body and whether one was recorded.
+func (op *createClientOperation) FirstResponse() (map[string]interface{}, bool) {
+	return op.response, len(op.response) > 0
+}
+
+func (op *createClientOperation) do(ctx context.Context, r *AzureClient, c *Client) error {
+	c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r)
+	u, err := r.createURL(c.Config.BasePath)
+	if err != nil {
+		return err
+	}
+
+	req, err := r.marshal(c)
+	if err != nil {
+		return err
+	}
+	resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider)
+	if err != nil {
+		return err
+	}
+	// wait for object to be created.
+	var o operations.StandardGCPOperation
+	if err := dcl.ParseResponse(resp.Response, &o); err != nil {
+		return err
+	}
+	if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil {
+		c.Config.Logger.Warningf("Creation failed after waiting for operation: %v", err)
+		return err
+	}
+	c.Config.Logger.InfoWithContextf(ctx, "Successfully waited for operation")
+	op.response, _ = o.FirstResponse()
+
+	// Verify the resource is readable before declaring success.
+	if _, err := c.GetClient(ctx, r); err != nil {
+		c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err)
+		return err
+	}
+
+	return nil
+}
+
+// getClientRaw performs a GET for the resource and returns the raw response body.
+func (c *Client) getClientRaw(ctx context.Context, r *AzureClient) ([]byte, error) {
+
+	u, err := r.getURL(c.Config.BasePath)
+	if err != nil {
+		return nil, err
+	}
+	resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Response.Body.Close()
+	b, err := ioutil.ReadAll(resp.Response.Body)
+	if err != nil {
+		return nil, err
+	}
+
+	return b, nil
+}
+
+// clientDiffsForRawDesired fetches the live state (using a state hint when one
+// is supplied in opts), canonicalizes both live and desired states, and
+// returns (initial, desired, diffs). initial is nil when the resource does not
+// yet exist.
+func (c *Client) clientDiffsForRawDesired(ctx context.Context, rawDesired *AzureClient, opts ...dcl.ApplyOption) (initial, desired *AzureClient, diffs []*dcl.FieldDiff, err error) {
+	c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...")
+	// First, let us see if the user provided a state hint. If they did, we will start fetching based on that.
+	var fetchState *AzureClient
+	if sh := dcl.FetchStateHint(opts); sh != nil {
+		if r, ok := sh.(*AzureClient); !ok {
+			c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected AzureClient, got %T", sh)
+		} else {
+			fetchState = r
+		}
+	}
+	if fetchState == nil {
+		fetchState = rawDesired
+	}
+
+	// 1.2: Retrieval of raw initial state from API
+	rawInitial, err := c.GetClient(ctx, fetchState)
+	if rawInitial == nil {
+		if !dcl.IsNotFound(err) {
+			c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a Client resource already exists: %s", err)
+			return nil, nil, nil, fmt.Errorf("failed to retrieve Client resource: %v", err)
+		}
+		c.Config.Logger.InfoWithContext(ctx, "Found that Client resource did not exist.")
+		// Perform canonicalization to pick up defaults.
+		desired, err = canonicalizeClientDesiredState(rawDesired, rawInitial)
+		return nil, desired, nil, err
+	}
+	c.Config.Logger.InfoWithContextf(ctx, "Found initial state for Client: %v", rawInitial)
+	c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for Client: %v", rawDesired)
+
+	// The Get call applies postReadExtract and so the result may contain fields that are not part of API version.
+	if err := extractClientFields(rawInitial); err != nil {
+		return nil, nil, nil, err
+	}
+
+	// 1.3: Canonicalize raw initial state into initial state.
+	initial, err = canonicalizeClientInitialState(rawInitial, rawDesired)
+	if err != nil {
+		return nil, nil, nil, err
+	}
+	c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for Client: %v", initial)
+
+	// 1.4: Canonicalize raw desired state into desired state.
+	desired, err = canonicalizeClientDesiredState(rawDesired, rawInitial, opts...)
+	if err != nil {
+		return nil, nil, nil, err
+	}
+	c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for Client: %v", desired)
+
+	// 2.1: Comparison of initial and desired state.
+	diffs, err = diffClient(c, desired, initial, opts...)
+	return initial, desired, diffs, err
+}
+
+func canonicalizeClientInitialState(rawInitial, rawDesired *AzureClient) (*AzureClient, error) {
+	// TODO(magic-modules-eng): write canonicalizer once relevant traits are added.
+	return rawInitial, nil
+}
+
+/*
+* Canonicalizers
+*
+* These are responsible for converting either a user-specified config or a
+* GCP API response to a standard format that can be used for difference checking.
+* */
+
+func canonicalizeClientDesiredState(rawDesired, rawInitial *AzureClient, opts ...dcl.ApplyOption) (*AzureClient, error) {
+
+	if rawInitial == nil {
+		// Since the initial state is empty, the desired state is all we have.
+		// We canonicalize the remaining nested objects with nil to pick up defaults.
+
+		return rawDesired, nil
+	}
+	canonicalDesired := &AzureClient{}
+	// For each field: if desired and initial are equivalent under the field's
+	// canonicalization rule, keep the initial (server-side) form to avoid
+	// spurious diffs; otherwise keep the desired value.
+	if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawInitial.Name) {
+		canonicalDesired.Name = rawInitial.Name
+	} else {
+		canonicalDesired.Name = rawDesired.Name
+	}
+	if dcl.StringCanonicalize(rawDesired.TenantId, rawInitial.TenantId) {
+		canonicalDesired.TenantId = rawInitial.TenantId
+	} else {
+		canonicalDesired.TenantId = rawDesired.TenantId
+	}
+	if dcl.StringCanonicalize(rawDesired.ApplicationId, rawInitial.ApplicationId) {
+		canonicalDesired.ApplicationId = rawInitial.ApplicationId
+	} else {
+		canonicalDesired.ApplicationId = rawDesired.ApplicationId
+	}
+	if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) {
+		canonicalDesired.Project = rawInitial.Project
+	} else {
+		canonicalDesired.Project = rawDesired.Project
+	}
+	if dcl.NameToSelfLink(rawDesired.Location, rawInitial.Location) {
+		canonicalDesired.Location = rawInitial.Location
+	} else {
+		canonicalDesired.Location = rawDesired.Location
+	}
+	return canonicalDesired, nil
+}
+
+// canonicalizeClientNewState reconciles the post-apply state returned by the
+// API (rawNew) with what was requested (rawDesired), preferring the desired
+// form of a field whenever the two are equivalent.
+func canonicalizeClientNewState(c *Client, rawNew, rawDesired *AzureClient) (*AzureClient, error) {
+
+	if dcl.IsEmptyValueIndirect(rawNew.Name) && dcl.IsEmptyValueIndirect(rawDesired.Name) {
+		rawNew.Name = rawDesired.Name
+	} else {
+		if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawNew.Name) {
+			rawNew.Name = rawDesired.Name
+		}
+	}
+
+	if dcl.IsEmptyValueIndirect(rawNew.TenantId) && dcl.IsEmptyValueIndirect(rawDesired.TenantId) {
+		rawNew.TenantId = rawDesired.TenantId
+	} else {
+		if dcl.StringCanonicalize(rawDesired.TenantId, rawNew.TenantId) {
+			rawNew.TenantId = rawDesired.TenantId
+		}
+	}
+
+	if dcl.IsEmptyValueIndirect(rawNew.ApplicationId) && dcl.IsEmptyValueIndirect(rawDesired.ApplicationId) {
+		rawNew.ApplicationId = rawDesired.ApplicationId
+	} else {
+		if dcl.StringCanonicalize(rawDesired.ApplicationId, rawNew.ApplicationId) {
+			rawNew.ApplicationId = rawDesired.ApplicationId
+		}
+	}
+
+	if dcl.IsEmptyValueIndirect(rawNew.Certificate) && dcl.IsEmptyValueIndirect(rawDesired.Certificate) {
+		rawNew.Certificate = rawDesired.Certificate
+	} else {
+		if dcl.StringCanonicalize(rawDesired.Certificate, rawNew.Certificate) {
+			rawNew.Certificate = rawDesired.Certificate
+		}
+	}
+
+	if dcl.IsEmptyValueIndirect(rawNew.Uid) && dcl.IsEmptyValueIndirect(rawDesired.Uid) {
+		rawNew.Uid = rawDesired.Uid
+	} else {
+		if dcl.StringCanonicalize(rawDesired.Uid, rawNew.Uid) {
+			rawNew.Uid = rawDesired.Uid
+		}
+	}
+
+	// CreateTime is output-only: always keep the server's value when one exists.
+	if dcl.IsEmptyValueIndirect(rawNew.CreateTime) && dcl.IsEmptyValueIndirect(rawDesired.CreateTime) {
+		rawNew.CreateTime = rawDesired.CreateTime
+	} else {
+	}
+
+	rawNew.Project = rawDesired.Project
+
+	rawNew.Location = rawDesired.Location
+
+	return rawNew, nil
+}
+
+// The differ returns a list of diffs, along with a list of operations that should be taken
+// to remedy them. Right now, it does not attempt to consolidate operations - if several
+// fields can be fixed with a patch update, it will perform the patch several times.
+// Diffs on some fields will be ignored if the `desired` state has an empty (nil)
+// value. This empty value indicates that the user does not care about the state for
+// the field. Empty fields on the actual object will cause diffs.
+// TODO(magic-modules-eng): for efficiency in some resources, add batching.
+func diffClient(c *Client, desired, actual *AzureClient, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) {
+	if desired == nil || actual == nil {
+		return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual)
+	}
+
+	c.Config.Logger.Infof("Diff function called with desired state: %v", desired)
+	c.Config.Logger.Infof("Diff function called with actual state: %v", actual)
+
+	var fn dcl.FieldName
+	var newDiffs []*dcl.FieldDiff
+	// New style diffs.
+	// Every field selects RequiresRecreate: AzureClient has no update operations.
+	if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.TenantId, actual.TenantId, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("TenantId")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.ApplicationId, actual.ApplicationId, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ApplicationId")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	// The API field for Certificate is "pemCertificate"; hence the PemCertificate nest name.
+	if ds, err := dcl.Diff(desired.Certificate, actual.Certificate, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PemCertificate")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.Uid, actual.Uid, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Uid")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.CreateTime, actual.CreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreateTime")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.Location, actual.Location, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Location")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if len(newDiffs) > 0 {
+		c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs)
+	}
+	return newDiffs, nil
+}
+
+// urlNormalized returns a copy of the resource struct with values normalized
+// for URL substitutions. For instance, it converts long-form self-links to
+// short-form so they can be substituted in.
+func (r *AzureClient) urlNormalized() *AzureClient {
+	normalized := dcl.Copy(*r).(AzureClient)
+	normalized.Name = dcl.SelfLinkToName(r.Name)
+	normalized.TenantId = dcl.SelfLinkToName(r.TenantId)
+	normalized.ApplicationId = dcl.SelfLinkToName(r.ApplicationId)
+	normalized.Certificate = dcl.SelfLinkToName(r.Certificate)
+	normalized.Uid = dcl.SelfLinkToName(r.Uid)
+	normalized.Project = dcl.SelfLinkToName(r.Project)
+	normalized.Location = dcl.SelfLinkToName(r.Location)
+	return &normalized
+}
+
+// updateURL always errors: AzureClient is immutable and exposes no update operations.
+func (r *AzureClient) updateURL(userBasePath, updateName string) (string, error) {
+	return "", fmt.Errorf("unknown update name: %s", updateName)
+}
+
+// marshal encodes the Client resource into JSON for a Create request, and
+// performs transformations from the resource schema to the API schema if
+// necessary.
+func (r *AzureClient) marshal(c *Client) ([]byte, error) {
+	m, err := expandClient(c, r)
+	if err != nil {
+		return nil, fmt.Errorf("error marshalling Client: %w", err)
+	}
+
+	return json.Marshal(m)
+}
+
+// unmarshalClient decodes JSON responses into the Client resource schema.
+func unmarshalClient(b []byte, c *Client, res *AzureClient) (*AzureClient, error) { + var m map[string]interface{} + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + return unmarshalMapClient(m, c, res) +} + +func unmarshalMapClient(m map[string]interface{}, c *Client, res *AzureClient) (*AzureClient, error) { + + flattened := flattenClient(c, m, res) + if flattened == nil { + return nil, fmt.Errorf("attempted to flatten empty json object") + } + return flattened, nil +} + +// expandClient expands Client into a JSON request object. +func expandClient(c *Client, f *AzureClient) (map[string]interface{}, error) { + m := make(map[string]interface{}) + res := f + _ = res + if v, err := dcl.DeriveField("projects/%s/locations/%s/azureClients/%s", f.Name, dcl.SelfLinkToName(f.Project), dcl.SelfLinkToName(f.Location), dcl.SelfLinkToName(f.Name)); err != nil { + return nil, fmt.Errorf("error expanding Name into name: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["name"] = v + } + if v := f.TenantId; dcl.ValueShouldBeSent(v) { + m["tenantId"] = v + } + if v := f.ApplicationId; dcl.ValueShouldBeSent(v) { + m["applicationId"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Project into project: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["project"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Location into location: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["location"] = v + } + + return m, nil +} + +// flattenClient flattens Client from a JSON request object into the +// Client type. 
+func flattenClient(c *Client, i interface{}, res *AzureClient) *AzureClient { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + if len(m) == 0 { + return nil + } + + resultRes := &AzureClient{} + resultRes.Name = dcl.FlattenString(m["name"]) + resultRes.TenantId = dcl.FlattenString(m["tenantId"]) + resultRes.ApplicationId = dcl.FlattenString(m["applicationId"]) + resultRes.Certificate = dcl.FlattenString(m["pemCertificate"]) + resultRes.Uid = dcl.FlattenString(m["uid"]) + resultRes.CreateTime = dcl.FlattenString(m["createTime"]) + resultRes.Project = dcl.FlattenString(m["project"]) + resultRes.Location = dcl.FlattenString(m["location"]) + + return resultRes +} + +// This function returns a matcher that checks whether a serialized resource matches this resource +// in its parameters (as defined by the fields in a Get, which definitionally define resource +// identity). This is useful in extracting the element from a List call. +func (r *AzureClient) matcher(c *Client) func([]byte) bool { + return func(b []byte) bool { + cr, err := unmarshalClient(b, c, r) + if err != nil { + c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.") + return false + } + nr := r.urlNormalized() + ncr := cr.urlNormalized() + c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr) + + if nr.Project == nil && ncr.Project == nil { + c.Config.Logger.Info("Both Project fields null - considering equal.") + } else if nr.Project == nil || ncr.Project == nil { + c.Config.Logger.Info("Only one Project field is null - considering unequal.") + return false + } else if *nr.Project != *ncr.Project { + return false + } + if nr.Location == nil && ncr.Location == nil { + c.Config.Logger.Info("Both Location fields null - considering equal.") + } else if nr.Location == nil || ncr.Location == nil { + c.Config.Logger.Info("Only one Location field is null - considering unequal.") + return false + } else if *nr.Location != *ncr.Location { + return false + } + if nr.Name 
== nil && ncr.Name == nil { + c.Config.Logger.Info("Both Name fields null - considering equal.") + } else if nr.Name == nil || ncr.Name == nil { + c.Config.Logger.Info("Only one Name field is null - considering unequal.") + return false + } else if *nr.Name != *ncr.Name { + return false + } + return true + } +} + +type clientDiff struct { + // The diff should include one or the other of RequiresRecreate or UpdateOp. + RequiresRecreate bool + UpdateOp clientApiOperation + FieldName string // used for error logging +} + +func convertFieldDiffsToClientDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]clientDiff, error) { + opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff) + // Map each operation name to the field diffs associated with it. + for _, fd := range fds { + for _, ro := range fd.ResultingOperation { + if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok { + fieldDiffs = append(fieldDiffs, fd) + opNamesToFieldDiffs[ro] = fieldDiffs + } else { + config.Logger.Infof("%s required due to diff: %v", ro, fd) + opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd} + } + } + } + var diffs []clientDiff + // For each operation name, create a clientDiff which contains the operation. + for opName, fieldDiffs := range opNamesToFieldDiffs { + // Use the first field diff's field name for logging required recreate error. + diff := clientDiff{FieldName: fieldDiffs[0].FieldName} + if opName == "Recreate" { + diff.RequiresRecreate = true + } else { + apiOp, err := convertOpNameToClientApiOperation(opName, fieldDiffs, opts...) 
+ if err != nil { + return diffs, err + } + diff.UpdateOp = apiOp + } + diffs = append(diffs, diff) + } + return diffs, nil +} + +func convertOpNameToClientApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (clientApiOperation, error) { + switch opName { + + default: + return nil, fmt.Errorf("no such operation with name: %v", opName) + } +} + +func extractClientFields(r *AzureClient) error { + return nil +} + +func postReadExtractClientFields(r *AzureClient) error { + return nil +} diff --git a/mmv1/third_party/terraform/services/containerazure/azure_client_schema.go.tmpl b/mmv1/third_party/terraform/services/containerazure/azure_client_schema.go.tmpl new file mode 100644 index 000000000000..0ecd8512c311 --- /dev/null +++ b/mmv1/third_party/terraform/services/containerazure/azure_client_schema.go.tmpl @@ -0,0 +1,179 @@ +package containerazure + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +func DCLAzureClientSchema() *dcl.Schema { + return &dcl.Schema{ + Info: &dcl.Info{ + Title: "ContainerAzure/Client", + Description: "AzureClient resources hold client authentication information needed by the Anthos Multi-Cloud API to manage Azure resources on your Azure subscription.When an AzureCluster is created, an AzureClient resource needs to be provided and all operations on Azure resources associated to that cluster will authenticate to Azure services using the given client.AzureClient resources are immutable and cannot be modified upon creation.Each AzureClient resource is bound to a single Azure Active Directory Application and tenant.", + StructName: "AzureClient", + Reference: &dcl.Link{ + Text: "API reference", + URL: "https://cloud.google.com/kubernetes-engine/multi-cloud/docs/reference/rest/v1/projects.locations.azureClients", + }, + Guides: []*dcl.Link{ + &dcl.Link{ + Text: "Multicloud overview", + URL: "https://cloud.google.com/kubernetes-engine/multi-cloud/docs", + }, + }, + }, + Paths: 
&dcl.Paths{ + Get: &dcl.Path{ + Description: "The function used to get information about a Client", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "client", + Required: true, + Description: "A full instance of a Client", + }, + }, + }, + Apply: &dcl.Path{ + Description: "The function used to apply information about a Client", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "client", + Required: true, + Description: "A full instance of a Client", + }, + }, + }, + Delete: &dcl.Path{ + Description: "The function used to delete a Client", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "client", + Required: true, + Description: "A full instance of a Client", + }, + }, + }, + DeleteAll: &dcl.Path{ + Description: "The function used to delete all Client", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "project", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + dcl.PathParameters{ + Name: "location", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + }, + }, + List: &dcl.Path{ + Description: "The function used to list information about many Client", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "project", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + dcl.PathParameters{ + Name: "location", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + }, + }, + }, + Components: &dcl.Components{ + Schemas: map[string]*dcl.Component{ + "Client": &dcl.Component{ + Title: "AzureClient", + ID: "projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/azureClients/{{ "{{" }}name{{ "}}" }}", + ParentContainer: "project", + HasCreate: true, + SchemaProperty: dcl.Property{ + Type: "object", + Required: []string{ + "name", + "tenantId", + "applicationId", + "project", + "location", + }, + Properties: map[string]*dcl.Property{ + 
"applicationId": &dcl.Property{ + Type: "string", + GoName: "ApplicationId", + Description: "The Azure Active Directory Application ID.", + Immutable: true, + }, + "certificate": &dcl.Property{ + Type: "string", + GoName: "Certificate", + ReadOnly: true, + Description: "Output only. The PEM encoded x509 certificate.", + Immutable: true, + }, + "createTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "CreateTime", + ReadOnly: true, + Description: "Output only. The time at which this resource was created.", + Immutable: true, + }, + "location": &dcl.Property{ + Type: "string", + GoName: "Location", + Description: "The location for the resource", + Immutable: true, + Parameter: true, + }, + "name": &dcl.Property{ + Type: "string", + GoName: "Name", + Description: "The name of this resource.", + Immutable: true, + HasLongForm: true, + }, + "project": &dcl.Property{ + Type: "string", + GoName: "Project", + Description: "The project for the resource", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Cloudresourcemanager/Project", + Field: "name", + Parent: true, + }, + }, + Parameter: true, + }, + "tenantId": &dcl.Property{ + Type: "string", + GoName: "TenantId", + Description: "The Azure Active Directory Tenant ID.", + Immutable: true, + }, + "uid": &dcl.Property{ + Type: "string", + GoName: "Uid", + ReadOnly: true, + Description: "Output only. 
A globally unique identifier for the client.", + Immutable: true, + }, + }, + }, + }, + }, + }, + } +} diff --git a/mmv1/third_party/terraform/services/containerazure/client.go.tmpl b/mmv1/third_party/terraform/services/containerazure/client.go.tmpl new file mode 100644 index 000000000000..2e8b8e7a19f8 --- /dev/null +++ b/mmv1/third_party/terraform/services/containerazure/client.go.tmpl @@ -0,0 +1,18 @@ +package containerazure + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +// The Client is the base struct of all operations. This will receive the +// Get, Delete, List, and Apply operations on all resources. +type Client struct { + Config *dcl.Config +} + +// NewClient creates a client that retries all operations a few times each. +func NewClient(c *dcl.Config) *Client { + return &Client{ + Config: c, + } +} diff --git a/mmv1/third_party/terraform/services/containerazure/cluster.go.tmpl b/mmv1/third_party/terraform/services/containerazure/cluster.go.tmpl new file mode 100644 index 000000000000..bd373a5f2370 --- /dev/null +++ b/mmv1/third_party/terraform/services/containerazure/cluster.go.tmpl @@ -0,0 +1,1347 @@ +package containerazure + +import ( + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "time" + +{{- if ne $.TargetVersionName "ga" }} + "google.golang.org/api/googleapi" +{{- end }} + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +{{- if eq $.TargetVersionName "ga" }} + "google.golang.org/api/googleapi" +{{- end }} +) + +type Cluster struct { + Name *string `json:"name"` + Description *string `json:"description"` + AzureRegion *string `json:"azureRegion"` + ResourceGroupId *string `json:"resourceGroupId"` + Client *string `json:"client"` + AzureServicesAuthentication *ClusterAzureServicesAuthentication `json:"azureServicesAuthentication"` + Networking *ClusterNetworking `json:"networking"` + ControlPlane *ClusterControlPlane `json:"controlPlane"` + Authorization 
*ClusterAuthorization `json:"authorization"` + State *ClusterStateEnum `json:"state"` + Endpoint *string `json:"endpoint"` + Uid *string `json:"uid"` + Reconciling *bool `json:"reconciling"` + CreateTime *string `json:"createTime"` + UpdateTime *string `json:"updateTime"` + Etag *string `json:"etag"` + Annotations map[string]string `json:"annotations"` + WorkloadIdentityConfig *ClusterWorkloadIdentityConfig `json:"workloadIdentityConfig"` + Project *string `json:"project"` + Location *string `json:"location"` + Fleet *ClusterFleet `json:"fleet"` +{{- if ne $.TargetVersionName "ga" }} + LoggingConfig *ClusterLoggingConfig `json:"loggingConfig"` + MonitoringConfig *ClusterMonitoringConfig `json:"monitoringConfig"` +{{- end }} +} + +func (r *Cluster) String() string { + return dcl.SprintResource(r) +} + +// The enum ClusterStateEnum. +type ClusterStateEnum string + +// ClusterStateEnumRef returns a *ClusterStateEnum with the value of string s +// If the empty string is provided, nil is returned. +func ClusterStateEnumRef(s string) *ClusterStateEnum { + v := ClusterStateEnum(s) + return &v +} + +func (v ClusterStateEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"STATE_UNSPECIFIED", "PROVISIONING", "RUNNING", "RECONCILING", "STOPPING", "ERROR", "DEGRADED"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "ClusterStateEnum", +{{- if ne $.TargetVersionName "ga" }} + Value: string(v), + Valid: []string{}, + } +} + +// The enum ClusterLoggingConfigComponentConfigEnableComponentsEnum. +type ClusterLoggingConfigComponentConfigEnableComponentsEnum string + +// ClusterLoggingConfigComponentConfigEnableComponentsEnumRef returns a *ClusterLoggingConfigComponentConfigEnableComponentsEnum with the value of string s +// If the empty string is provided, nil is returned. 
+func ClusterLoggingConfigComponentConfigEnableComponentsEnumRef(s string) *ClusterLoggingConfigComponentConfigEnableComponentsEnum { + v := ClusterLoggingConfigComponentConfigEnableComponentsEnum(s) + return &v +} + +func (v ClusterLoggingConfigComponentConfigEnableComponentsEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"COMPONENT_UNSPECIFIED", "SYSTEM_COMPONENTS", "WORKLOADS"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "ClusterLoggingConfigComponentConfigEnableComponentsEnum", +{{- end }} + Value: string(v), + Valid: []string{}, + } +} + +type ClusterAzureServicesAuthentication struct { + empty bool `json:"-"` + TenantId *string `json:"tenantId"` + ApplicationId *string `json:"applicationId"` +} + +type jsonClusterAzureServicesAuthentication ClusterAzureServicesAuthentication + +func (r *ClusterAzureServicesAuthentication) UnmarshalJSON(data []byte) error { + var res jsonClusterAzureServicesAuthentication + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterAzureServicesAuthentication + } else { + + r.TenantId = res.TenantId + + r.ApplicationId = res.ApplicationId + + } + return nil +} + +// This object is used to assert a desired state where this ClusterAzureServicesAuthentication is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterAzureServicesAuthentication *ClusterAzureServicesAuthentication = &ClusterAzureServicesAuthentication{empty: true} + +func (r *ClusterAzureServicesAuthentication) Empty() bool { + return r.empty +} + +func (r *ClusterAzureServicesAuthentication) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterAzureServicesAuthentication) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterNetworking struct { + empty bool `json:"-"` + VirtualNetworkId *string `json:"virtualNetworkId"` + PodAddressCidrBlocks []string `json:"podAddressCidrBlocks"` + ServiceAddressCidrBlocks []string `json:"serviceAddressCidrBlocks"` +} + +type jsonClusterNetworking ClusterNetworking + +func (r *ClusterNetworking) UnmarshalJSON(data []byte) error { + var res jsonClusterNetworking + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterNetworking + } else { + + r.VirtualNetworkId = res.VirtualNetworkId + + r.PodAddressCidrBlocks = res.PodAddressCidrBlocks + + r.ServiceAddressCidrBlocks = res.ServiceAddressCidrBlocks + + } + return nil +} + +// This object is used to assert a desired state where this ClusterNetworking is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterNetworking *ClusterNetworking = &ClusterNetworking{empty: true}
+
+// Empty reports whether this object is the canonical empty sentinel.
+func (r *ClusterNetworking) Empty() bool {
+	return r.empty
+}
+
+func (r *ClusterNetworking) String() string {
+	return dcl.SprintResource(r)
+}
+
+func (r *ClusterNetworking) HashCode() string {
+	// Placeholder for a more complex hash method that handles ordering, etc
+	// Hash resource body for easy comparison later
+	hash := sha256.Sum256([]byte(r.String()))
+	return fmt.Sprintf("%x", hash)
+}
+
+// ClusterControlPlane describes the control-plane configuration of the cluster.
+type ClusterControlPlane struct {
+	empty              bool                                   `json:"-"`
+	Version            *string                                `json:"version"`
+	SubnetId           *string                                `json:"subnetId"`
+	VmSize             *string                                `json:"vmSize"`
+	SshConfig          *ClusterControlPlaneSshConfig          `json:"sshConfig"`
+	RootVolume         *ClusterControlPlaneRootVolume         `json:"rootVolume"`
+	MainVolume         *ClusterControlPlaneMainVolume         `json:"mainVolume"`
+	DatabaseEncryption *ClusterControlPlaneDatabaseEncryption `json:"databaseEncryption"`
+	Tags               map[string]string                      `json:"tags"`
+	ProxyConfig        *ClusterControlPlaneProxyConfig        `json:"proxyConfig"`
+	ReplicaPlacements  []ClusterControlPlaneReplicaPlacements `json:"replicaPlacements"`
+}
+
+// jsonClusterControlPlane is an alias type used inside UnmarshalJSON to avoid
+// infinite recursion.
+type jsonClusterControlPlane ClusterControlPlane
+
+// UnmarshalJSON decodes the object, mapping an empty JSON object to the
+// canonical Empty sentinel.
+func (r *ClusterControlPlane) UnmarshalJSON(data []byte) error {
+	var res jsonClusterControlPlane
+	if err := json.Unmarshal(data, &res); err != nil {
+		return err
+	}
+
+	var m map[string]interface{}
+	// Error ignored: the same bytes were already unmarshalled successfully above.
+	json.Unmarshal(data, &m)
+
+	if len(m) == 0 {
+		*r = *EmptyClusterControlPlane
+	} else {
+
+		r.Version = res.Version
+
+		r.SubnetId = res.SubnetId
+
+		r.VmSize = res.VmSize
+
+		r.SshConfig = res.SshConfig
+
+		r.RootVolume = res.RootVolume
+
+		r.MainVolume = res.MainVolume
+
+		r.DatabaseEncryption = res.DatabaseEncryption
+
+		r.Tags = res.Tags
+
+		r.ProxyConfig = res.ProxyConfig
+
+		r.ReplicaPlacements = res.ReplicaPlacements
+
+	}
+	return nil
+}
+
+// This object is used to assert a desired state where this ClusterControlPlane is
+// empty. Go lacks global const objects, but this object should be treated
+// as one. Modifying this object will have undesirable results.
+var EmptyClusterControlPlane *ClusterControlPlane = &ClusterControlPlane{empty: true}
+
+// Empty reports whether this object is the canonical empty sentinel.
+func (r *ClusterControlPlane) Empty() bool {
+	return r.empty
+}
+
+func (r *ClusterControlPlane) String() string {
+	return dcl.SprintResource(r)
+}
+
+func (r *ClusterControlPlane) HashCode() string {
+	// Placeholder for a more complex hash method that handles ordering, etc
+	// Hash resource body for easy comparison later
+	hash := sha256.Sum256([]byte(r.String()))
+	return fmt.Sprintf("%x", hash)
+}
+
+// ClusterControlPlaneSshConfig holds the SSH public key authorized on control-plane VMs.
+type ClusterControlPlaneSshConfig struct {
+	empty         bool    `json:"-"`
+	AuthorizedKey *string `json:"authorizedKey"`
+}
+
+// jsonClusterControlPlaneSshConfig is an alias type used inside UnmarshalJSON
+// to avoid infinite recursion.
+type jsonClusterControlPlaneSshConfig ClusterControlPlaneSshConfig
+
+// UnmarshalJSON decodes the object, mapping an empty JSON object to the
+// canonical Empty sentinel.
+func (r *ClusterControlPlaneSshConfig) UnmarshalJSON(data []byte) error {
+	var res jsonClusterControlPlaneSshConfig
+	if err := json.Unmarshal(data, &res); err != nil {
+		return err
+	}
+
+	var m map[string]interface{}
+	// Error ignored: the same bytes were already unmarshalled successfully above.
+	json.Unmarshal(data, &m)
+
+	if len(m) == 0 {
+		*r = *EmptyClusterControlPlaneSshConfig
+	} else {
+
+		r.AuthorizedKey = res.AuthorizedKey
+
+	}
+	return nil
+}
+
+// This object is used to assert a desired state where this ClusterControlPlaneSshConfig is
+// empty. Go lacks global const objects, but this object should be treated
+// as one. Modifying this object will have undesirable results.
+var EmptyClusterControlPlaneSshConfig *ClusterControlPlaneSshConfig = &ClusterControlPlaneSshConfig{empty: true} + +func (r *ClusterControlPlaneSshConfig) Empty() bool { + return r.empty +} + +func (r *ClusterControlPlaneSshConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterControlPlaneSshConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterControlPlaneRootVolume struct { + empty bool `json:"-"` + SizeGib *int64 `json:"sizeGib"` +} + +type jsonClusterControlPlaneRootVolume ClusterControlPlaneRootVolume + +func (r *ClusterControlPlaneRootVolume) UnmarshalJSON(data []byte) error { + var res jsonClusterControlPlaneRootVolume + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterControlPlaneRootVolume + } else { + + r.SizeGib = res.SizeGib + + } + return nil +} + +// This object is used to assert a desired state where this ClusterControlPlaneRootVolume is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterControlPlaneRootVolume *ClusterControlPlaneRootVolume = &ClusterControlPlaneRootVolume{empty: true} + +func (r *ClusterControlPlaneRootVolume) Empty() bool { + return r.empty +} + +func (r *ClusterControlPlaneRootVolume) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterControlPlaneRootVolume) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterControlPlaneMainVolume struct { + empty bool `json:"-"` + SizeGib *int64 `json:"sizeGib"` +} + +type jsonClusterControlPlaneMainVolume ClusterControlPlaneMainVolume + +func (r *ClusterControlPlaneMainVolume) UnmarshalJSON(data []byte) error { + var res jsonClusterControlPlaneMainVolume + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterControlPlaneMainVolume + } else { + + r.SizeGib = res.SizeGib + + } + return nil +} + +// This object is used to assert a desired state where this ClusterControlPlaneMainVolume is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterControlPlaneMainVolume *ClusterControlPlaneMainVolume = &ClusterControlPlaneMainVolume{empty: true} + +func (r *ClusterControlPlaneMainVolume) Empty() bool { + return r.empty +} + +func (r *ClusterControlPlaneMainVolume) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterControlPlaneMainVolume) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterControlPlaneDatabaseEncryption struct { + empty bool `json:"-"` + KeyId *string `json:"keyId"` +} + +type jsonClusterControlPlaneDatabaseEncryption ClusterControlPlaneDatabaseEncryption + +func (r *ClusterControlPlaneDatabaseEncryption) UnmarshalJSON(data []byte) error { + var res jsonClusterControlPlaneDatabaseEncryption + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterControlPlaneDatabaseEncryption + } else { + + r.KeyId = res.KeyId + + } + return nil +} + +// This object is used to assert a desired state where this ClusterControlPlaneDatabaseEncryption is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterControlPlaneDatabaseEncryption *ClusterControlPlaneDatabaseEncryption = &ClusterControlPlaneDatabaseEncryption{empty: true} + +func (r *ClusterControlPlaneDatabaseEncryption) Empty() bool { + return r.empty +} + +func (r *ClusterControlPlaneDatabaseEncryption) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterControlPlaneDatabaseEncryption) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterControlPlaneProxyConfig struct { + empty bool `json:"-"` + ResourceGroupId *string `json:"resourceGroupId"` + SecretId *string `json:"secretId"` +} + +type jsonClusterControlPlaneProxyConfig ClusterControlPlaneProxyConfig + +func (r *ClusterControlPlaneProxyConfig) UnmarshalJSON(data []byte) error { + var res jsonClusterControlPlaneProxyConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterControlPlaneProxyConfig + } else { + + r.ResourceGroupId = res.ResourceGroupId + + r.SecretId = res.SecretId + + } + return nil +} + +// This object is used to assert a desired state where this ClusterControlPlaneProxyConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterControlPlaneProxyConfig *ClusterControlPlaneProxyConfig = &ClusterControlPlaneProxyConfig{empty: true} + +func (r *ClusterControlPlaneProxyConfig) Empty() bool { + return r.empty +} + +func (r *ClusterControlPlaneProxyConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterControlPlaneProxyConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterControlPlaneReplicaPlacements struct { + empty bool `json:"-"` + SubnetId *string `json:"subnetId"` + AzureAvailabilityZone *string `json:"azureAvailabilityZone"` +} + +type jsonClusterControlPlaneReplicaPlacements ClusterControlPlaneReplicaPlacements + +func (r *ClusterControlPlaneReplicaPlacements) UnmarshalJSON(data []byte) error { + var res jsonClusterControlPlaneReplicaPlacements + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterControlPlaneReplicaPlacements + } else { + + r.SubnetId = res.SubnetId + + r.AzureAvailabilityZone = res.AzureAvailabilityZone + + } + return nil +} + +// This object is used to assert a desired state where this ClusterControlPlaneReplicaPlacements is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterControlPlaneReplicaPlacements *ClusterControlPlaneReplicaPlacements = &ClusterControlPlaneReplicaPlacements{empty: true} + +func (r *ClusterControlPlaneReplicaPlacements) Empty() bool { + return r.empty +} + +func (r *ClusterControlPlaneReplicaPlacements) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterControlPlaneReplicaPlacements) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterAuthorization struct { + empty bool `json:"-"` + AdminUsers []ClusterAuthorizationAdminUsers `json:"adminUsers"` + AdminGroups []ClusterAuthorizationAdminGroups `json:"adminGroups"` +} + +type jsonClusterAuthorization ClusterAuthorization + +func (r *ClusterAuthorization) UnmarshalJSON(data []byte) error { + var res jsonClusterAuthorization + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterAuthorization + } else { + + r.AdminUsers = res.AdminUsers + + r.AdminGroups = res.AdminGroups + + } + return nil +} + +// This object is used to assert a desired state where this ClusterAuthorization is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterAuthorization *ClusterAuthorization = &ClusterAuthorization{empty: true} + +func (r *ClusterAuthorization) Empty() bool { + return r.empty +} + +func (r *ClusterAuthorization) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterAuthorization) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterAuthorizationAdminUsers struct { + empty bool `json:"-"` + Username *string `json:"username"` +} + +type jsonClusterAuthorizationAdminUsers ClusterAuthorizationAdminUsers + +func (r *ClusterAuthorizationAdminUsers) UnmarshalJSON(data []byte) error { + var res jsonClusterAuthorizationAdminUsers + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterAuthorizationAdminUsers + } else { + + r.Username = res.Username + + } + return nil +} + +// This object is used to assert a desired state where this ClusterAuthorizationAdminUsers is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterAuthorizationAdminUsers *ClusterAuthorizationAdminUsers = &ClusterAuthorizationAdminUsers{empty: true} + +func (r *ClusterAuthorizationAdminUsers) Empty() bool { + return r.empty +} + +func (r *ClusterAuthorizationAdminUsers) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterAuthorizationAdminUsers) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterAuthorizationAdminGroups struct { + empty bool `json:"-"` + Group *string `json:"group"` +} + +type jsonClusterAuthorizationAdminGroups ClusterAuthorizationAdminGroups + +func (r *ClusterAuthorizationAdminGroups) UnmarshalJSON(data []byte) error { + var res jsonClusterAuthorizationAdminGroups + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterAuthorizationAdminGroups + } else { + + r.Group = res.Group + + } + return nil +} + +// This object is used to assert a desired state where this ClusterAuthorizationAdminGroups is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterAuthorizationAdminGroups *ClusterAuthorizationAdminGroups = &ClusterAuthorizationAdminGroups{empty: true} + +func (r *ClusterAuthorizationAdminGroups) Empty() bool { + return r.empty +} + +func (r *ClusterAuthorizationAdminGroups) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterAuthorizationAdminGroups) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterWorkloadIdentityConfig struct { + empty bool `json:"-"` + IssuerUri *string `json:"issuerUri"` + WorkloadPool *string `json:"workloadPool"` + IdentityProvider *string `json:"identityProvider"` +} + +type jsonClusterWorkloadIdentityConfig ClusterWorkloadIdentityConfig + +func (r *ClusterWorkloadIdentityConfig) UnmarshalJSON(data []byte) error { + var res jsonClusterWorkloadIdentityConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterWorkloadIdentityConfig + } else { + + r.IssuerUri = res.IssuerUri + + r.WorkloadPool = res.WorkloadPool + + r.IdentityProvider = res.IdentityProvider + + } + return nil +} + +// This object is used to assert a desired state where this ClusterWorkloadIdentityConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterWorkloadIdentityConfig *ClusterWorkloadIdentityConfig = &ClusterWorkloadIdentityConfig{empty: true} + +func (r *ClusterWorkloadIdentityConfig) Empty() bool { + return r.empty +} + +func (r *ClusterWorkloadIdentityConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterWorkloadIdentityConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterFleet struct { + empty bool `json:"-"` + Project *string `json:"project"` + Membership *string `json:"membership"` +} + +type jsonClusterFleet ClusterFleet + +func (r *ClusterFleet) UnmarshalJSON(data []byte) error { + var res jsonClusterFleet + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterFleet + } else { + + r.Project = res.Project + + r.Membership = res.Membership + + } + return nil +} + +// This object is used to assert a desired state where this ClusterFleet is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterFleet *ClusterFleet = &ClusterFleet{empty: true} + +func (r *ClusterFleet) Empty() bool { + return r.empty +} + +func (r *ClusterFleet) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterFleet) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +{{- if ne $.TargetVersionName "ga" }} +type ClusterLoggingConfig struct { + empty bool `json:"-"` + ComponentConfig *ClusterLoggingConfigComponentConfig `json:"componentConfig"` +} + +type jsonClusterLoggingConfig ClusterLoggingConfig + +func (r *ClusterLoggingConfig) UnmarshalJSON(data []byte) error { + var res jsonClusterLoggingConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterLoggingConfig + } else { + + r.ComponentConfig = res.ComponentConfig + + } + return nil +} + +// This object is used to assert a desired state where this ClusterLoggingConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterLoggingConfig *ClusterLoggingConfig = &ClusterLoggingConfig{empty: true} + +func (r *ClusterLoggingConfig) Empty() bool { + return r.empty +} + +func (r *ClusterLoggingConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterLoggingConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterLoggingConfigComponentConfig struct { + empty bool `json:"-"` + EnableComponents []ClusterLoggingConfigComponentConfigEnableComponentsEnum `json:"enableComponents"` +} + +type jsonClusterLoggingConfigComponentConfig ClusterLoggingConfigComponentConfig + +func (r *ClusterLoggingConfigComponentConfig) UnmarshalJSON(data []byte) error { + var res jsonClusterLoggingConfigComponentConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterLoggingConfigComponentConfig + } else { + + r.EnableComponents = res.EnableComponents + + } + return nil +} + +// This object is used to assert a desired state where this ClusterLoggingConfigComponentConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterLoggingConfigComponentConfig *ClusterLoggingConfigComponentConfig = &ClusterLoggingConfigComponentConfig{empty: true} + +func (r *ClusterLoggingConfigComponentConfig) Empty() bool { + return r.empty +} + +func (r *ClusterLoggingConfigComponentConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterLoggingConfigComponentConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterMonitoringConfig struct { + empty bool `json:"-"` + ManagedPrometheusConfig *ClusterMonitoringConfigManagedPrometheusConfig `json:"managedPrometheusConfig"` +} + +type jsonClusterMonitoringConfig ClusterMonitoringConfig + +func (r *ClusterMonitoringConfig) UnmarshalJSON(data []byte) error { + var res jsonClusterMonitoringConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterMonitoringConfig + } else { + + r.ManagedPrometheusConfig = res.ManagedPrometheusConfig + + } + return nil +} + +// This object is used to assert a desired state where this ClusterMonitoringConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterMonitoringConfig *ClusterMonitoringConfig = &ClusterMonitoringConfig{empty: true} + +func (r *ClusterMonitoringConfig) Empty() bool { + return r.empty +} + +func (r *ClusterMonitoringConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterMonitoringConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterMonitoringConfigManagedPrometheusConfig struct { + empty bool `json:"-"` + Enabled *bool `json:"enabled"` +} + +type jsonClusterMonitoringConfigManagedPrometheusConfig ClusterMonitoringConfigManagedPrometheusConfig + +func (r *ClusterMonitoringConfigManagedPrometheusConfig) UnmarshalJSON(data []byte) error { + var res jsonClusterMonitoringConfigManagedPrometheusConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterMonitoringConfigManagedPrometheusConfig + } else { + + r.Enabled = res.Enabled + + } + return nil +} + +// This object is used to assert a desired state where this ClusterMonitoringConfigManagedPrometheusConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterMonitoringConfigManagedPrometheusConfig *ClusterMonitoringConfigManagedPrometheusConfig = &ClusterMonitoringConfigManagedPrometheusConfig{empty: true} + +func (r *ClusterMonitoringConfigManagedPrometheusConfig) Empty() bool { + return r.empty +} + +func (r *ClusterMonitoringConfigManagedPrometheusConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterMonitoringConfigManagedPrometheusConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +{{- end }} +// Describe returns a simple description of this resource to ensure that automated tools +// can identify it. +func (r *Cluster) Describe() dcl.ServiceTypeVersion { + return dcl.ServiceTypeVersion{ + Service: "container_azure", + Type: "Cluster", +{{- if ne $.TargetVersionName "ga" }} + Version: "beta", +{{- else }} + Version: "containerazure", +{{- end }} + } +} + +func (r *Cluster) ID() (string, error) { + if err := extractClusterFields(r); err != nil { + return "", err + } + nr := r.urlNormalized() + params := map[string]interface{}{ + "name": dcl.ValueOrEmptyString(nr.Name), + "description": dcl.ValueOrEmptyString(nr.Description), + "azure_region": dcl.ValueOrEmptyString(nr.AzureRegion), + "resource_group_id": dcl.ValueOrEmptyString(nr.ResourceGroupId), + "client": dcl.ValueOrEmptyString(nr.Client), + "azure_services_authentication": dcl.ValueOrEmptyString(nr.AzureServicesAuthentication), + "networking": dcl.ValueOrEmptyString(nr.Networking), + "control_plane": dcl.ValueOrEmptyString(nr.ControlPlane), + "authorization": dcl.ValueOrEmptyString(nr.Authorization), + "state": dcl.ValueOrEmptyString(nr.State), + "endpoint": dcl.ValueOrEmptyString(nr.Endpoint), + "uid": dcl.ValueOrEmptyString(nr.Uid), + "reconciling": dcl.ValueOrEmptyString(nr.Reconciling), + "create_time": 
dcl.ValueOrEmptyString(nr.CreateTime), + "update_time": dcl.ValueOrEmptyString(nr.UpdateTime), + "etag": dcl.ValueOrEmptyString(nr.Etag), + "annotations": dcl.ValueOrEmptyString(nr.Annotations), + "workload_identity_config": dcl.ValueOrEmptyString(nr.WorkloadIdentityConfig), + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "fleet": dcl.ValueOrEmptyString(nr.Fleet), +{{- if ne $.TargetVersionName "ga" }} + "logging_config": dcl.ValueOrEmptyString(nr.LoggingConfig), + "monitoring_config": dcl.ValueOrEmptyString(nr.MonitoringConfig), +{{- end }} + } + return dcl.Nprintf("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/azureClusters/{{ "{{" }}name{{ "}}" }}", params), nil +} + +const ClusterMaxPage = -1 + +type ClusterList struct { + Items []*Cluster + + nextToken string + + pageSize int32 + + resource *Cluster +} + +func (l *ClusterList) HasNext() bool { + return l.nextToken != "" +} + +func (l *ClusterList) Next(ctx context.Context, c *Client) error { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if !l.HasNext() { + return fmt.Errorf("no next page") + } + items, token, err := c.listCluster(ctx, l.resource, l.nextToken, l.pageSize) + if err != nil { + return err + } + l.Items = items + l.nextToken = token + return err +} + +func (c *Client) ListCluster(ctx context.Context, project, location string) (*ClusterList, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + return c.ListClusterWithMaxResults(ctx, project, location, ClusterMaxPage) + +} + +func (c *Client) ListClusterWithMaxResults(ctx context.Context, project, location string, pageSize int32) (*ClusterList, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // Create a resource object so that we can use proper url normalization 
methods. + r := &Cluster{ + Project: &project, + Location: &location, + } + items, token, err := c.listCluster(ctx, r, "", pageSize) + if err != nil { + return nil, err + } + return &ClusterList{ + Items: items, + nextToken: token, + pageSize: pageSize, + resource: r, + }, nil +} + +func (c *Client) GetCluster(ctx context.Context, r *Cluster) (*Cluster, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // This is *purposefully* supressing errors. + // This function is used with url-normalized values + not URL normalized values. + // URL Normalized values will throw unintentional errors, since those values are not of the proper parent form. + extractClusterFields(r) + + b, err := c.getClusterRaw(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + return nil, &googleapi.Error{ + Code: 404, + Message: err.Error(), + } + } + return nil, err + } + result, err := unmarshalCluster(b, c, r) + if err != nil { + return nil, err + } + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name + + c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) + result, err = canonicalizeClusterNewState(c, result, r) + if err != nil { + return nil, err + } + if err := postReadExtractClusterFields(result); err != nil { + return result, err + } + c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result) + + return result, nil +} + +func (c *Client) DeleteCluster(ctx context.Context, r *Cluster) error { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if r == nil { + return fmt.Errorf("Cluster resource is nil") + } + c.Config.Logger.InfoWithContext(ctx, "Deleting Cluster...") + deleteOp := deleteClusterOperation{} + return deleteOp.do(ctx, r, c) +} + +// 
DeleteAllCluster deletes all resources that the filter functions returns true on. +func (c *Client) DeleteAllCluster(ctx context.Context, project, location string, filter func(*Cluster) bool) error { + listObj, err := c.ListCluster(ctx, project, location) + if err != nil { + return err + } + + err = c.deleteAllCluster(ctx, filter, listObj.Items) + if err != nil { + return err + } + for listObj.HasNext() { + err = listObj.Next(ctx, c) + if err != nil { + return nil + } + err = c.deleteAllCluster(ctx, filter, listObj.Items) + if err != nil { + return err + } + } + return nil +} + +func (c *Client) ApplyCluster(ctx context.Context, rawDesired *Cluster, opts ...dcl.ApplyOption) (*Cluster, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + ctx = dcl.ContextWithRequestID(ctx) + var resultNewState *Cluster + err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + newState, err := applyClusterHelper(c, ctx, rawDesired, opts...) + resultNewState = newState + if err != nil { + // If the error is 409, there is conflict in resource update. + // Here we want to apply changes based on latest state. + if dcl.IsConflictError(err) { + return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} + } + return nil, err + } + return nil, nil + }, c.Config.RetryProvider) + return resultNewState, err +} + +func applyClusterHelper(c *Client, ctx context.Context, rawDesired *Cluster, opts ...dcl.ApplyOption) (*Cluster, error) { + c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyCluster...") + c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) + + // 1.1: Validation of user-specified fields in desired state. + if err := rawDesired.validate(); err != nil { + return nil, err + } + + if err := extractClusterFields(rawDesired); err != nil { + return nil, err + } + + initial, desired, fieldDiffs, err := c.clusterDiffsForRawDesired(ctx, rawDesired, opts...) 
+ if err != nil { + return nil, fmt.Errorf("failed to create a diff: %w", err) + } + + diffs, err := convertFieldDiffsToClusterDiffs(c.Config, fieldDiffs, opts) + if err != nil { + return nil, err + } + + // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). + + // 2.3: Lifecycle Directive Check + var create bool + lp := dcl.FetchLifecycleParams(opts) + if initial == nil { + if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} + } + create = true + } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), + } + } else { + for _, d := range diffs { + if d.RequiresRecreate { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), + } + } + if dcl.HasLifecycleParam(lp, dcl.BlockModification) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} + } + } + } + + // 2.4 Imperative Request Planning + var ops []clusterApiOperation + if create { + ops = append(ops, &createClusterOperation{}) + } else { + for _, d := range diffs { + ops = append(ops, d.UpdateOp) + } + } + c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) + + // 2.5 Request Actuation + for _, op := range ops { + c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) + if err := op.do(ctx, desired, c); err != nil { + c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", op, op, err) + return nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) + } + return applyClusterDiff(c, ctx, desired, rawDesired, ops, opts...) 
+} + +func applyClusterDiff(c *Client, ctx context.Context, desired *Cluster, rawDesired *Cluster, ops []clusterApiOperation, opts ...dcl.ApplyOption) (*Cluster, error) { + // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") + rawNew, err := c.GetCluster(ctx, desired) + if err != nil { + return nil, err + } + // Get additional values from the first response. + // These values should be merged into the newState above. + if len(ops) > 0 { + lastOp := ops[len(ops)-1] + if o, ok := lastOp.(*createClusterOperation); ok { + if r, hasR := o.FirstResponse(); hasR { + + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") + + fullResp, err := unmarshalMapCluster(r, c, rawDesired) + if err != nil { + return nil, err + } + + rawNew, err = canonicalizeClusterNewState(c, rawNew, fullResp) + if err != nil { + return nil, err + } + } + } + } + + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) + // 3.2b Canonicalization of raw new state using raw desired state + newState, err := canonicalizeClusterNewState(c, rawNew, rawDesired) + if err != nil { + return rawNew, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) + // 3.3 Comparison of the new state and raw desired state. + // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE + newDesired, err := canonicalizeClusterDesiredState(rawDesired, newState) + if err != nil { + return newState, err + } + + if err := postReadExtractClusterFields(newState); err != nil { + return newState, err + } + + // Need to ensure any transformations made here match acceptably in differ. 
+ if err := postReadExtractClusterFields(newDesired); err != nil { + return newState, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) + newDiffs, err := diffCluster(c, newDesired, newState) + if err != nil { + return newState, err + } + + if len(newDiffs) == 0 { + c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") + } else { + c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) + diffMessages := make([]string, len(newDiffs)) + for i, d := range newDiffs { + diffMessages[i] = fmt.Sprintf("%v", d) + } + return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} + } + c.Config.Logger.InfoWithContext(ctx, "Done Apply.") + return newState, nil +} diff --git a/mmv1/third_party/terraform/services/containerazure/cluster_internal.go.tmpl b/mmv1/third_party/terraform/services/containerazure/cluster_internal.go.tmpl new file mode 100644 index 000000000000..c5f544c0da29 --- /dev/null +++ b/mmv1/third_party/terraform/services/containerazure/cluster_internal.go.tmpl @@ -0,0 +1,6926 @@ +package containerazure + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "strings" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource/operations" +) + +func (r *Cluster) validate() error { + + if err := dcl.ValidateExactlyOneOfFieldsSet([]string{"Client", "AzureServicesAuthentication"}, r.Client, r.AzureServicesAuthentication); err != nil { + return err + } + if err := dcl.Required(r, "name"); err != nil { + return err + } + if err := dcl.Required(r, "azureRegion"); err != nil { + return err + } + if err := dcl.Required(r, "resourceGroupId"); err != nil { + return err + } + if err := dcl.Required(r, "networking"); err != nil { + return err + } + if err := dcl.Required(r, "controlPlane"); err != nil { + return err + } + if err := dcl.Required(r, "authorization"); err 
!= nil { + return err + } + if err := dcl.RequiredParameter(r.Project, "Project"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Location, "Location"); err != nil { + return err + } + if err := dcl.Required(r, "fleet"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.AzureServicesAuthentication) { + if err := r.AzureServicesAuthentication.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.Networking) { + if err := r.Networking.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.ControlPlane) { + if err := r.ControlPlane.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.Authorization) { + if err := r.Authorization.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.WorkloadIdentityConfig) { + if err := r.WorkloadIdentityConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.Fleet) { + if err := r.Fleet.validate(); err != nil { + return err + } + } +{{- if ne $.TargetVersionName "ga" }} + if !dcl.IsEmptyValueIndirect(r.LoggingConfig) { + if err := r.LoggingConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.MonitoringConfig) { + if err := r.MonitoringConfig.validate(); err != nil { + return err + } + } +{{- end }} + return nil +} +func (r *ClusterAzureServicesAuthentication) validate() error { + if err := dcl.Required(r, "tenantId"); err != nil { + return err + } + if err := dcl.Required(r, "applicationId"); err != nil { + return err + } + return nil +} +func (r *ClusterNetworking) validate() error { + if err := dcl.Required(r, "virtualNetworkId"); err != nil { + return err + } + if err := dcl.Required(r, "podAddressCidrBlocks"); err != nil { + return err + } + if err := dcl.Required(r, "serviceAddressCidrBlocks"); err != nil { + return err + } + return nil +} +func (r *ClusterControlPlane) validate() error { + if err := dcl.Required(r, "version"); err != nil 
{ + return err + } + if err := dcl.Required(r, "subnetId"); err != nil { + return err + } + if err := dcl.Required(r, "sshConfig"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.SshConfig) { + if err := r.SshConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.RootVolume) { + if err := r.RootVolume.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.MainVolume) { + if err := r.MainVolume.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.DatabaseEncryption) { + if err := r.DatabaseEncryption.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.ProxyConfig) { + if err := r.ProxyConfig.validate(); err != nil { + return err + } + } + return nil +} +func (r *ClusterControlPlaneSshConfig) validate() error { + if err := dcl.Required(r, "authorizedKey"); err != nil { + return err + } + return nil +} +func (r *ClusterControlPlaneRootVolume) validate() error { + return nil +} +func (r *ClusterControlPlaneMainVolume) validate() error { + return nil +} +func (r *ClusterControlPlaneDatabaseEncryption) validate() error { + if err := dcl.Required(r, "keyId"); err != nil { + return err + } + return nil +} +func (r *ClusterControlPlaneProxyConfig) validate() error { + if err := dcl.Required(r, "resourceGroupId"); err != nil { + return err + } + if err := dcl.Required(r, "secretId"); err != nil { + return err + } + return nil +} +func (r *ClusterControlPlaneReplicaPlacements) validate() error { + if err := dcl.Required(r, "subnetId"); err != nil { + return err + } + if err := dcl.Required(r, "azureAvailabilityZone"); err != nil { + return err + } + return nil +} +func (r *ClusterAuthorization) validate() error { + if err := dcl.Required(r, "adminUsers"); err != nil { + return err + } + return nil +} +func (r *ClusterAuthorizationAdminUsers) validate() error { + if err := dcl.Required(r, "username"); err != nil { + return err + } + return nil +} +func (r 
*ClusterAuthorizationAdminGroups) validate() error { + if err := dcl.Required(r, "group"); err != nil { + return err + } + return nil +} +func (r *ClusterWorkloadIdentityConfig) validate() error { + return nil +} +func (r *ClusterFleet) validate() error { + if err := dcl.Required(r, "project"); err != nil { + return err + } +{{- if ne $.TargetVersionName "ga" }} + return nil +} +func (r *ClusterLoggingConfig) validate() error { + if !dcl.IsEmptyValueIndirect(r.ComponentConfig) { + if err := r.ComponentConfig.validate(); err != nil { + return err + } + } + return nil +} +func (r *ClusterLoggingConfigComponentConfig) validate() error { + return nil +} +func (r *ClusterMonitoringConfig) validate() error { + if !dcl.IsEmptyValueIndirect(r.ManagedPrometheusConfig) { + if err := r.ManagedPrometheusConfig.validate(); err != nil { + return err + } + } + return nil +} +func (r *ClusterMonitoringConfigManagedPrometheusConfig) validate() error { +{{- end }} + return nil +} +func (r *Cluster) basePath() string { + params := map[string]interface{}{ + "location": dcl.ValueOrEmptyString(r.Location), + } + return dcl.Nprintf("https://{{ "{{" }}location{{ "}}" }}-gkemulticloud.googleapis.com/v1", params) +} + +func (r *Cluster) getURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/azureClusters/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +func (r *Cluster) listURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" 
}}/azureClusters", nr.basePath(), userBasePath, params), nil + +} + +func (r *Cluster) createURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/azureClusters?azureClusterId={{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil + +} + +func (r *Cluster) deleteURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/azureClusters/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +// clusterApiOperation represents a mutable operation in the underlying REST +// API such as Create, Update, or Delete. +type clusterApiOperation interface { + do(context.Context, *Cluster, *Client) error +} + +// newUpdateClusterUpdateAzureClusterRequest creates a request for an +// Cluster resource's UpdateAzureCluster update type by filling in the update +// fields based on the intended state of the resource. 
+func newUpdateClusterUpdateAzureClusterRequest(ctx context.Context, f *Cluster, c *Client) (map[string]interface{}, error) { + req := map[string]interface{}{} + res := f + _ = res + + if v := f.Description; !dcl.IsEmptyValueIndirect(v) { + req["description"] = v + } + if v := f.Client; !dcl.IsEmptyValueIndirect(v) { + req["azureClient"] = v + } + if v, err := expandClusterAzureServicesAuthentication(c, f.AzureServicesAuthentication, res); err != nil { + return nil, fmt.Errorf("error expanding AzureServicesAuthentication into azureServicesAuthentication: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["azureServicesAuthentication"] = v + } + if v, err := expandClusterControlPlane(c, f.ControlPlane, res); err != nil { + return nil, fmt.Errorf("error expanding ControlPlane into controlPlane: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["controlPlane"] = v + } + if v, err := expandClusterAuthorization(c, f.Authorization, res); err != nil { + return nil, fmt.Errorf("error expanding Authorization into authorization: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["authorization"] = v +{{- if ne $.TargetVersionName "ga" }} + } + if v, err := expandClusterLoggingConfig(c, f.LoggingConfig, res); err != nil { + return nil, fmt.Errorf("error expanding LoggingConfig into loggingConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["loggingConfig"] = v + } + if v, err := expandClusterMonitoringConfig(c, f.MonitoringConfig, res); err != nil { + return nil, fmt.Errorf("error expanding MonitoringConfig into monitoringConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["monitoringConfig"] = v +{{- end }} + } + b, err := c.getClusterRaw(ctx, f) + if err != nil { + return nil, err + } + var m map[string]interface{} + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + rawEtag, err := dcl.GetMapEntry( + m, + []string{"etag"}, + ) + if err != nil { + c.Config.Logger.WarningWithContextf(ctx, "Failed to 
fetch from JSON Path: %v", err) + } else { + req["etag"] = rawEtag.(string) + } + return req, nil +} + +// marshalUpdateClusterUpdateAzureClusterRequest converts the update into +// the final JSON request body. +func marshalUpdateClusterUpdateAzureClusterRequest(c *Client, m map[string]interface{}) ([]byte, error) { + + return json.Marshal(m) +} + +type updateClusterUpdateAzureClusterOperation struct { + // If the update operation has the REQUIRES_APPLY_OPTIONS trait, this will be populated. + // Usually it will be nil - this is to prevent us from accidentally depending on apply + // options, which should usually be unnecessary. + ApplyOptions []dcl.ApplyOption + FieldDiffs []*dcl.FieldDiff +} + +// do creates a request and sends it to the appropriate URL. In most operations, +// do will transcribe a subset of the resource into a request object and send a +// PUT request to a single URL. + +func (op *updateClusterUpdateAzureClusterOperation) do(ctx context.Context, r *Cluster, c *Client) error { + _, err := c.GetCluster(ctx, r) + if err != nil { + return err + } + + u, err := r.updateURL(c.Config.BasePath, "UpdateAzureCluster") + if err != nil { + return err + } + mask := dcl.UpdateMask(op.FieldDiffs) + u, err = dcl.AddQueryParams(u, map[string]string{"updateMask": mask}) + if err != nil { + return err + } + + req, err := newUpdateClusterUpdateAzureClusterRequest(ctx, r, c) + if err != nil { + return err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created update: %#v", req) + body, err := marshalUpdateClusterUpdateAzureClusterRequest(c, req) + if err != nil { + return err + } + resp, err := dcl.SendRequest(ctx, c.Config, "PATCH", u, bytes.NewBuffer(body), c.Config.RetryProvider) + if err != nil { + return err + } + + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + err = o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET") + + if err != nil { + return 
err + } + + return nil +} + +func (c *Client) listClusterRaw(ctx context.Context, r *Cluster, pageToken string, pageSize int32) ([]byte, error) { + u, err := r.urlNormalized().listURL(c.Config.BasePath) + if err != nil { + return nil, err + } + + m := make(map[string]string) + if pageToken != "" { + m["pageToken"] = pageToken + } + + if pageSize != ClusterMaxPage { + m["pageSize"] = fmt.Sprintf("%v", pageSize) + } + + u, err = dcl.AddQueryParams(u, m) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + return ioutil.ReadAll(resp.Response.Body) +} + +type listClusterOperation struct { + AzureClusters []map[string]interface{} `json:"azureClusters"` + Token string `json:"nextPageToken"` +} + +func (c *Client) listCluster(ctx context.Context, r *Cluster, pageToken string, pageSize int32) ([]*Cluster, string, error) { + b, err := c.listClusterRaw(ctx, r, pageToken, pageSize) + if err != nil { + return nil, "", err + } + + var m listClusterOperation + if err := json.Unmarshal(b, &m); err != nil { + return nil, "", err + } + + var l []*Cluster + for _, v := range m.AzureClusters { + res, err := unmarshalMapCluster(v, c, r) + if err != nil { + return nil, m.Token, err + } + res.Project = r.Project + res.Location = r.Location + l = append(l, res) + } + + return l, m.Token, nil +} + +func (c *Client) deleteAllCluster(ctx context.Context, f func(*Cluster) bool, resources []*Cluster) error { + var errors []string + for _, res := range resources { + if f(res) { + // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. 
+ err := c.DeleteCluster(ctx, res) + if err != nil { + errors = append(errors, err.Error()) + } + } + } + if len(errors) > 0 { + return fmt.Errorf("%v", strings.Join(errors, "\n")) + } else { + return nil + } +} + +type deleteClusterOperation struct{} + +func (op *deleteClusterOperation) do(ctx context.Context, r *Cluster, c *Client) error { + r, err := c.GetCluster(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + c.Config.Logger.InfoWithContextf(ctx, "Cluster not found, returning. Original error: %v", err) + return nil + } + c.Config.Logger.WarningWithContextf(ctx, "GetCluster checking for existence. error: %v", err) + return err + } + + u, err := r.deleteURL(c.Config.BasePath) + if err != nil { + return err + } + + // Delete should never have a body + body := &bytes.Buffer{} + resp, err := dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider) + if err != nil { + return err + } + + // wait for object to be deleted. + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { + return err + } + + // We saw a race condition where for some successful delete operation, the Get calls returned resources for a short duration. + // This is the reason we are adding retry to handle that case. + retriesRemaining := 10 + dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + _, err := c.GetCluster(ctx, r) + if dcl.IsNotFound(err) { + return nil, nil + } + if retriesRemaining > 0 { + retriesRemaining-- + return &dcl.RetryDetails{}, dcl.OperationNotDone{} + } + return nil, dcl.NotDeletedError{ExistingResource: r} + }, c.Config.RetryProvider) + return nil +} + +// Create operations are similar to Update operations, although they do not have +// specific request objects. 
The Create request object is the json encoding of +// the resource, which is modified by res.marshal to form the base request body. +type createClusterOperation struct { + response map[string]interface{} +} + +func (op *createClusterOperation) FirstResponse() (map[string]interface{}, bool) { + return op.response, len(op.response) > 0 +} + +func (op *createClusterOperation) do(ctx context.Context, r *Cluster, c *Client) error { + c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r) + u, err := r.createURL(c.Config.BasePath) + if err != nil { + return err + } + + req, err := r.marshal(c) + if err != nil { + return err + } + resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider) + if err != nil { + return err + } + // wait for object to be created. + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { + c.Config.Logger.Warningf("Creation failed after waiting for operation: %v", err) + return err + } + c.Config.Logger.InfoWithContextf(ctx, "Successfully waited for operation") + op.response, _ = o.FirstResponse() + + if _, err := c.GetCluster(ctx, r); err != nil { + c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) + return err + } + + return nil +} + +func (c *Client) getClusterRaw(ctx context.Context, r *Cluster) ([]byte, error) { + + u, err := r.getURL(c.Config.BasePath) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + b, err := ioutil.ReadAll(resp.Response.Body) + if err != nil { + return nil, err + } + + return b, nil +} + +func (c *Client) clusterDiffsForRawDesired(ctx context.Context, rawDesired *Cluster, opts ...dcl.ApplyOption) 
(initial, desired *Cluster, diffs []*dcl.FieldDiff, err error) { + c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") + // First, let us see if the user provided a state hint. If they did, we will start fetching based on that. + var fetchState *Cluster + if sh := dcl.FetchStateHint(opts); sh != nil { + if r, ok := sh.(*Cluster); !ok { + c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected Cluster, got %T", sh) + } else { + fetchState = r + } + } + if fetchState == nil { + fetchState = rawDesired + } + + // 1.2: Retrieval of raw initial state from API + rawInitial, err := c.GetCluster(ctx, fetchState) + if rawInitial == nil { + if !dcl.IsNotFound(err) { + c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a Cluster resource already exists: %s", err) + return nil, nil, nil, fmt.Errorf("failed to retrieve Cluster resource: %v", err) + } + c.Config.Logger.InfoWithContext(ctx, "Found that Cluster resource did not exist.") + // Perform canonicalization to pick up defaults. + desired, err = canonicalizeClusterDesiredState(rawDesired, rawInitial) + return nil, desired, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Found initial state for Cluster: %v", rawInitial) + c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for Cluster: %v", rawDesired) + + // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. + if err := extractClusterFields(rawInitial); err != nil { + return nil, nil, nil, err + } + + // 1.3: Canonicalize raw initial state into initial state. + initial, err = canonicalizeClusterInitialState(rawInitial, rawDesired) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for Cluster: %v", initial) + + // 1.4: Canonicalize raw desired state into desired state. + desired, err = canonicalizeClusterDesiredState(rawDesired, rawInitial, opts...) 
+ if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for Cluster: %v", desired) + + // 2.1: Comparison of initial and desired state. + diffs, err = diffCluster(c, desired, initial, opts...) + return initial, desired, diffs, err +} + +func canonicalizeClusterInitialState(rawInitial, rawDesired *Cluster) (*Cluster, error) { + // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. + + if !dcl.IsZeroValue(rawInitial.Client) { + // Check if anything else is set. + if dcl.AnySet(rawInitial.AzureServicesAuthentication) { + rawInitial.Client = dcl.String("") + } + } + + if !dcl.IsZeroValue(rawInitial.AzureServicesAuthentication) { + // Check if anything else is set. + if dcl.AnySet(rawInitial.Client) { + rawInitial.AzureServicesAuthentication = EmptyClusterAzureServicesAuthentication + } + } + + return rawInitial, nil +} + +/* +* Canonicalizers +* +* These are responsible for converting either a user-specified config or a +* GCP API response to a standard format that can be used for difference checking. +* */ + +func canonicalizeClusterDesiredState(rawDesired, rawInitial *Cluster, opts ...dcl.ApplyOption) (*Cluster, error) { + + if rawInitial == nil { + // Since the initial state is empty, the desired state is all we have. + // We canonicalize the remaining nested objects with nil to pick up defaults. + rawDesired.AzureServicesAuthentication = canonicalizeClusterAzureServicesAuthentication(rawDesired.AzureServicesAuthentication, nil, opts...) + rawDesired.Networking = canonicalizeClusterNetworking(rawDesired.Networking, nil, opts...) + rawDesired.ControlPlane = canonicalizeClusterControlPlane(rawDesired.ControlPlane, nil, opts...) + rawDesired.Authorization = canonicalizeClusterAuthorization(rawDesired.Authorization, nil, opts...) + rawDesired.WorkloadIdentityConfig = canonicalizeClusterWorkloadIdentityConfig(rawDesired.WorkloadIdentityConfig, nil, opts...) 
+ rawDesired.Fleet = canonicalizeClusterFleet(rawDesired.Fleet, nil, opts...) +{{- if ne $.TargetVersionName "ga" }} + rawDesired.LoggingConfig = canonicalizeClusterLoggingConfig(rawDesired.LoggingConfig, nil, opts...) + rawDesired.MonitoringConfig = canonicalizeClusterMonitoringConfig(rawDesired.MonitoringConfig, nil, opts...) +{{- end }} + + return rawDesired, nil + } + canonicalDesired := &Cluster{} + if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawInitial.Name) { + canonicalDesired.Name = rawInitial.Name + } else { + canonicalDesired.Name = rawDesired.Name + } + if dcl.StringCanonicalize(rawDesired.Description, rawInitial.Description) { + canonicalDesired.Description = rawInitial.Description + } else { + canonicalDesired.Description = rawDesired.Description + } + if dcl.StringCanonicalize(rawDesired.AzureRegion, rawInitial.AzureRegion) { + canonicalDesired.AzureRegion = rawInitial.AzureRegion + } else { + canonicalDesired.AzureRegion = rawDesired.AzureRegion + } + if dcl.StringCanonicalize(rawDesired.ResourceGroupId, rawInitial.ResourceGroupId) { + canonicalDesired.ResourceGroupId = rawInitial.ResourceGroupId + } else { + canonicalDesired.ResourceGroupId = rawDesired.ResourceGroupId + } + if dcl.IsZeroValue(rawDesired.Client) || (dcl.IsEmptyValueIndirect(rawDesired.Client) && dcl.IsEmptyValueIndirect(rawInitial.Client)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + canonicalDesired.Client = rawInitial.Client + } else { + canonicalDesired.Client = rawDesired.Client + } + canonicalDesired.AzureServicesAuthentication = canonicalizeClusterAzureServicesAuthentication(rawDesired.AzureServicesAuthentication, rawInitial.AzureServicesAuthentication, opts...) + canonicalDesired.Networking = canonicalizeClusterNetworking(rawDesired.Networking, rawInitial.Networking, opts...) + canonicalDesired.ControlPlane = canonicalizeClusterControlPlane(rawDesired.ControlPlane, rawInitial.ControlPlane, opts...) 
+ canonicalDesired.Authorization = canonicalizeClusterAuthorization(rawDesired.Authorization, rawInitial.Authorization, opts...) + if dcl.IsZeroValue(rawDesired.Annotations) || (dcl.IsEmptyValueIndirect(rawDesired.Annotations) && dcl.IsEmptyValueIndirect(rawInitial.Annotations)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + canonicalDesired.Annotations = rawInitial.Annotations + } else { + canonicalDesired.Annotations = rawDesired.Annotations + } + if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) { + canonicalDesired.Project = rawInitial.Project + } else { + canonicalDesired.Project = rawDesired.Project + } + if dcl.NameToSelfLink(rawDesired.Location, rawInitial.Location) { + canonicalDesired.Location = rawInitial.Location + } else { + canonicalDesired.Location = rawDesired.Location + } + canonicalDesired.Fleet = canonicalizeClusterFleet(rawDesired.Fleet, rawInitial.Fleet, opts...) +{{- if ne $.TargetVersionName "ga" }} + canonicalDesired.LoggingConfig = canonicalizeClusterLoggingConfig(rawDesired.LoggingConfig, rawInitial.LoggingConfig, opts...) + canonicalDesired.MonitoringConfig = canonicalizeClusterMonitoringConfig(rawDesired.MonitoringConfig, rawInitial.MonitoringConfig, opts...) +{{- end }} + + if canonicalDesired.Client != nil { + // Check if anything else is set. + if dcl.AnySet(rawDesired.AzureServicesAuthentication) { + canonicalDesired.Client = dcl.String("") + } + } + + if canonicalDesired.AzureServicesAuthentication != nil { + // Check if anything else is set. 
+ if dcl.AnySet(rawDesired.Client) { + canonicalDesired.AzureServicesAuthentication = EmptyClusterAzureServicesAuthentication + } + } + + return canonicalDesired, nil +} + +func canonicalizeClusterNewState(c *Client, rawNew, rawDesired *Cluster) (*Cluster, error) { + + if dcl.IsEmptyValueIndirect(rawNew.Name) && dcl.IsEmptyValueIndirect(rawDesired.Name) { + rawNew.Name = rawDesired.Name + } else { + if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawNew.Name) { + rawNew.Name = rawDesired.Name + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Description) && dcl.IsEmptyValueIndirect(rawDesired.Description) { + rawNew.Description = rawDesired.Description + } else { + if dcl.StringCanonicalize(rawDesired.Description, rawNew.Description) { + rawNew.Description = rawDesired.Description + } + } + + if dcl.IsEmptyValueIndirect(rawNew.AzureRegion) && dcl.IsEmptyValueIndirect(rawDesired.AzureRegion) { + rawNew.AzureRegion = rawDesired.AzureRegion + } else { + if dcl.StringCanonicalize(rawDesired.AzureRegion, rawNew.AzureRegion) { + rawNew.AzureRegion = rawDesired.AzureRegion + } + } + + if dcl.IsEmptyValueIndirect(rawNew.ResourceGroupId) && dcl.IsEmptyValueIndirect(rawDesired.ResourceGroupId) { + rawNew.ResourceGroupId = rawDesired.ResourceGroupId + } else { + if dcl.StringCanonicalize(rawDesired.ResourceGroupId, rawNew.ResourceGroupId) { + rawNew.ResourceGroupId = rawDesired.ResourceGroupId + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Client) && dcl.IsEmptyValueIndirect(rawDesired.Client) { + rawNew.Client = rawDesired.Client + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.AzureServicesAuthentication) && dcl.IsEmptyValueIndirect(rawDesired.AzureServicesAuthentication) { + rawNew.AzureServicesAuthentication = rawDesired.AzureServicesAuthentication + } else { + rawNew.AzureServicesAuthentication = canonicalizeNewClusterAzureServicesAuthentication(c, rawDesired.AzureServicesAuthentication, rawNew.AzureServicesAuthentication) + } + + if 
dcl.IsEmptyValueIndirect(rawNew.Networking) && dcl.IsEmptyValueIndirect(rawDesired.Networking) { + rawNew.Networking = rawDesired.Networking + } else { + rawNew.Networking = canonicalizeNewClusterNetworking(c, rawDesired.Networking, rawNew.Networking) + } + + if dcl.IsEmptyValueIndirect(rawNew.ControlPlane) && dcl.IsEmptyValueIndirect(rawDesired.ControlPlane) { + rawNew.ControlPlane = rawDesired.ControlPlane + } else { + rawNew.ControlPlane = canonicalizeNewClusterControlPlane(c, rawDesired.ControlPlane, rawNew.ControlPlane) + } + + if dcl.IsEmptyValueIndirect(rawNew.Authorization) && dcl.IsEmptyValueIndirect(rawDesired.Authorization) { + rawNew.Authorization = rawDesired.Authorization + } else { + rawNew.Authorization = canonicalizeNewClusterAuthorization(c, rawDesired.Authorization, rawNew.Authorization) + } + + if dcl.IsEmptyValueIndirect(rawNew.State) && dcl.IsEmptyValueIndirect(rawDesired.State) { + rawNew.State = rawDesired.State + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.Endpoint) && dcl.IsEmptyValueIndirect(rawDesired.Endpoint) { + rawNew.Endpoint = rawDesired.Endpoint + } else { + if dcl.StringCanonicalize(rawDesired.Endpoint, rawNew.Endpoint) { + rawNew.Endpoint = rawDesired.Endpoint + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Uid) && dcl.IsEmptyValueIndirect(rawDesired.Uid) { + rawNew.Uid = rawDesired.Uid + } else { + if dcl.StringCanonicalize(rawDesired.Uid, rawNew.Uid) { + rawNew.Uid = rawDesired.Uid + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Reconciling) && dcl.IsEmptyValueIndirect(rawDesired.Reconciling) { + rawNew.Reconciling = rawDesired.Reconciling + } else { + if dcl.BoolCanonicalize(rawDesired.Reconciling, rawNew.Reconciling) { + rawNew.Reconciling = rawDesired.Reconciling + } + } + + if dcl.IsEmptyValueIndirect(rawNew.CreateTime) && dcl.IsEmptyValueIndirect(rawDesired.CreateTime) { + rawNew.CreateTime = rawDesired.CreateTime + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.UpdateTime) && 
dcl.IsEmptyValueIndirect(rawDesired.UpdateTime) { + rawNew.UpdateTime = rawDesired.UpdateTime + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.Etag) && dcl.IsEmptyValueIndirect(rawDesired.Etag) { + rawNew.Etag = rawDesired.Etag + } else { + if dcl.StringCanonicalize(rawDesired.Etag, rawNew.Etag) { + rawNew.Etag = rawDesired.Etag + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Annotations) && dcl.IsEmptyValueIndirect(rawDesired.Annotations) { + rawNew.Annotations = rawDesired.Annotations + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.WorkloadIdentityConfig) && dcl.IsEmptyValueIndirect(rawDesired.WorkloadIdentityConfig) { + rawNew.WorkloadIdentityConfig = rawDesired.WorkloadIdentityConfig + } else { + rawNew.WorkloadIdentityConfig = canonicalizeNewClusterWorkloadIdentityConfig(c, rawDesired.WorkloadIdentityConfig, rawNew.WorkloadIdentityConfig) + } + + rawNew.Project = rawDesired.Project + + rawNew.Location = rawDesired.Location + + if dcl.IsEmptyValueIndirect(rawNew.Fleet) && dcl.IsEmptyValueIndirect(rawDesired.Fleet) { + rawNew.Fleet = rawDesired.Fleet + } else { + rawNew.Fleet = canonicalizeNewClusterFleet(c, rawDesired.Fleet, rawNew.Fleet) +{{- if ne $.TargetVersionName "ga" }} + } + + if dcl.IsEmptyValueIndirect(rawNew.LoggingConfig) && dcl.IsEmptyValueIndirect(rawDesired.LoggingConfig) { + rawNew.LoggingConfig = rawDesired.LoggingConfig + } else { + rawNew.LoggingConfig = canonicalizeNewClusterLoggingConfig(c, rawDesired.LoggingConfig, rawNew.LoggingConfig) + } + + if dcl.IsEmptyValueIndirect(rawNew.MonitoringConfig) && dcl.IsEmptyValueIndirect(rawDesired.MonitoringConfig) { + rawNew.MonitoringConfig = rawDesired.MonitoringConfig + } else { + rawNew.MonitoringConfig = canonicalizeNewClusterMonitoringConfig(c, rawDesired.MonitoringConfig, rawNew.MonitoringConfig) +{{- end }} + } + + return rawNew, nil +} + +func canonicalizeClusterAzureServicesAuthentication(des, initial *ClusterAzureServicesAuthentication, opts ...dcl.ApplyOption) 
*ClusterAzureServicesAuthentication { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterAzureServicesAuthentication{} + + if dcl.StringCanonicalize(des.TenantId, initial.TenantId) || dcl.IsZeroValue(des.TenantId) { + cDes.TenantId = initial.TenantId + } else { + cDes.TenantId = des.TenantId + } + if dcl.StringCanonicalize(des.ApplicationId, initial.ApplicationId) || dcl.IsZeroValue(des.ApplicationId) { + cDes.ApplicationId = initial.ApplicationId + } else { + cDes.ApplicationId = des.ApplicationId + } + + return cDes +} + +func canonicalizeClusterAzureServicesAuthenticationSlice(des, initial []ClusterAzureServicesAuthentication, opts ...dcl.ApplyOption) []ClusterAzureServicesAuthentication { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterAzureServicesAuthentication, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterAzureServicesAuthentication(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterAzureServicesAuthentication, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterAzureServicesAuthentication(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterAzureServicesAuthentication(c *Client, des, nw *ClusterAzureServicesAuthentication) *ClusterAzureServicesAuthentication { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterAzureServicesAuthentication while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.TenantId, nw.TenantId) { + nw.TenantId = des.TenantId + } + if dcl.StringCanonicalize(des.ApplicationId, nw.ApplicationId) { + nw.ApplicationId = des.ApplicationId + } + + return nw +} + +func canonicalizeNewClusterAzureServicesAuthenticationSet(c *Client, des, nw []ClusterAzureServicesAuthentication) []ClusterAzureServicesAuthentication { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterAzureServicesAuthentication + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterAzureServicesAuthenticationNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterAzureServicesAuthentication(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterAzureServicesAuthenticationSlice(c *Client, des, nw []ClusterAzureServicesAuthentication) []ClusterAzureServicesAuthentication { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterAzureServicesAuthentication + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterAzureServicesAuthentication(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterNetworking(des, initial *ClusterNetworking, opts ...dcl.ApplyOption) *ClusterNetworking { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterNetworking{} + + if dcl.StringCanonicalize(des.VirtualNetworkId, initial.VirtualNetworkId) || dcl.IsZeroValue(des.VirtualNetworkId) { + cDes.VirtualNetworkId = initial.VirtualNetworkId + } else { + cDes.VirtualNetworkId = des.VirtualNetworkId + } + if dcl.StringArrayCanonicalize(des.PodAddressCidrBlocks, initial.PodAddressCidrBlocks) { + cDes.PodAddressCidrBlocks = initial.PodAddressCidrBlocks + } else { + cDes.PodAddressCidrBlocks = des.PodAddressCidrBlocks + } + if dcl.StringArrayCanonicalize(des.ServiceAddressCidrBlocks, initial.ServiceAddressCidrBlocks) { + cDes.ServiceAddressCidrBlocks = initial.ServiceAddressCidrBlocks + } else { + cDes.ServiceAddressCidrBlocks = des.ServiceAddressCidrBlocks + } + + return cDes +} + +func canonicalizeClusterNetworkingSlice(des, initial []ClusterNetworking, opts ...dcl.ApplyOption) []ClusterNetworking { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterNetworking, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterNetworking(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterNetworking, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterNetworking(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterNetworking(c *Client, des, nw *ClusterNetworking) *ClusterNetworking { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterNetworking while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.VirtualNetworkId, nw.VirtualNetworkId) { + nw.VirtualNetworkId = des.VirtualNetworkId + } + if dcl.StringArrayCanonicalize(des.PodAddressCidrBlocks, nw.PodAddressCidrBlocks) { + nw.PodAddressCidrBlocks = des.PodAddressCidrBlocks + } + if dcl.StringArrayCanonicalize(des.ServiceAddressCidrBlocks, nw.ServiceAddressCidrBlocks) { + nw.ServiceAddressCidrBlocks = des.ServiceAddressCidrBlocks + } + + return nw +} + +func canonicalizeNewClusterNetworkingSet(c *Client, des, nw []ClusterNetworking) []ClusterNetworking { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterNetworking + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterNetworkingNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterNetworking(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterNetworkingSlice(c *Client, des, nw []ClusterNetworking) []ClusterNetworking { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterNetworking + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterNetworking(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterControlPlane(des, initial *ClusterControlPlane, opts ...dcl.ApplyOption) *ClusterControlPlane { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterControlPlane{} + + if dcl.StringCanonicalize(des.Version, initial.Version) || dcl.IsZeroValue(des.Version) { + cDes.Version = initial.Version + } else { + cDes.Version = des.Version + } + if dcl.StringCanonicalize(des.SubnetId, initial.SubnetId) || dcl.IsZeroValue(des.SubnetId) { + cDes.SubnetId = initial.SubnetId + } else { + cDes.SubnetId = des.SubnetId + } + if dcl.StringCanonicalize(des.VmSize, initial.VmSize) || dcl.IsZeroValue(des.VmSize) { + cDes.VmSize = initial.VmSize + } else { + cDes.VmSize = des.VmSize + } + cDes.SshConfig = canonicalizeClusterControlPlaneSshConfig(des.SshConfig, initial.SshConfig, opts...) + cDes.RootVolume = canonicalizeClusterControlPlaneRootVolume(des.RootVolume, initial.RootVolume, opts...) + cDes.MainVolume = canonicalizeClusterControlPlaneMainVolume(des.MainVolume, initial.MainVolume, opts...) + cDes.DatabaseEncryption = canonicalizeClusterControlPlaneDatabaseEncryption(des.DatabaseEncryption, initial.DatabaseEncryption, opts...) + if dcl.IsZeroValue(des.Tags) || (dcl.IsEmptyValueIndirect(des.Tags) && dcl.IsEmptyValueIndirect(initial.Tags)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Tags = initial.Tags + } else { + cDes.Tags = des.Tags + } + cDes.ProxyConfig = canonicalizeClusterControlPlaneProxyConfig(des.ProxyConfig, initial.ProxyConfig, opts...) + cDes.ReplicaPlacements = canonicalizeClusterControlPlaneReplicaPlacementsSlice(des.ReplicaPlacements, initial.ReplicaPlacements, opts...) 
+ + return cDes +} + +func canonicalizeClusterControlPlaneSlice(des, initial []ClusterControlPlane, opts ...dcl.ApplyOption) []ClusterControlPlane { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterControlPlane, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterControlPlane(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterControlPlane, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterControlPlane(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterControlPlane(c *Client, des, nw *ClusterControlPlane) *ClusterControlPlane { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterControlPlane while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Version, nw.Version) { + nw.Version = des.Version + } + if dcl.StringCanonicalize(des.SubnetId, nw.SubnetId) { + nw.SubnetId = des.SubnetId + } + if dcl.StringCanonicalize(des.VmSize, nw.VmSize) { + nw.VmSize = des.VmSize + } + nw.SshConfig = canonicalizeNewClusterControlPlaneSshConfig(c, des.SshConfig, nw.SshConfig) + nw.RootVolume = canonicalizeNewClusterControlPlaneRootVolume(c, des.RootVolume, nw.RootVolume) + nw.MainVolume = canonicalizeNewClusterControlPlaneMainVolume(c, des.MainVolume, nw.MainVolume) + nw.DatabaseEncryption = canonicalizeNewClusterControlPlaneDatabaseEncryption(c, des.DatabaseEncryption, nw.DatabaseEncryption) + nw.ProxyConfig = canonicalizeNewClusterControlPlaneProxyConfig(c, des.ProxyConfig, nw.ProxyConfig) + nw.ReplicaPlacements = canonicalizeNewClusterControlPlaneReplicaPlacementsSlice(c, des.ReplicaPlacements, nw.ReplicaPlacements) + + return nw +} + +func 
canonicalizeNewClusterControlPlaneSet(c *Client, des, nw []ClusterControlPlane) []ClusterControlPlane { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterControlPlane + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterControlPlaneNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterControlPlane(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterControlPlaneSlice(c *Client, des, nw []ClusterControlPlane) []ClusterControlPlane { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterControlPlane + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterControlPlane(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterControlPlaneSshConfig(des, initial *ClusterControlPlaneSshConfig, opts ...dcl.ApplyOption) *ClusterControlPlaneSshConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterControlPlaneSshConfig{} + + if dcl.StringCanonicalize(des.AuthorizedKey, initial.AuthorizedKey) || dcl.IsZeroValue(des.AuthorizedKey) { + cDes.AuthorizedKey = initial.AuthorizedKey + } else { + cDes.AuthorizedKey = des.AuthorizedKey + } + + return cDes +} + +func canonicalizeClusterControlPlaneSshConfigSlice(des, initial []ClusterControlPlaneSshConfig, opts ...dcl.ApplyOption) []ClusterControlPlaneSshConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterControlPlaneSshConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterControlPlaneSshConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterControlPlaneSshConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterControlPlaneSshConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterControlPlaneSshConfig(c *Client, des, nw *ClusterControlPlaneSshConfig) *ClusterControlPlaneSshConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterControlPlaneSshConfig while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.AuthorizedKey, nw.AuthorizedKey) { + nw.AuthorizedKey = des.AuthorizedKey + } + + return nw +} + +func canonicalizeNewClusterControlPlaneSshConfigSet(c *Client, des, nw []ClusterControlPlaneSshConfig) []ClusterControlPlaneSshConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterControlPlaneSshConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterControlPlaneSshConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterControlPlaneSshConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterControlPlaneSshConfigSlice(c *Client, des, nw []ClusterControlPlaneSshConfig) []ClusterControlPlaneSshConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterControlPlaneSshConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterControlPlaneSshConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterControlPlaneRootVolume(des, initial *ClusterControlPlaneRootVolume, opts ...dcl.ApplyOption) *ClusterControlPlaneRootVolume { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterControlPlaneRootVolume{} + + if dcl.IsZeroValue(des.SizeGib) || (dcl.IsEmptyValueIndirect(des.SizeGib) && dcl.IsEmptyValueIndirect(initial.SizeGib)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.SizeGib = initial.SizeGib + } else { + cDes.SizeGib = des.SizeGib + } + + return cDes +} + +func canonicalizeClusterControlPlaneRootVolumeSlice(des, initial []ClusterControlPlaneRootVolume, opts ...dcl.ApplyOption) []ClusterControlPlaneRootVolume { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterControlPlaneRootVolume, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterControlPlaneRootVolume(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterControlPlaneRootVolume, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterControlPlaneRootVolume(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterControlPlaneRootVolume(c *Client, des, nw *ClusterControlPlaneRootVolume) *ClusterControlPlaneRootVolume { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterControlPlaneRootVolume while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewClusterControlPlaneRootVolumeSet(c *Client, des, nw []ClusterControlPlaneRootVolume) []ClusterControlPlaneRootVolume { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterControlPlaneRootVolume + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterControlPlaneRootVolumeNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterControlPlaneRootVolume(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterControlPlaneRootVolumeSlice(c *Client, des, nw []ClusterControlPlaneRootVolume) []ClusterControlPlaneRootVolume { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []ClusterControlPlaneRootVolume + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterControlPlaneRootVolume(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterControlPlaneMainVolume(des, initial *ClusterControlPlaneMainVolume, opts ...dcl.ApplyOption) *ClusterControlPlaneMainVolume { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterControlPlaneMainVolume{} + + if dcl.IsZeroValue(des.SizeGib) || (dcl.IsEmptyValueIndirect(des.SizeGib) && dcl.IsEmptyValueIndirect(initial.SizeGib)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.SizeGib = initial.SizeGib + } else { + cDes.SizeGib = des.SizeGib + } + + return cDes +} + +func canonicalizeClusterControlPlaneMainVolumeSlice(des, initial []ClusterControlPlaneMainVolume, opts ...dcl.ApplyOption) []ClusterControlPlaneMainVolume { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterControlPlaneMainVolume, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterControlPlaneMainVolume(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterControlPlaneMainVolume, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterControlPlaneMainVolume(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterControlPlaneMainVolume(c *Client, des, nw *ClusterControlPlaneMainVolume) *ClusterControlPlaneMainVolume { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterControlPlaneMainVolume while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewClusterControlPlaneMainVolumeSet(c *Client, des, nw []ClusterControlPlaneMainVolume) []ClusterControlPlaneMainVolume { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterControlPlaneMainVolume + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterControlPlaneMainVolumeNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterControlPlaneMainVolume(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) 
+ } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterControlPlaneMainVolumeSlice(c *Client, des, nw []ClusterControlPlaneMainVolume) []ClusterControlPlaneMainVolume { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []ClusterControlPlaneMainVolume + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterControlPlaneMainVolume(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterControlPlaneDatabaseEncryption(des, initial *ClusterControlPlaneDatabaseEncryption, opts ...dcl.ApplyOption) *ClusterControlPlaneDatabaseEncryption { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterControlPlaneDatabaseEncryption{} + + if dcl.StringCanonicalize(des.KeyId, initial.KeyId) || dcl.IsZeroValue(des.KeyId) { + cDes.KeyId = initial.KeyId + } else { + cDes.KeyId = des.KeyId + } + + return cDes +} + +func canonicalizeClusterControlPlaneDatabaseEncryptionSlice(des, initial []ClusterControlPlaneDatabaseEncryption, opts ...dcl.ApplyOption) []ClusterControlPlaneDatabaseEncryption { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterControlPlaneDatabaseEncryption, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterControlPlaneDatabaseEncryption(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterControlPlaneDatabaseEncryption, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterControlPlaneDatabaseEncryption(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterControlPlaneDatabaseEncryption(c *Client, des, nw *ClusterControlPlaneDatabaseEncryption) *ClusterControlPlaneDatabaseEncryption { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterControlPlaneDatabaseEncryption while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.KeyId, nw.KeyId) { + nw.KeyId = des.KeyId + } + + return nw +} + +func canonicalizeNewClusterControlPlaneDatabaseEncryptionSet(c *Client, des, nw []ClusterControlPlaneDatabaseEncryption) []ClusterControlPlaneDatabaseEncryption { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterControlPlaneDatabaseEncryption + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterControlPlaneDatabaseEncryptionNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterControlPlaneDatabaseEncryption(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterControlPlaneDatabaseEncryptionSlice(c *Client, des, nw []ClusterControlPlaneDatabaseEncryption) []ClusterControlPlaneDatabaseEncryption { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterControlPlaneDatabaseEncryption + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterControlPlaneDatabaseEncryption(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterControlPlaneProxyConfig(des, initial *ClusterControlPlaneProxyConfig, opts ...dcl.ApplyOption) *ClusterControlPlaneProxyConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterControlPlaneProxyConfig{} + + if dcl.StringCanonicalize(des.ResourceGroupId, initial.ResourceGroupId) || dcl.IsZeroValue(des.ResourceGroupId) { + cDes.ResourceGroupId = initial.ResourceGroupId + } else { + cDes.ResourceGroupId = des.ResourceGroupId + } + if dcl.StringCanonicalize(des.SecretId, initial.SecretId) || dcl.IsZeroValue(des.SecretId) { + cDes.SecretId = initial.SecretId + } else { + cDes.SecretId = des.SecretId + } + + return cDes +} + +func canonicalizeClusterControlPlaneProxyConfigSlice(des, initial []ClusterControlPlaneProxyConfig, opts ...dcl.ApplyOption) []ClusterControlPlaneProxyConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterControlPlaneProxyConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterControlPlaneProxyConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterControlPlaneProxyConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterControlPlaneProxyConfig(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterControlPlaneProxyConfig(c *Client, des, nw *ClusterControlPlaneProxyConfig) *ClusterControlPlaneProxyConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterControlPlaneProxyConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.ResourceGroupId, nw.ResourceGroupId) { + nw.ResourceGroupId = des.ResourceGroupId + } + if dcl.StringCanonicalize(des.SecretId, nw.SecretId) { + nw.SecretId = des.SecretId + } + + return nw +} + +func canonicalizeNewClusterControlPlaneProxyConfigSet(c *Client, des, nw []ClusterControlPlaneProxyConfig) []ClusterControlPlaneProxyConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterControlPlaneProxyConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterControlPlaneProxyConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterControlPlaneProxyConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterControlPlaneProxyConfigSlice(c *Client, des, nw []ClusterControlPlaneProxyConfig) []ClusterControlPlaneProxyConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterControlPlaneProxyConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterControlPlaneProxyConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterControlPlaneReplicaPlacements(des, initial *ClusterControlPlaneReplicaPlacements, opts ...dcl.ApplyOption) *ClusterControlPlaneReplicaPlacements { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterControlPlaneReplicaPlacements{} + + if dcl.StringCanonicalize(des.SubnetId, initial.SubnetId) || dcl.IsZeroValue(des.SubnetId) { + cDes.SubnetId = initial.SubnetId + } else { + cDes.SubnetId = des.SubnetId + } + if dcl.StringCanonicalize(des.AzureAvailabilityZone, initial.AzureAvailabilityZone) || dcl.IsZeroValue(des.AzureAvailabilityZone) { + cDes.AzureAvailabilityZone = initial.AzureAvailabilityZone + } else { + cDes.AzureAvailabilityZone = des.AzureAvailabilityZone + } + + return cDes +} + +func canonicalizeClusterControlPlaneReplicaPlacementsSlice(des, initial []ClusterControlPlaneReplicaPlacements, opts ...dcl.ApplyOption) []ClusterControlPlaneReplicaPlacements { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterControlPlaneReplicaPlacements, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterControlPlaneReplicaPlacements(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterControlPlaneReplicaPlacements, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterControlPlaneReplicaPlacements(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterControlPlaneReplicaPlacements(c *Client, des, nw *ClusterControlPlaneReplicaPlacements) *ClusterControlPlaneReplicaPlacements { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterControlPlaneReplicaPlacements while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.SubnetId, nw.SubnetId) { + nw.SubnetId = des.SubnetId + } + if dcl.StringCanonicalize(des.AzureAvailabilityZone, nw.AzureAvailabilityZone) { + nw.AzureAvailabilityZone = des.AzureAvailabilityZone + } + + return nw +} + +func canonicalizeNewClusterControlPlaneReplicaPlacementsSet(c *Client, des, nw []ClusterControlPlaneReplicaPlacements) []ClusterControlPlaneReplicaPlacements { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterControlPlaneReplicaPlacements + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterControlPlaneReplicaPlacementsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterControlPlaneReplicaPlacements(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterControlPlaneReplicaPlacementsSlice(c *Client, des, nw []ClusterControlPlaneReplicaPlacements) []ClusterControlPlaneReplicaPlacements { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterControlPlaneReplicaPlacements + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterControlPlaneReplicaPlacements(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterAuthorization(des, initial *ClusterAuthorization, opts ...dcl.ApplyOption) *ClusterAuthorization { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterAuthorization{} + + cDes.AdminUsers = canonicalizeClusterAuthorizationAdminUsersSlice(des.AdminUsers, initial.AdminUsers, opts...) + cDes.AdminGroups = canonicalizeClusterAuthorizationAdminGroupsSlice(des.AdminGroups, initial.AdminGroups, opts...) + + return cDes +} + +func canonicalizeClusterAuthorizationSlice(des, initial []ClusterAuthorization, opts ...dcl.ApplyOption) []ClusterAuthorization { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterAuthorization, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterAuthorization(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterAuthorization, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterAuthorization(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterAuthorization(c *Client, des, nw *ClusterAuthorization) *ClusterAuthorization { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterAuthorization while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + nw.AdminUsers = canonicalizeNewClusterAuthorizationAdminUsersSlice(c, des.AdminUsers, nw.AdminUsers) + nw.AdminGroups = canonicalizeNewClusterAuthorizationAdminGroupsSlice(c, des.AdminGroups, nw.AdminGroups) + + return nw +} + +func canonicalizeNewClusterAuthorizationSet(c *Client, des, nw []ClusterAuthorization) []ClusterAuthorization { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterAuthorization + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterAuthorizationNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterAuthorization(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterAuthorizationSlice(c *Client, des, nw []ClusterAuthorization) []ClusterAuthorization { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterAuthorization + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterAuthorization(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterAuthorizationAdminUsers(des, initial *ClusterAuthorizationAdminUsers, opts ...dcl.ApplyOption) *ClusterAuthorizationAdminUsers { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterAuthorizationAdminUsers{} + + if dcl.StringCanonicalize(des.Username, initial.Username) || dcl.IsZeroValue(des.Username) { + cDes.Username = initial.Username + } else { + cDes.Username = des.Username + } + + return cDes +} + +func canonicalizeClusterAuthorizationAdminUsersSlice(des, initial []ClusterAuthorizationAdminUsers, opts ...dcl.ApplyOption) []ClusterAuthorizationAdminUsers { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterAuthorizationAdminUsers, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterAuthorizationAdminUsers(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterAuthorizationAdminUsers, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterAuthorizationAdminUsers(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterAuthorizationAdminUsers(c *Client, des, nw *ClusterAuthorizationAdminUsers) *ClusterAuthorizationAdminUsers { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterAuthorizationAdminUsers while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Username, nw.Username) { + nw.Username = des.Username + } + + return nw +} + +func canonicalizeNewClusterAuthorizationAdminUsersSet(c *Client, des, nw []ClusterAuthorizationAdminUsers) []ClusterAuthorizationAdminUsers { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterAuthorizationAdminUsers + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterAuthorizationAdminUsersNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterAuthorizationAdminUsers(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterAuthorizationAdminUsersSlice(c *Client, des, nw []ClusterAuthorizationAdminUsers) []ClusterAuthorizationAdminUsers { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterAuthorizationAdminUsers + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterAuthorizationAdminUsers(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterAuthorizationAdminGroups(des, initial *ClusterAuthorizationAdminGroups, opts ...dcl.ApplyOption) *ClusterAuthorizationAdminGroups { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterAuthorizationAdminGroups{} + + if dcl.StringCanonicalize(des.Group, initial.Group) || dcl.IsZeroValue(des.Group) { + cDes.Group = initial.Group + } else { + cDes.Group = des.Group + } + + return cDes +} + +func canonicalizeClusterAuthorizationAdminGroupsSlice(des, initial []ClusterAuthorizationAdminGroups, opts ...dcl.ApplyOption) []ClusterAuthorizationAdminGroups { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterAuthorizationAdminGroups, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterAuthorizationAdminGroups(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterAuthorizationAdminGroups, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterAuthorizationAdminGroups(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterAuthorizationAdminGroups(c *Client, des, nw *ClusterAuthorizationAdminGroups) *ClusterAuthorizationAdminGroups { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterAuthorizationAdminGroups while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Group, nw.Group) { + nw.Group = des.Group + } + + return nw +} + +func canonicalizeNewClusterAuthorizationAdminGroupsSet(c *Client, des, nw []ClusterAuthorizationAdminGroups) []ClusterAuthorizationAdminGroups { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterAuthorizationAdminGroups + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterAuthorizationAdminGroupsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterAuthorizationAdminGroups(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterAuthorizationAdminGroupsSlice(c *Client, des, nw []ClusterAuthorizationAdminGroups) []ClusterAuthorizationAdminGroups { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterAuthorizationAdminGroups + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterAuthorizationAdminGroups(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterWorkloadIdentityConfig(des, initial *ClusterWorkloadIdentityConfig, opts ...dcl.ApplyOption) *ClusterWorkloadIdentityConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterWorkloadIdentityConfig{} + + if dcl.StringCanonicalize(des.IssuerUri, initial.IssuerUri) || dcl.IsZeroValue(des.IssuerUri) { + cDes.IssuerUri = initial.IssuerUri + } else { + cDes.IssuerUri = des.IssuerUri + } + if dcl.StringCanonicalize(des.WorkloadPool, initial.WorkloadPool) || dcl.IsZeroValue(des.WorkloadPool) { + cDes.WorkloadPool = initial.WorkloadPool + } else { + cDes.WorkloadPool = des.WorkloadPool + } + if dcl.StringCanonicalize(des.IdentityProvider, initial.IdentityProvider) || dcl.IsZeroValue(des.IdentityProvider) { + cDes.IdentityProvider = initial.IdentityProvider + } else { + cDes.IdentityProvider = des.IdentityProvider + } + + return cDes +} + +func canonicalizeClusterWorkloadIdentityConfigSlice(des, initial []ClusterWorkloadIdentityConfig, opts ...dcl.ApplyOption) []ClusterWorkloadIdentityConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterWorkloadIdentityConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterWorkloadIdentityConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterWorkloadIdentityConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterWorkloadIdentityConfig(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterWorkloadIdentityConfig(c *Client, des, nw *ClusterWorkloadIdentityConfig) *ClusterWorkloadIdentityConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterWorkloadIdentityConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.IssuerUri, nw.IssuerUri) { + nw.IssuerUri = des.IssuerUri + } + if dcl.StringCanonicalize(des.WorkloadPool, nw.WorkloadPool) { + nw.WorkloadPool = des.WorkloadPool + } + if dcl.StringCanonicalize(des.IdentityProvider, nw.IdentityProvider) { + nw.IdentityProvider = des.IdentityProvider + } + + return nw +} + +func canonicalizeNewClusterWorkloadIdentityConfigSet(c *Client, des, nw []ClusterWorkloadIdentityConfig) []ClusterWorkloadIdentityConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterWorkloadIdentityConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterWorkloadIdentityConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterWorkloadIdentityConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterWorkloadIdentityConfigSlice(c *Client, des, nw []ClusterWorkloadIdentityConfig) []ClusterWorkloadIdentityConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterWorkloadIdentityConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterWorkloadIdentityConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterFleet(des, initial *ClusterFleet, opts ...dcl.ApplyOption) *ClusterFleet { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterFleet{} + + if dcl.PartialSelfLinkToSelfLink(des.Project, initial.Project) || dcl.IsZeroValue(des.Project) { + cDes.Project = initial.Project + } else { + cDes.Project = des.Project + } + + return cDes +} + +func canonicalizeClusterFleetSlice(des, initial []ClusterFleet, opts ...dcl.ApplyOption) []ClusterFleet { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterFleet, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterFleet(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterFleet, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterFleet(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterFleet(c *Client, des, nw *ClusterFleet) *ClusterFleet { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterFleet while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.PartialSelfLinkToSelfLink(des.Project, nw.Project) { + nw.Project = des.Project + } + if dcl.StringCanonicalize(des.Membership, nw.Membership) { + nw.Membership = des.Membership + } + + return nw +} + +func canonicalizeNewClusterFleetSet(c *Client, des, nw []ClusterFleet) []ClusterFleet { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterFleet + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterFleetNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterFleet(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterFleetSlice(c *Client, des, nw []ClusterFleet) []ClusterFleet { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []ClusterFleet + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterFleet(c, &d, &n)) +{{- if ne $.TargetVersionName "ga" }} + } + + return items +} + +func canonicalizeClusterLoggingConfig(des, initial *ClusterLoggingConfig, opts ...dcl.ApplyOption) *ClusterLoggingConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterLoggingConfig{} + + cDes.ComponentConfig = canonicalizeClusterLoggingConfigComponentConfig(des.ComponentConfig, initial.ComponentConfig, opts...) 
+ + return cDes +} + +func canonicalizeClusterLoggingConfigSlice(des, initial []ClusterLoggingConfig, opts ...dcl.ApplyOption) []ClusterLoggingConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterLoggingConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterLoggingConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterLoggingConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterLoggingConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterLoggingConfig(c *Client, des, nw *ClusterLoggingConfig) *ClusterLoggingConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterLoggingConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + nw.ComponentConfig = canonicalizeNewClusterLoggingConfigComponentConfig(c, des.ComponentConfig, nw.ComponentConfig) + + return nw +} + +func canonicalizeNewClusterLoggingConfigSet(c *Client, des, nw []ClusterLoggingConfig) []ClusterLoggingConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterLoggingConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterLoggingConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterLoggingConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) 
+ + return items +} + +func canonicalizeNewClusterLoggingConfigSlice(c *Client, des, nw []ClusterLoggingConfig) []ClusterLoggingConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []ClusterLoggingConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterLoggingConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterLoggingConfigComponentConfig(des, initial *ClusterLoggingConfigComponentConfig, opts ...dcl.ApplyOption) *ClusterLoggingConfigComponentConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterLoggingConfigComponentConfig{} + + if dcl.IsZeroValue(des.EnableComponents) || (dcl.IsEmptyValueIndirect(des.EnableComponents) && dcl.IsEmptyValueIndirect(initial.EnableComponents)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.EnableComponents = initial.EnableComponents + } else { + cDes.EnableComponents = des.EnableComponents + } + + return cDes +} + +func canonicalizeClusterLoggingConfigComponentConfigSlice(des, initial []ClusterLoggingConfigComponentConfig, opts ...dcl.ApplyOption) []ClusterLoggingConfigComponentConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterLoggingConfigComponentConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterLoggingConfigComponentConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterLoggingConfigComponentConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterLoggingConfigComponentConfig(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterLoggingConfigComponentConfig(c *Client, des, nw *ClusterLoggingConfigComponentConfig) *ClusterLoggingConfigComponentConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterLoggingConfigComponentConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewClusterLoggingConfigComponentConfigSet(c *Client, des, nw []ClusterLoggingConfigComponentConfig) []ClusterLoggingConfigComponentConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterLoggingConfigComponentConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterLoggingConfigComponentConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterLoggingConfigComponentConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterLoggingConfigComponentConfigSlice(c *Client, des, nw []ClusterLoggingConfigComponentConfig) []ClusterLoggingConfigComponentConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterLoggingConfigComponentConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterLoggingConfigComponentConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterMonitoringConfig(des, initial *ClusterMonitoringConfig, opts ...dcl.ApplyOption) *ClusterMonitoringConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterMonitoringConfig{} + + cDes.ManagedPrometheusConfig = canonicalizeClusterMonitoringConfigManagedPrometheusConfig(des.ManagedPrometheusConfig, initial.ManagedPrometheusConfig, opts...) + + return cDes +} + +func canonicalizeClusterMonitoringConfigSlice(des, initial []ClusterMonitoringConfig, opts ...dcl.ApplyOption) []ClusterMonitoringConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterMonitoringConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterMonitoringConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterMonitoringConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterMonitoringConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterMonitoringConfig(c *Client, des, nw *ClusterMonitoringConfig) *ClusterMonitoringConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterMonitoringConfig while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + nw.ManagedPrometheusConfig = canonicalizeNewClusterMonitoringConfigManagedPrometheusConfig(c, des.ManagedPrometheusConfig, nw.ManagedPrometheusConfig) + + return nw +} + +func canonicalizeNewClusterMonitoringConfigSet(c *Client, des, nw []ClusterMonitoringConfig) []ClusterMonitoringConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterMonitoringConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterMonitoringConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterMonitoringConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterMonitoringConfigSlice(c *Client, des, nw []ClusterMonitoringConfig) []ClusterMonitoringConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterMonitoringConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterMonitoringConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterMonitoringConfigManagedPrometheusConfig(des, initial *ClusterMonitoringConfigManagedPrometheusConfig, opts ...dcl.ApplyOption) *ClusterMonitoringConfigManagedPrometheusConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterMonitoringConfigManagedPrometheusConfig{} + + if dcl.BoolCanonicalize(des.Enabled, initial.Enabled) || dcl.IsZeroValue(des.Enabled) { + cDes.Enabled = initial.Enabled + } else { + cDes.Enabled = des.Enabled + } + + return cDes +} + +func canonicalizeClusterMonitoringConfigManagedPrometheusConfigSlice(des, initial []ClusterMonitoringConfigManagedPrometheusConfig, opts ...dcl.ApplyOption) []ClusterMonitoringConfigManagedPrometheusConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterMonitoringConfigManagedPrometheusConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterMonitoringConfigManagedPrometheusConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterMonitoringConfigManagedPrometheusConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterMonitoringConfigManagedPrometheusConfig(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterMonitoringConfigManagedPrometheusConfig(c *Client, des, nw *ClusterMonitoringConfigManagedPrometheusConfig) *ClusterMonitoringConfigManagedPrometheusConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterMonitoringConfigManagedPrometheusConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.BoolCanonicalize(des.Enabled, nw.Enabled) { + nw.Enabled = des.Enabled + } + + return nw +} + +func canonicalizeNewClusterMonitoringConfigManagedPrometheusConfigSet(c *Client, des, nw []ClusterMonitoringConfigManagedPrometheusConfig) []ClusterMonitoringConfigManagedPrometheusConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterMonitoringConfigManagedPrometheusConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterMonitoringConfigManagedPrometheusConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterMonitoringConfigManagedPrometheusConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterMonitoringConfigManagedPrometheusConfigSlice(c *Client, des, nw []ClusterMonitoringConfigManagedPrometheusConfig) []ClusterMonitoringConfigManagedPrometheusConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterMonitoringConfigManagedPrometheusConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterMonitoringConfigManagedPrometheusConfig(c, &d, &n)) +{{- end }} + } + + return items +} + +// The differ returns a list of diffs, along with a list of operations that should be taken +// to remedy them. Right now, it does not attempt to consolidate operations - if several +// fields can be fixed with a patch update, it will perform the patch several times. +// Diffs on some fields will be ignored if the `desired` state has an empty (nil) +// value. This empty value indicates that the user does not care about the state for +// the field. Empty fields on the actual object will cause diffs. +// TODO(magic-modules-eng): for efficiency in some resources, add batching. +func diffCluster(c *Client, desired, actual *Cluster, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) { + if desired == nil || actual == nil { + return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual) + } + + c.Config.Logger.Infof("Diff function called with desired state: %v", desired) + c.Config.Logger.Infof("Diff function called with actual state: %v", actual) + + var fn dcl.FieldName + var newDiffs []*dcl.FieldDiff + // New style diffs. + if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Description, actual.Description, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateClusterUpdateAzureClusterOperation")}, fn.AddNest("Description")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.AzureRegion, actual.AzureRegion, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AzureRegion")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.ResourceGroupId, actual.ResourceGroupId, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ResourceGroupId")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Client, actual.Client, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.TriggersOperation("updateClusterUpdateAzureClusterOperation")}, fn.AddNest("AzureClient")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.AzureServicesAuthentication, actual.AzureServicesAuthentication, dcl.DiffInfo{ObjectFunction: compareClusterAzureServicesAuthenticationNewStyle, EmptyObject: EmptyClusterAzureServicesAuthentication, OperationSelector: dcl.TriggersOperation("updateClusterUpdateAzureClusterOperation")}, fn.AddNest("AzureServicesAuthentication")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Networking, actual.Networking, dcl.DiffInfo{ObjectFunction: compareClusterNetworkingNewStyle, EmptyObject: EmptyClusterNetworking, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Networking")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.ControlPlane, actual.ControlPlane, dcl.DiffInfo{ObjectFunction: compareClusterControlPlaneNewStyle, EmptyObject: EmptyClusterControlPlane, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ControlPlane")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Authorization, actual.Authorization, dcl.DiffInfo{ObjectFunction: compareClusterAuthorizationNewStyle, EmptyObject: EmptyClusterAuthorization, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Authorization")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.State, actual.State, dcl.DiffInfo{OutputOnly: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("State")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Endpoint, actual.Endpoint, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Endpoint")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Uid, actual.Uid, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Uid")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Reconciling, actual.Reconciling, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Reconciling")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.CreateTime, actual.CreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Etag, actual.Etag, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Etag")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Annotations, actual.Annotations, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Annotations")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.WorkloadIdentityConfig, actual.WorkloadIdentityConfig, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareClusterWorkloadIdentityConfigNewStyle, EmptyObject: EmptyClusterWorkloadIdentityConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("WorkloadIdentityConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Location, actual.Location, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Location")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Fleet, actual.Fleet, dcl.DiffInfo{ObjectFunction: compareClusterFleetNewStyle, EmptyObject: EmptyClusterFleet, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Fleet")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + +{{- if ne $.TargetVersionName "ga" }} + if ds, err := dcl.Diff(desired.LoggingConfig, actual.LoggingConfig, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareClusterLoggingConfigNewStyle, EmptyObject: EmptyClusterLoggingConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("LoggingConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.MonitoringConfig, actual.MonitoringConfig, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareClusterMonitoringConfigNewStyle, EmptyObject: EmptyClusterMonitoringConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MonitoringConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + +{{- end }} + if len(newDiffs) > 0 { + c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs) + } + return newDiffs, nil +} +func compareClusterAzureServicesAuthenticationNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterAzureServicesAuthentication) + if !ok { + desiredNotPointer, ok := d.(ClusterAzureServicesAuthentication) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterAzureServicesAuthentication or *ClusterAzureServicesAuthentication", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterAzureServicesAuthentication) + if !ok { + actualNotPointer, ok := a.(ClusterAzureServicesAuthentication) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterAzureServicesAuthentication", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.TenantId, actual.TenantId, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateClusterUpdateAzureClusterOperation")}, fn.AddNest("TenantId")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ApplicationId, actual.ApplicationId, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateClusterUpdateAzureClusterOperation")}, fn.AddNest("ApplicationId")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterNetworkingNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterNetworking) + if !ok { + desiredNotPointer, ok := d.(ClusterNetworking) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterNetworking or *ClusterNetworking", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterNetworking) + if !ok { + actualNotPointer, ok := a.(ClusterNetworking) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterNetworking", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.VirtualNetworkId, actual.VirtualNetworkId, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("VirtualNetworkId")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.PodAddressCidrBlocks, actual.PodAddressCidrBlocks, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PodAddressCidrBlocks")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ServiceAddressCidrBlocks, actual.ServiceAddressCidrBlocks, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ServiceAddressCidrBlocks")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterControlPlaneNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterControlPlane) + if !ok { + desiredNotPointer, ok := d.(ClusterControlPlane) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterControlPlane or *ClusterControlPlane", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterControlPlane) + if !ok { + actualNotPointer, ok := a.(ClusterControlPlane) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterControlPlane", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Version, actual.Version, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateClusterUpdateAzureClusterOperation")}, fn.AddNest("Version")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.SubnetId, actual.SubnetId, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SubnetId")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.VmSize, actual.VmSize, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.TriggersOperation("updateClusterUpdateAzureClusterOperation")}, fn.AddNest("VmSize")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.SshConfig, actual.SshConfig, dcl.DiffInfo{ObjectFunction: compareClusterControlPlaneSshConfigNewStyle, EmptyObject: EmptyClusterControlPlaneSshConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SshConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.RootVolume, actual.RootVolume, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareClusterControlPlaneRootVolumeNewStyle, EmptyObject: EmptyClusterControlPlaneRootVolume, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("RootVolume")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.MainVolume, actual.MainVolume, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareClusterControlPlaneMainVolumeNewStyle, EmptyObject: EmptyClusterControlPlaneMainVolume, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MainVolume")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.DatabaseEncryption, actual.DatabaseEncryption, dcl.DiffInfo{ObjectFunction: compareClusterControlPlaneDatabaseEncryptionNewStyle, EmptyObject: EmptyClusterControlPlaneDatabaseEncryption, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DatabaseEncryption")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Tags, actual.Tags, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Tags")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ProxyConfig, actual.ProxyConfig, dcl.DiffInfo{ObjectFunction: compareClusterControlPlaneProxyConfigNewStyle, EmptyObject: EmptyClusterControlPlaneProxyConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ProxyConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.ReplicaPlacements, actual.ReplicaPlacements, dcl.DiffInfo{ObjectFunction: compareClusterControlPlaneReplicaPlacementsNewStyle, EmptyObject: EmptyClusterControlPlaneReplicaPlacements, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ReplicaPlacements")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareClusterControlPlaneSshConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterControlPlaneSshConfig) + if !ok { + desiredNotPointer, ok := d.(ClusterControlPlaneSshConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneSshConfig or *ClusterControlPlaneSshConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterControlPlaneSshConfig) + if !ok { + actualNotPointer, ok := a.(ClusterControlPlaneSshConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneSshConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.AuthorizedKey, actual.AuthorizedKey, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateClusterUpdateAzureClusterOperation")}, fn.AddNest("AuthorizedKey")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterControlPlaneRootVolumeNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterControlPlaneRootVolume) + if !ok { + desiredNotPointer, ok := d.(ClusterControlPlaneRootVolume) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneRootVolume or *ClusterControlPlaneRootVolume", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterControlPlaneRootVolume) + if !ok { + actualNotPointer, ok := a.(ClusterControlPlaneRootVolume) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneRootVolume", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.SizeGib, actual.SizeGib, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SizeGib")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareClusterControlPlaneMainVolumeNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterControlPlaneMainVolume) + if !ok { + desiredNotPointer, ok := d.(ClusterControlPlaneMainVolume) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneMainVolume or *ClusterControlPlaneMainVolume", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterControlPlaneMainVolume) + if !ok { + actualNotPointer, ok := a.(ClusterControlPlaneMainVolume) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneMainVolume", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.SizeGib, actual.SizeGib, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SizeGib")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterControlPlaneDatabaseEncryptionNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterControlPlaneDatabaseEncryption) + if !ok { + desiredNotPointer, ok := d.(ClusterControlPlaneDatabaseEncryption) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneDatabaseEncryption or *ClusterControlPlaneDatabaseEncryption", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterControlPlaneDatabaseEncryption) + if !ok { + actualNotPointer, ok := a.(ClusterControlPlaneDatabaseEncryption) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneDatabaseEncryption", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.KeyId, actual.KeyId, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KeyId")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareClusterControlPlaneProxyConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterControlPlaneProxyConfig) + if !ok { + desiredNotPointer, ok := d.(ClusterControlPlaneProxyConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneProxyConfig or *ClusterControlPlaneProxyConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterControlPlaneProxyConfig) + if !ok { + actualNotPointer, ok := a.(ClusterControlPlaneProxyConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneProxyConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.ResourceGroupId, actual.ResourceGroupId, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ResourceGroupId")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.SecretId, actual.SecretId, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SecretId")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareClusterControlPlaneReplicaPlacementsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterControlPlaneReplicaPlacements) + if !ok { + desiredNotPointer, ok := d.(ClusterControlPlaneReplicaPlacements) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneReplicaPlacements or *ClusterControlPlaneReplicaPlacements", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterControlPlaneReplicaPlacements) + if !ok { + actualNotPointer, ok := a.(ClusterControlPlaneReplicaPlacements) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterControlPlaneReplicaPlacements", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.SubnetId, actual.SubnetId, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SubnetId")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.AzureAvailabilityZone, actual.AzureAvailabilityZone, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AzureAvailabilityZone")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterAuthorizationNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterAuthorization) + if !ok { + desiredNotPointer, ok := d.(ClusterAuthorization) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterAuthorization or *ClusterAuthorization", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterAuthorization) + if !ok { + actualNotPointer, ok := a.(ClusterAuthorization) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterAuthorization", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.AdminUsers, actual.AdminUsers, dcl.DiffInfo{ObjectFunction: compareClusterAuthorizationAdminUsersNewStyle, EmptyObject: EmptyClusterAuthorizationAdminUsers, OperationSelector: dcl.TriggersOperation("updateClusterUpdateAzureClusterOperation")}, fn.AddNest("AdminUsers")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.AdminGroups, actual.AdminGroups, dcl.DiffInfo{ObjectFunction: compareClusterAuthorizationAdminGroupsNewStyle, EmptyObject: EmptyClusterAuthorizationAdminGroups, OperationSelector: dcl.TriggersOperation("updateClusterUpdateAzureClusterOperation")}, fn.AddNest("AdminGroups")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterAuthorizationAdminUsersNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterAuthorizationAdminUsers) + if !ok { + desiredNotPointer, ok := d.(ClusterAuthorizationAdminUsers) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterAuthorizationAdminUsers or *ClusterAuthorizationAdminUsers", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterAuthorizationAdminUsers) + if !ok { + actualNotPointer, ok := a.(ClusterAuthorizationAdminUsers) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterAuthorizationAdminUsers", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Username, actual.Username, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateClusterUpdateAzureClusterOperation")}, fn.AddNest("Username")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareClusterAuthorizationAdminGroupsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterAuthorizationAdminGroups) + if !ok { + desiredNotPointer, ok := d.(ClusterAuthorizationAdminGroups) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterAuthorizationAdminGroups or *ClusterAuthorizationAdminGroups", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterAuthorizationAdminGroups) + if !ok { + actualNotPointer, ok := a.(ClusterAuthorizationAdminGroups) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterAuthorizationAdminGroups", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Group, actual.Group, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateClusterUpdateAzureClusterOperation")}, fn.AddNest("Group")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, 
ds...) + } + return diffs, nil +} + +func compareClusterWorkloadIdentityConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterWorkloadIdentityConfig) + if !ok { + desiredNotPointer, ok := d.(ClusterWorkloadIdentityConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterWorkloadIdentityConfig or *ClusterWorkloadIdentityConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterWorkloadIdentityConfig) + if !ok { + actualNotPointer, ok := a.(ClusterWorkloadIdentityConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterWorkloadIdentityConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.IssuerUri, actual.IssuerUri, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("IssuerUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.WorkloadPool, actual.WorkloadPool, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("WorkloadPool")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.IdentityProvider, actual.IdentityProvider, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("IdentityProvider")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterFleetNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterFleet) + if !ok { + desiredNotPointer, ok := d.(ClusterFleet) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterFleet or *ClusterFleet", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterFleet) + if !ok { + actualNotPointer, ok := a.(ClusterFleet) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterFleet", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Membership, actual.Membership, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Membership")); len(ds) != 0 || err != nil { +{{- if ne $.TargetVersionName "ga" }} + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterLoggingConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterLoggingConfig) + if !ok { + desiredNotPointer, ok := d.(ClusterLoggingConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterLoggingConfig or *ClusterLoggingConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterLoggingConfig) + if !ok { + actualNotPointer, ok := a.(ClusterLoggingConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterLoggingConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.ComponentConfig, actual.ComponentConfig, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareClusterLoggingConfigComponentConfigNewStyle, EmptyObject: EmptyClusterLoggingConfigComponentConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ComponentConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterLoggingConfigComponentConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterLoggingConfigComponentConfig) + if !ok { + desiredNotPointer, ok := d.(ClusterLoggingConfigComponentConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterLoggingConfigComponentConfig or *ClusterLoggingConfigComponentConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterLoggingConfigComponentConfig) + if !ok { + actualNotPointer, ok := a.(ClusterLoggingConfigComponentConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterLoggingConfigComponentConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.EnableComponents, actual.EnableComponents, dcl.DiffInfo{ServerDefault: true, Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateClusterUpdateAzureClusterOperation")}, fn.AddNest("EnableComponents")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterMonitoringConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterMonitoringConfig) + if !ok { + desiredNotPointer, ok := d.(ClusterMonitoringConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterMonitoringConfig or *ClusterMonitoringConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterMonitoringConfig) + if !ok { + actualNotPointer, ok := a.(ClusterMonitoringConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterMonitoringConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.ManagedPrometheusConfig, actual.ManagedPrometheusConfig, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareClusterMonitoringConfigManagedPrometheusConfigNewStyle, EmptyObject: EmptyClusterMonitoringConfigManagedPrometheusConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ManagedPrometheusConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterMonitoringConfigManagedPrometheusConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterMonitoringConfigManagedPrometheusConfig) + if !ok { + desiredNotPointer, ok := d.(ClusterMonitoringConfigManagedPrometheusConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterMonitoringConfigManagedPrometheusConfig or *ClusterMonitoringConfigManagedPrometheusConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterMonitoringConfigManagedPrometheusConfig) + if !ok { + actualNotPointer, ok := a.(ClusterMonitoringConfigManagedPrometheusConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterMonitoringConfigManagedPrometheusConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Enabled, actual.Enabled, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.TriggersOperation("updateClusterUpdateAzureClusterOperation")}, fn.AddNest("Enabled")); len(ds) != 0 || err != nil { +{{- end }} + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +// urlNormalized returns a copy of the resource struct with values normalized +// for URL substitutions. For instance, it converts long-form self-links to +// short-form so they can be substituted in. 
+func (r *Cluster) urlNormalized() *Cluster { + normalized := dcl.Copy(*r).(Cluster) + normalized.Name = dcl.SelfLinkToName(r.Name) + normalized.Description = dcl.SelfLinkToName(r.Description) + normalized.AzureRegion = dcl.SelfLinkToName(r.AzureRegion) + normalized.ResourceGroupId = dcl.SelfLinkToName(r.ResourceGroupId) + normalized.Client = dcl.SelfLinkToName(r.Client) + normalized.Endpoint = dcl.SelfLinkToName(r.Endpoint) + normalized.Uid = dcl.SelfLinkToName(r.Uid) + normalized.Etag = dcl.SelfLinkToName(r.Etag) + normalized.Project = dcl.SelfLinkToName(r.Project) + normalized.Location = dcl.SelfLinkToName(r.Location) + return &normalized +} + +func (r *Cluster) updateURL(userBasePath, updateName string) (string, error) { + nr := r.urlNormalized() + if updateName == "UpdateAzureCluster" { + fields := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/azureClusters/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, fields), nil + + } + + return "", fmt.Errorf("unknown update name: %s", updateName) +} + +// marshal encodes the Cluster resource into JSON for a Create request, and +// performs transformations from the resource schema to the API schema if +// necessary. +func (r *Cluster) marshal(c *Client) ([]byte, error) { + m, err := expandCluster(c, r) + if err != nil { + return nil, fmt.Errorf("error marshalling Cluster: %w", err) + } + + return json.Marshal(m) +} + +// unmarshalCluster decodes JSON responses into the Cluster resource schema. 
+func unmarshalCluster(b []byte, c *Client, res *Cluster) (*Cluster, error) { + var m map[string]interface{} + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + return unmarshalMapCluster(m, c, res) +} + +func unmarshalMapCluster(m map[string]interface{}, c *Client, res *Cluster) (*Cluster, error) { + + flattened := flattenCluster(c, m, res) + if flattened == nil { + return nil, fmt.Errorf("attempted to flatten empty json object") + } + return flattened, nil +} + +// expandCluster expands Cluster into a JSON request object. +func expandCluster(c *Client, f *Cluster) (map[string]interface{}, error) { + m := make(map[string]interface{}) + res := f + _ = res + if v, err := dcl.DeriveField("projects/%s/locations/%s/azureClusters/%s", f.Name, dcl.SelfLinkToName(f.Project), dcl.SelfLinkToName(f.Location), dcl.SelfLinkToName(f.Name)); err != nil { + return nil, fmt.Errorf("error expanding Name into name: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["name"] = v + } + if v := f.Description; dcl.ValueShouldBeSent(v) { + m["description"] = v + } + if v := f.AzureRegion; dcl.ValueShouldBeSent(v) { + m["azureRegion"] = v + } + if v := f.ResourceGroupId; dcl.ValueShouldBeSent(v) { + m["resourceGroupId"] = v + } + if v := f.Client; dcl.ValueShouldBeSent(v) { + m["azureClient"] = v + } + if v, err := expandClusterAzureServicesAuthentication(c, f.AzureServicesAuthentication, res); err != nil { + return nil, fmt.Errorf("error expanding AzureServicesAuthentication into azureServicesAuthentication: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["azureServicesAuthentication"] = v + } + if v, err := expandClusterNetworking(c, f.Networking, res); err != nil { + return nil, fmt.Errorf("error expanding Networking into networking: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["networking"] = v + } + if v, err := expandClusterControlPlane(c, f.ControlPlane, res); err != nil { + return nil, fmt.Errorf("error expanding ControlPlane into 
controlPlane: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["controlPlane"] = v + } + if v, err := expandClusterAuthorization(c, f.Authorization, res); err != nil { + return nil, fmt.Errorf("error expanding Authorization into authorization: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["authorization"] = v + } + if v := f.Annotations; dcl.ValueShouldBeSent(v) { + m["annotations"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Project into project: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["project"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Location into location: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["location"] = v + } + if v, err := expandClusterFleet(c, f.Fleet, res); err != nil { + return nil, fmt.Errorf("error expanding Fleet into fleet: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["fleet"] = v +{{- if ne $.TargetVersionName "ga" }} + } + if v, err := expandClusterLoggingConfig(c, f.LoggingConfig, res); err != nil { + return nil, fmt.Errorf("error expanding LoggingConfig into loggingConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["loggingConfig"] = v + } + if v, err := expandClusterMonitoringConfig(c, f.MonitoringConfig, res); err != nil { + return nil, fmt.Errorf("error expanding MonitoringConfig into monitoringConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["monitoringConfig"] = v +{{- end }} + } + + return m, nil +} + +// flattenCluster flattens Cluster from a JSON request object into the +// Cluster type. 
// flattenCluster builds a Cluster from a decoded JSON object. It returns nil
// when the input is not a non-empty JSON object; output-only fields (state,
// endpoint, uid, timestamps, etag, workloadIdentityConfig) are populated too.
func flattenCluster(c *Client, i interface{}, res *Cluster) *Cluster {
	m, ok := i.(map[string]interface{})
	if !ok {
		return nil
	}
	if len(m) == 0 {
		return nil
	}

	resultRes := &Cluster{}
	resultRes.Name = dcl.FlattenString(m["name"])
	resultRes.Description = dcl.FlattenString(m["description"])
	resultRes.AzureRegion = dcl.FlattenString(m["azureRegion"])
	resultRes.ResourceGroupId = dcl.FlattenString(m["resourceGroupId"])
	// Wire key "azureClient" maps to the Go field Client.
	resultRes.Client = dcl.FlattenString(m["azureClient"])
	resultRes.AzureServicesAuthentication = flattenClusterAzureServicesAuthentication(c, m["azureServicesAuthentication"], res)
	resultRes.Networking = flattenClusterNetworking(c, m["networking"], res)
	resultRes.ControlPlane = flattenClusterControlPlane(c, m["controlPlane"], res)
	resultRes.Authorization = flattenClusterAuthorization(c, m["authorization"], res)
	resultRes.State = flattenClusterStateEnum(m["state"])
	resultRes.Endpoint = dcl.FlattenString(m["endpoint"])
	resultRes.Uid = dcl.FlattenString(m["uid"])
	resultRes.Reconciling = dcl.FlattenBool(m["reconciling"])
	resultRes.CreateTime = dcl.FlattenString(m["createTime"])
	resultRes.UpdateTime = dcl.FlattenString(m["updateTime"])
	resultRes.Etag = dcl.FlattenString(m["etag"])
	resultRes.Annotations = dcl.FlattenKeyValuePairs(m["annotations"])
	resultRes.WorkloadIdentityConfig = flattenClusterWorkloadIdentityConfig(c, m["workloadIdentityConfig"], res)
	resultRes.Project = dcl.FlattenString(m["project"])
	resultRes.Location = dcl.FlattenString(m["location"])
	resultRes.Fleet = flattenClusterFleet(c, m["fleet"], res)
{{- if ne $.TargetVersionName "ga" }}
	// Logging/monitoring configs exist only in non-GA provider versions.
	resultRes.LoggingConfig = flattenClusterLoggingConfig(c, m["loggingConfig"], res)
	resultRes.MonitoringConfig = flattenClusterMonitoringConfig(c, m["monitoringConfig"], res)
{{- end }}

	return resultRes
}

// expandClusterAzureServicesAuthenticationMap expands the contents of ClusterAzureServicesAuthentication into a JSON
// request object.
+func expandClusterAzureServicesAuthenticationMap(c *Client, f map[string]ClusterAzureServicesAuthentication, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterAzureServicesAuthentication(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterAzureServicesAuthenticationSlice expands the contents of ClusterAzureServicesAuthentication into a JSON +// request object. +func expandClusterAzureServicesAuthenticationSlice(c *Client, f []ClusterAzureServicesAuthentication, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterAzureServicesAuthentication(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterAzureServicesAuthenticationMap flattens the contents of ClusterAzureServicesAuthentication from a JSON +// response object. +func flattenClusterAzureServicesAuthenticationMap(c *Client, i interface{}, res *Cluster) map[string]ClusterAzureServicesAuthentication { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterAzureServicesAuthentication{} + } + + if len(a) == 0 { + return map[string]ClusterAzureServicesAuthentication{} + } + + items := make(map[string]ClusterAzureServicesAuthentication) + for k, item := range a { + items[k] = *flattenClusterAzureServicesAuthentication(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterAzureServicesAuthenticationSlice flattens the contents of ClusterAzureServicesAuthentication from a JSON +// response object. 
+func flattenClusterAzureServicesAuthenticationSlice(c *Client, i interface{}, res *Cluster) []ClusterAzureServicesAuthentication { + a, ok := i.([]interface{}) + if !ok { + return []ClusterAzureServicesAuthentication{} + } + + if len(a) == 0 { + return []ClusterAzureServicesAuthentication{} + } + + items := make([]ClusterAzureServicesAuthentication, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterAzureServicesAuthentication(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterAzureServicesAuthentication expands an instance of ClusterAzureServicesAuthentication into a JSON +// request object. +func expandClusterAzureServicesAuthentication(c *Client, f *ClusterAzureServicesAuthentication, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.TenantId; !dcl.IsEmptyValueIndirect(v) { + m["tenantId"] = v + } + if v := f.ApplicationId; !dcl.IsEmptyValueIndirect(v) { + m["applicationId"] = v + } + + return m, nil +} + +// flattenClusterAzureServicesAuthentication flattens an instance of ClusterAzureServicesAuthentication from a JSON +// response object. +func flattenClusterAzureServicesAuthentication(c *Client, i interface{}, res *Cluster) *ClusterAzureServicesAuthentication { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterAzureServicesAuthentication{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterAzureServicesAuthentication + } + r.TenantId = dcl.FlattenString(m["tenantId"]) + r.ApplicationId = dcl.FlattenString(m["applicationId"]) + + return r +} + +// expandClusterNetworkingMap expands the contents of ClusterNetworking into a JSON +// request object. 
+func expandClusterNetworkingMap(c *Client, f map[string]ClusterNetworking, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterNetworking(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterNetworkingSlice expands the contents of ClusterNetworking into a JSON +// request object. +func expandClusterNetworkingSlice(c *Client, f []ClusterNetworking, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterNetworking(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterNetworkingMap flattens the contents of ClusterNetworking from a JSON +// response object. +func flattenClusterNetworkingMap(c *Client, i interface{}, res *Cluster) map[string]ClusterNetworking { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterNetworking{} + } + + if len(a) == 0 { + return map[string]ClusterNetworking{} + } + + items := make(map[string]ClusterNetworking) + for k, item := range a { + items[k] = *flattenClusterNetworking(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterNetworkingSlice flattens the contents of ClusterNetworking from a JSON +// response object. 
+func flattenClusterNetworkingSlice(c *Client, i interface{}, res *Cluster) []ClusterNetworking { + a, ok := i.([]interface{}) + if !ok { + return []ClusterNetworking{} + } + + if len(a) == 0 { + return []ClusterNetworking{} + } + + items := make([]ClusterNetworking, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterNetworking(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterNetworking expands an instance of ClusterNetworking into a JSON +// request object. +func expandClusterNetworking(c *Client, f *ClusterNetworking, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.VirtualNetworkId; !dcl.IsEmptyValueIndirect(v) { + m["virtualNetworkId"] = v + } + if v := f.PodAddressCidrBlocks; v != nil { + m["podAddressCidrBlocks"] = v + } + if v := f.ServiceAddressCidrBlocks; v != nil { + m["serviceAddressCidrBlocks"] = v + } + + return m, nil +} + +// flattenClusterNetworking flattens an instance of ClusterNetworking from a JSON +// response object. +func flattenClusterNetworking(c *Client, i interface{}, res *Cluster) *ClusterNetworking { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterNetworking{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterNetworking + } + r.VirtualNetworkId = dcl.FlattenString(m["virtualNetworkId"]) + r.PodAddressCidrBlocks = dcl.FlattenStringSlice(m["podAddressCidrBlocks"]) + r.ServiceAddressCidrBlocks = dcl.FlattenStringSlice(m["serviceAddressCidrBlocks"]) + + return r +} + +// expandClusterControlPlaneMap expands the contents of ClusterControlPlane into a JSON +// request object. 
+func expandClusterControlPlaneMap(c *Client, f map[string]ClusterControlPlane, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterControlPlane(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterControlPlaneSlice expands the contents of ClusterControlPlane into a JSON +// request object. +func expandClusterControlPlaneSlice(c *Client, f []ClusterControlPlane, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterControlPlane(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterControlPlaneMap flattens the contents of ClusterControlPlane from a JSON +// response object. +func flattenClusterControlPlaneMap(c *Client, i interface{}, res *Cluster) map[string]ClusterControlPlane { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterControlPlane{} + } + + if len(a) == 0 { + return map[string]ClusterControlPlane{} + } + + items := make(map[string]ClusterControlPlane) + for k, item := range a { + items[k] = *flattenClusterControlPlane(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterControlPlaneSlice flattens the contents of ClusterControlPlane from a JSON +// response object. 
+func flattenClusterControlPlaneSlice(c *Client, i interface{}, res *Cluster) []ClusterControlPlane { + a, ok := i.([]interface{}) + if !ok { + return []ClusterControlPlane{} + } + + if len(a) == 0 { + return []ClusterControlPlane{} + } + + items := make([]ClusterControlPlane, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterControlPlane(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterControlPlane expands an instance of ClusterControlPlane into a JSON +// request object. +func expandClusterControlPlane(c *Client, f *ClusterControlPlane, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Version; !dcl.IsEmptyValueIndirect(v) { + m["version"] = v + } + if v := f.SubnetId; !dcl.IsEmptyValueIndirect(v) { + m["subnetId"] = v + } + if v := f.VmSize; !dcl.IsEmptyValueIndirect(v) { + m["vmSize"] = v + } + if v, err := expandClusterControlPlaneSshConfig(c, f.SshConfig, res); err != nil { + return nil, fmt.Errorf("error expanding SshConfig into sshConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["sshConfig"] = v + } + if v, err := expandClusterControlPlaneRootVolume(c, f.RootVolume, res); err != nil { + return nil, fmt.Errorf("error expanding RootVolume into rootVolume: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["rootVolume"] = v + } + if v, err := expandClusterControlPlaneMainVolume(c, f.MainVolume, res); err != nil { + return nil, fmt.Errorf("error expanding MainVolume into mainVolume: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["mainVolume"] = v + } + if v, err := expandClusterControlPlaneDatabaseEncryption(c, f.DatabaseEncryption, res); err != nil { + return nil, fmt.Errorf("error expanding DatabaseEncryption into databaseEncryption: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["databaseEncryption"] = v + } + if v := f.Tags; !dcl.IsEmptyValueIndirect(v) 
{ + m["tags"] = v + } + if v, err := expandClusterControlPlaneProxyConfig(c, f.ProxyConfig, res); err != nil { + return nil, fmt.Errorf("error expanding ProxyConfig into proxyConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["proxyConfig"] = v + } + if v, err := expandClusterControlPlaneReplicaPlacementsSlice(c, f.ReplicaPlacements, res); err != nil { + return nil, fmt.Errorf("error expanding ReplicaPlacements into replicaPlacements: %w", err) + } else if v != nil { + m["replicaPlacements"] = v + } + + return m, nil +} + +// flattenClusterControlPlane flattens an instance of ClusterControlPlane from a JSON +// response object. +func flattenClusterControlPlane(c *Client, i interface{}, res *Cluster) *ClusterControlPlane { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterControlPlane{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterControlPlane + } + r.Version = dcl.FlattenString(m["version"]) + r.SubnetId = dcl.FlattenString(m["subnetId"]) + r.VmSize = dcl.FlattenString(m["vmSize"]) + r.SshConfig = flattenClusterControlPlaneSshConfig(c, m["sshConfig"], res) + r.RootVolume = flattenClusterControlPlaneRootVolume(c, m["rootVolume"], res) + r.MainVolume = flattenClusterControlPlaneMainVolume(c, m["mainVolume"], res) + r.DatabaseEncryption = flattenClusterControlPlaneDatabaseEncryption(c, m["databaseEncryption"], res) + r.Tags = dcl.FlattenKeyValuePairs(m["tags"]) + r.ProxyConfig = flattenClusterControlPlaneProxyConfig(c, m["proxyConfig"], res) + r.ReplicaPlacements = flattenClusterControlPlaneReplicaPlacementsSlice(c, m["replicaPlacements"], res) + + return r +} + +// expandClusterControlPlaneSshConfigMap expands the contents of ClusterControlPlaneSshConfig into a JSON +// request object. 
+func expandClusterControlPlaneSshConfigMap(c *Client, f map[string]ClusterControlPlaneSshConfig, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterControlPlaneSshConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterControlPlaneSshConfigSlice expands the contents of ClusterControlPlaneSshConfig into a JSON +// request object. +func expandClusterControlPlaneSshConfigSlice(c *Client, f []ClusterControlPlaneSshConfig, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterControlPlaneSshConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterControlPlaneSshConfigMap flattens the contents of ClusterControlPlaneSshConfig from a JSON +// response object. +func flattenClusterControlPlaneSshConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterControlPlaneSshConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterControlPlaneSshConfig{} + } + + if len(a) == 0 { + return map[string]ClusterControlPlaneSshConfig{} + } + + items := make(map[string]ClusterControlPlaneSshConfig) + for k, item := range a { + items[k] = *flattenClusterControlPlaneSshConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterControlPlaneSshConfigSlice flattens the contents of ClusterControlPlaneSshConfig from a JSON +// response object. 
+func flattenClusterControlPlaneSshConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterControlPlaneSshConfig { + a, ok := i.([]interface{}) + if !ok { + return []ClusterControlPlaneSshConfig{} + } + + if len(a) == 0 { + return []ClusterControlPlaneSshConfig{} + } + + items := make([]ClusterControlPlaneSshConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterControlPlaneSshConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterControlPlaneSshConfig expands an instance of ClusterControlPlaneSshConfig into a JSON +// request object. +func expandClusterControlPlaneSshConfig(c *Client, f *ClusterControlPlaneSshConfig, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.AuthorizedKey; !dcl.IsEmptyValueIndirect(v) { + m["authorizedKey"] = v + } + + return m, nil +} + +// flattenClusterControlPlaneSshConfig flattens an instance of ClusterControlPlaneSshConfig from a JSON +// response object. +func flattenClusterControlPlaneSshConfig(c *Client, i interface{}, res *Cluster) *ClusterControlPlaneSshConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterControlPlaneSshConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterControlPlaneSshConfig + } + r.AuthorizedKey = dcl.FlattenString(m["authorizedKey"]) + + return r +} + +// expandClusterControlPlaneRootVolumeMap expands the contents of ClusterControlPlaneRootVolume into a JSON +// request object. 
+func expandClusterControlPlaneRootVolumeMap(c *Client, f map[string]ClusterControlPlaneRootVolume, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterControlPlaneRootVolume(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterControlPlaneRootVolumeSlice expands the contents of ClusterControlPlaneRootVolume into a JSON +// request object. +func expandClusterControlPlaneRootVolumeSlice(c *Client, f []ClusterControlPlaneRootVolume, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterControlPlaneRootVolume(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterControlPlaneRootVolumeMap flattens the contents of ClusterControlPlaneRootVolume from a JSON +// response object. +func flattenClusterControlPlaneRootVolumeMap(c *Client, i interface{}, res *Cluster) map[string]ClusterControlPlaneRootVolume { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterControlPlaneRootVolume{} + } + + if len(a) == 0 { + return map[string]ClusterControlPlaneRootVolume{} + } + + items := make(map[string]ClusterControlPlaneRootVolume) + for k, item := range a { + items[k] = *flattenClusterControlPlaneRootVolume(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterControlPlaneRootVolumeSlice flattens the contents of ClusterControlPlaneRootVolume from a JSON +// response object. 
+func flattenClusterControlPlaneRootVolumeSlice(c *Client, i interface{}, res *Cluster) []ClusterControlPlaneRootVolume { + a, ok := i.([]interface{}) + if !ok { + return []ClusterControlPlaneRootVolume{} + } + + if len(a) == 0 { + return []ClusterControlPlaneRootVolume{} + } + + items := make([]ClusterControlPlaneRootVolume, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterControlPlaneRootVolume(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterControlPlaneRootVolume expands an instance of ClusterControlPlaneRootVolume into a JSON +// request object. +func expandClusterControlPlaneRootVolume(c *Client, f *ClusterControlPlaneRootVolume, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.SizeGib; !dcl.IsEmptyValueIndirect(v) { + m["sizeGib"] = v + } + + return m, nil +} + +// flattenClusterControlPlaneRootVolume flattens an instance of ClusterControlPlaneRootVolume from a JSON +// response object. +func flattenClusterControlPlaneRootVolume(c *Client, i interface{}, res *Cluster) *ClusterControlPlaneRootVolume { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterControlPlaneRootVolume{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterControlPlaneRootVolume + } + r.SizeGib = dcl.FlattenInteger(m["sizeGib"]) + + return r +} + +// expandClusterControlPlaneMainVolumeMap expands the contents of ClusterControlPlaneMainVolume into a JSON +// request object. 
+func expandClusterControlPlaneMainVolumeMap(c *Client, f map[string]ClusterControlPlaneMainVolume, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterControlPlaneMainVolume(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterControlPlaneMainVolumeSlice expands the contents of ClusterControlPlaneMainVolume into a JSON +// request object. +func expandClusterControlPlaneMainVolumeSlice(c *Client, f []ClusterControlPlaneMainVolume, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterControlPlaneMainVolume(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterControlPlaneMainVolumeMap flattens the contents of ClusterControlPlaneMainVolume from a JSON +// response object. +func flattenClusterControlPlaneMainVolumeMap(c *Client, i interface{}, res *Cluster) map[string]ClusterControlPlaneMainVolume { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterControlPlaneMainVolume{} + } + + if len(a) == 0 { + return map[string]ClusterControlPlaneMainVolume{} + } + + items := make(map[string]ClusterControlPlaneMainVolume) + for k, item := range a { + items[k] = *flattenClusterControlPlaneMainVolume(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterControlPlaneMainVolumeSlice flattens the contents of ClusterControlPlaneMainVolume from a JSON +// response object. 
+func flattenClusterControlPlaneMainVolumeSlice(c *Client, i interface{}, res *Cluster) []ClusterControlPlaneMainVolume { + a, ok := i.([]interface{}) + if !ok { + return []ClusterControlPlaneMainVolume{} + } + + if len(a) == 0 { + return []ClusterControlPlaneMainVolume{} + } + + items := make([]ClusterControlPlaneMainVolume, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterControlPlaneMainVolume(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterControlPlaneMainVolume expands an instance of ClusterControlPlaneMainVolume into a JSON +// request object. +func expandClusterControlPlaneMainVolume(c *Client, f *ClusterControlPlaneMainVolume, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.SizeGib; !dcl.IsEmptyValueIndirect(v) { + m["sizeGib"] = v + } + + return m, nil +} + +// flattenClusterControlPlaneMainVolume flattens an instance of ClusterControlPlaneMainVolume from a JSON +// response object. +func flattenClusterControlPlaneMainVolume(c *Client, i interface{}, res *Cluster) *ClusterControlPlaneMainVolume { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterControlPlaneMainVolume{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterControlPlaneMainVolume + } + r.SizeGib = dcl.FlattenInteger(m["sizeGib"]) + + return r +} + +// expandClusterControlPlaneDatabaseEncryptionMap expands the contents of ClusterControlPlaneDatabaseEncryption into a JSON +// request object. 
+func expandClusterControlPlaneDatabaseEncryptionMap(c *Client, f map[string]ClusterControlPlaneDatabaseEncryption, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterControlPlaneDatabaseEncryption(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterControlPlaneDatabaseEncryptionSlice expands the contents of ClusterControlPlaneDatabaseEncryption into a JSON +// request object. +func expandClusterControlPlaneDatabaseEncryptionSlice(c *Client, f []ClusterControlPlaneDatabaseEncryption, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterControlPlaneDatabaseEncryption(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterControlPlaneDatabaseEncryptionMap flattens the contents of ClusterControlPlaneDatabaseEncryption from a JSON +// response object. +func flattenClusterControlPlaneDatabaseEncryptionMap(c *Client, i interface{}, res *Cluster) map[string]ClusterControlPlaneDatabaseEncryption { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterControlPlaneDatabaseEncryption{} + } + + if len(a) == 0 { + return map[string]ClusterControlPlaneDatabaseEncryption{} + } + + items := make(map[string]ClusterControlPlaneDatabaseEncryption) + for k, item := range a { + items[k] = *flattenClusterControlPlaneDatabaseEncryption(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterControlPlaneDatabaseEncryptionSlice flattens the contents of ClusterControlPlaneDatabaseEncryption from a JSON +// response object. 
+func flattenClusterControlPlaneDatabaseEncryptionSlice(c *Client, i interface{}, res *Cluster) []ClusterControlPlaneDatabaseEncryption { + a, ok := i.([]interface{}) + if !ok { + return []ClusterControlPlaneDatabaseEncryption{} + } + + if len(a) == 0 { + return []ClusterControlPlaneDatabaseEncryption{} + } + + items := make([]ClusterControlPlaneDatabaseEncryption, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterControlPlaneDatabaseEncryption(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterControlPlaneDatabaseEncryption expands an instance of ClusterControlPlaneDatabaseEncryption into a JSON +// request object. +func expandClusterControlPlaneDatabaseEncryption(c *Client, f *ClusterControlPlaneDatabaseEncryption, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.KeyId; !dcl.IsEmptyValueIndirect(v) { + m["keyId"] = v + } + + return m, nil +} + +// flattenClusterControlPlaneDatabaseEncryption flattens an instance of ClusterControlPlaneDatabaseEncryption from a JSON +// response object. +func flattenClusterControlPlaneDatabaseEncryption(c *Client, i interface{}, res *Cluster) *ClusterControlPlaneDatabaseEncryption { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterControlPlaneDatabaseEncryption{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterControlPlaneDatabaseEncryption + } + r.KeyId = dcl.FlattenString(m["keyId"]) + + return r +} + +// expandClusterControlPlaneProxyConfigMap expands the contents of ClusterControlPlaneProxyConfig into a JSON +// request object. 
+func expandClusterControlPlaneProxyConfigMap(c *Client, f map[string]ClusterControlPlaneProxyConfig, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterControlPlaneProxyConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterControlPlaneProxyConfigSlice expands the contents of ClusterControlPlaneProxyConfig into a JSON +// request object. +func expandClusterControlPlaneProxyConfigSlice(c *Client, f []ClusterControlPlaneProxyConfig, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterControlPlaneProxyConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterControlPlaneProxyConfigMap flattens the contents of ClusterControlPlaneProxyConfig from a JSON +// response object. +func flattenClusterControlPlaneProxyConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterControlPlaneProxyConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterControlPlaneProxyConfig{} + } + + if len(a) == 0 { + return map[string]ClusterControlPlaneProxyConfig{} + } + + items := make(map[string]ClusterControlPlaneProxyConfig) + for k, item := range a { + items[k] = *flattenClusterControlPlaneProxyConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterControlPlaneProxyConfigSlice flattens the contents of ClusterControlPlaneProxyConfig from a JSON +// response object. 
+func flattenClusterControlPlaneProxyConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterControlPlaneProxyConfig { + a, ok := i.([]interface{}) + if !ok { + return []ClusterControlPlaneProxyConfig{} + } + + if len(a) == 0 { + return []ClusterControlPlaneProxyConfig{} + } + + items := make([]ClusterControlPlaneProxyConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterControlPlaneProxyConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterControlPlaneProxyConfig expands an instance of ClusterControlPlaneProxyConfig into a JSON +// request object. +func expandClusterControlPlaneProxyConfig(c *Client, f *ClusterControlPlaneProxyConfig, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.ResourceGroupId; !dcl.IsEmptyValueIndirect(v) { + m["resourceGroupId"] = v + } + if v := f.SecretId; !dcl.IsEmptyValueIndirect(v) { + m["secretId"] = v + } + + return m, nil +} + +// flattenClusterControlPlaneProxyConfig flattens an instance of ClusterControlPlaneProxyConfig from a JSON +// response object. +func flattenClusterControlPlaneProxyConfig(c *Client, i interface{}, res *Cluster) *ClusterControlPlaneProxyConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterControlPlaneProxyConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterControlPlaneProxyConfig + } + r.ResourceGroupId = dcl.FlattenString(m["resourceGroupId"]) + r.SecretId = dcl.FlattenString(m["secretId"]) + + return r +} + +// expandClusterControlPlaneReplicaPlacementsMap expands the contents of ClusterControlPlaneReplicaPlacements into a JSON +// request object. 
+func expandClusterControlPlaneReplicaPlacementsMap(c *Client, f map[string]ClusterControlPlaneReplicaPlacements, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterControlPlaneReplicaPlacements(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterControlPlaneReplicaPlacementsSlice expands the contents of ClusterControlPlaneReplicaPlacements into a JSON +// request object. +func expandClusterControlPlaneReplicaPlacementsSlice(c *Client, f []ClusterControlPlaneReplicaPlacements, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterControlPlaneReplicaPlacements(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterControlPlaneReplicaPlacementsMap flattens the contents of ClusterControlPlaneReplicaPlacements from a JSON +// response object. +func flattenClusterControlPlaneReplicaPlacementsMap(c *Client, i interface{}, res *Cluster) map[string]ClusterControlPlaneReplicaPlacements { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterControlPlaneReplicaPlacements{} + } + + if len(a) == 0 { + return map[string]ClusterControlPlaneReplicaPlacements{} + } + + items := make(map[string]ClusterControlPlaneReplicaPlacements) + for k, item := range a { + items[k] = *flattenClusterControlPlaneReplicaPlacements(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterControlPlaneReplicaPlacementsSlice flattens the contents of ClusterControlPlaneReplicaPlacements from a JSON +// response object. 
+func flattenClusterControlPlaneReplicaPlacementsSlice(c *Client, i interface{}, res *Cluster) []ClusterControlPlaneReplicaPlacements { + a, ok := i.([]interface{}) + if !ok { + return []ClusterControlPlaneReplicaPlacements{} + } + + if len(a) == 0 { + return []ClusterControlPlaneReplicaPlacements{} + } + + items := make([]ClusterControlPlaneReplicaPlacements, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterControlPlaneReplicaPlacements(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterControlPlaneReplicaPlacements expands an instance of ClusterControlPlaneReplicaPlacements into a JSON +// request object. +func expandClusterControlPlaneReplicaPlacements(c *Client, f *ClusterControlPlaneReplicaPlacements, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.SubnetId; !dcl.IsEmptyValueIndirect(v) { + m["subnetId"] = v + } + if v := f.AzureAvailabilityZone; !dcl.IsEmptyValueIndirect(v) { + m["azureAvailabilityZone"] = v + } + + return m, nil +} + +// flattenClusterControlPlaneReplicaPlacements flattens an instance of ClusterControlPlaneReplicaPlacements from a JSON +// response object. +func flattenClusterControlPlaneReplicaPlacements(c *Client, i interface{}, res *Cluster) *ClusterControlPlaneReplicaPlacements { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterControlPlaneReplicaPlacements{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterControlPlaneReplicaPlacements + } + r.SubnetId = dcl.FlattenString(m["subnetId"]) + r.AzureAvailabilityZone = dcl.FlattenString(m["azureAvailabilityZone"]) + + return r +} + +// expandClusterAuthorizationMap expands the contents of ClusterAuthorization into a JSON +// request object. 
+func expandClusterAuthorizationMap(c *Client, f map[string]ClusterAuthorization, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterAuthorization(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterAuthorizationSlice expands the contents of ClusterAuthorization into a JSON +// request object. +func expandClusterAuthorizationSlice(c *Client, f []ClusterAuthorization, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterAuthorization(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterAuthorizationMap flattens the contents of ClusterAuthorization from a JSON +// response object. +func flattenClusterAuthorizationMap(c *Client, i interface{}, res *Cluster) map[string]ClusterAuthorization { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterAuthorization{} + } + + if len(a) == 0 { + return map[string]ClusterAuthorization{} + } + + items := make(map[string]ClusterAuthorization) + for k, item := range a { + items[k] = *flattenClusterAuthorization(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterAuthorizationSlice flattens the contents of ClusterAuthorization from a JSON +// response object. 
+func flattenClusterAuthorizationSlice(c *Client, i interface{}, res *Cluster) []ClusterAuthorization { + a, ok := i.([]interface{}) + if !ok { + return []ClusterAuthorization{} + } + + if len(a) == 0 { + return []ClusterAuthorization{} + } + + items := make([]ClusterAuthorization, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterAuthorization(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterAuthorization expands an instance of ClusterAuthorization into a JSON +// request object. +func expandClusterAuthorization(c *Client, f *ClusterAuthorization, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v, err := expandClusterAuthorizationAdminUsersSlice(c, f.AdminUsers, res); err != nil { + return nil, fmt.Errorf("error expanding AdminUsers into adminUsers: %w", err) + } else if v != nil { + m["adminUsers"] = v + } + if v, err := expandClusterAuthorizationAdminGroupsSlice(c, f.AdminGroups, res); err != nil { + return nil, fmt.Errorf("error expanding AdminGroups into adminGroups: %w", err) + } else if v != nil { + m["adminGroups"] = v + } + + return m, nil +} + +// flattenClusterAuthorization flattens an instance of ClusterAuthorization from a JSON +// response object. +func flattenClusterAuthorization(c *Client, i interface{}, res *Cluster) *ClusterAuthorization { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterAuthorization{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterAuthorization + } + r.AdminUsers = flattenClusterAuthorizationAdminUsersSlice(c, m["adminUsers"], res) + r.AdminGroups = flattenClusterAuthorizationAdminGroupsSlice(c, m["adminGroups"], res) + + return r +} + +// expandClusterAuthorizationAdminUsersMap expands the contents of ClusterAuthorizationAdminUsers into a JSON +// request object. 
+func expandClusterAuthorizationAdminUsersMap(c *Client, f map[string]ClusterAuthorizationAdminUsers, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterAuthorizationAdminUsers(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterAuthorizationAdminUsersSlice expands the contents of ClusterAuthorizationAdminUsers into a JSON +// request object. +func expandClusterAuthorizationAdminUsersSlice(c *Client, f []ClusterAuthorizationAdminUsers, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterAuthorizationAdminUsers(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterAuthorizationAdminUsersMap flattens the contents of ClusterAuthorizationAdminUsers from a JSON +// response object. +func flattenClusterAuthorizationAdminUsersMap(c *Client, i interface{}, res *Cluster) map[string]ClusterAuthorizationAdminUsers { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterAuthorizationAdminUsers{} + } + + if len(a) == 0 { + return map[string]ClusterAuthorizationAdminUsers{} + } + + items := make(map[string]ClusterAuthorizationAdminUsers) + for k, item := range a { + items[k] = *flattenClusterAuthorizationAdminUsers(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterAuthorizationAdminUsersSlice flattens the contents of ClusterAuthorizationAdminUsers from a JSON +// response object. 
+func flattenClusterAuthorizationAdminUsersSlice(c *Client, i interface{}, res *Cluster) []ClusterAuthorizationAdminUsers { + a, ok := i.([]interface{}) + if !ok { + return []ClusterAuthorizationAdminUsers{} + } + + if len(a) == 0 { + return []ClusterAuthorizationAdminUsers{} + } + + items := make([]ClusterAuthorizationAdminUsers, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterAuthorizationAdminUsers(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterAuthorizationAdminUsers expands an instance of ClusterAuthorizationAdminUsers into a JSON +// request object. +func expandClusterAuthorizationAdminUsers(c *Client, f *ClusterAuthorizationAdminUsers, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Username; !dcl.IsEmptyValueIndirect(v) { + m["username"] = v + } + + return m, nil +} + +// flattenClusterAuthorizationAdminUsers flattens an instance of ClusterAuthorizationAdminUsers from a JSON +// response object. +func flattenClusterAuthorizationAdminUsers(c *Client, i interface{}, res *Cluster) *ClusterAuthorizationAdminUsers { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterAuthorizationAdminUsers{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterAuthorizationAdminUsers + } + r.Username = dcl.FlattenString(m["username"]) + + return r +} + +// expandClusterAuthorizationAdminGroupsMap expands the contents of ClusterAuthorizationAdminGroups into a JSON +// request object. 
+func expandClusterAuthorizationAdminGroupsMap(c *Client, f map[string]ClusterAuthorizationAdminGroups, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterAuthorizationAdminGroups(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterAuthorizationAdminGroupsSlice expands the contents of ClusterAuthorizationAdminGroups into a JSON +// request object. +func expandClusterAuthorizationAdminGroupsSlice(c *Client, f []ClusterAuthorizationAdminGroups, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterAuthorizationAdminGroups(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterAuthorizationAdminGroupsMap flattens the contents of ClusterAuthorizationAdminGroups from a JSON +// response object. +func flattenClusterAuthorizationAdminGroupsMap(c *Client, i interface{}, res *Cluster) map[string]ClusterAuthorizationAdminGroups { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterAuthorizationAdminGroups{} + } + + if len(a) == 0 { + return map[string]ClusterAuthorizationAdminGroups{} + } + + items := make(map[string]ClusterAuthorizationAdminGroups) + for k, item := range a { + items[k] = *flattenClusterAuthorizationAdminGroups(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterAuthorizationAdminGroupsSlice flattens the contents of ClusterAuthorizationAdminGroups from a JSON +// response object. 
+func flattenClusterAuthorizationAdminGroupsSlice(c *Client, i interface{}, res *Cluster) []ClusterAuthorizationAdminGroups { + a, ok := i.([]interface{}) + if !ok { + return []ClusterAuthorizationAdminGroups{} + } + + if len(a) == 0 { + return []ClusterAuthorizationAdminGroups{} + } + + items := make([]ClusterAuthorizationAdminGroups, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterAuthorizationAdminGroups(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterAuthorizationAdminGroups expands an instance of ClusterAuthorizationAdminGroups into a JSON +// request object. +func expandClusterAuthorizationAdminGroups(c *Client, f *ClusterAuthorizationAdminGroups, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Group; !dcl.IsEmptyValueIndirect(v) { + m["group"] = v + } + + return m, nil +} + +// flattenClusterAuthorizationAdminGroups flattens an instance of ClusterAuthorizationAdminGroups from a JSON +// response object. +func flattenClusterAuthorizationAdminGroups(c *Client, i interface{}, res *Cluster) *ClusterAuthorizationAdminGroups { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterAuthorizationAdminGroups{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterAuthorizationAdminGroups + } + r.Group = dcl.FlattenString(m["group"]) + + return r +} + +// expandClusterWorkloadIdentityConfigMap expands the contents of ClusterWorkloadIdentityConfig into a JSON +// request object. 
+func expandClusterWorkloadIdentityConfigMap(c *Client, f map[string]ClusterWorkloadIdentityConfig, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterWorkloadIdentityConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterWorkloadIdentityConfigSlice expands the contents of ClusterWorkloadIdentityConfig into a JSON +// request object. +func expandClusterWorkloadIdentityConfigSlice(c *Client, f []ClusterWorkloadIdentityConfig, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterWorkloadIdentityConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterWorkloadIdentityConfigMap flattens the contents of ClusterWorkloadIdentityConfig from a JSON +// response object. +func flattenClusterWorkloadIdentityConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterWorkloadIdentityConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterWorkloadIdentityConfig{} + } + + if len(a) == 0 { + return map[string]ClusterWorkloadIdentityConfig{} + } + + items := make(map[string]ClusterWorkloadIdentityConfig) + for k, item := range a { + items[k] = *flattenClusterWorkloadIdentityConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterWorkloadIdentityConfigSlice flattens the contents of ClusterWorkloadIdentityConfig from a JSON +// response object. 
+func flattenClusterWorkloadIdentityConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterWorkloadIdentityConfig { + a, ok := i.([]interface{}) + if !ok { + return []ClusterWorkloadIdentityConfig{} + } + + if len(a) == 0 { + return []ClusterWorkloadIdentityConfig{} + } + + items := make([]ClusterWorkloadIdentityConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterWorkloadIdentityConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterWorkloadIdentityConfig expands an instance of ClusterWorkloadIdentityConfig into a JSON +// request object. +func expandClusterWorkloadIdentityConfig(c *Client, f *ClusterWorkloadIdentityConfig, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.IssuerUri; !dcl.IsEmptyValueIndirect(v) { + m["issuerUri"] = v + } + if v := f.WorkloadPool; !dcl.IsEmptyValueIndirect(v) { + m["workloadPool"] = v + } + if v := f.IdentityProvider; !dcl.IsEmptyValueIndirect(v) { + m["identityProvider"] = v + } + + return m, nil +} + +// flattenClusterWorkloadIdentityConfig flattens an instance of ClusterWorkloadIdentityConfig from a JSON +// response object. +func flattenClusterWorkloadIdentityConfig(c *Client, i interface{}, res *Cluster) *ClusterWorkloadIdentityConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterWorkloadIdentityConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterWorkloadIdentityConfig + } + r.IssuerUri = dcl.FlattenString(m["issuerUri"]) + r.WorkloadPool = dcl.FlattenString(m["workloadPool"]) + r.IdentityProvider = dcl.FlattenString(m["identityProvider"]) + + return r +} + +// expandClusterFleetMap expands the contents of ClusterFleet into a JSON +// request object. 
+func expandClusterFleetMap(c *Client, f map[string]ClusterFleet, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterFleet(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterFleetSlice expands the contents of ClusterFleet into a JSON +// request object. +func expandClusterFleetSlice(c *Client, f []ClusterFleet, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterFleet(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterFleetMap flattens the contents of ClusterFleet from a JSON +// response object. +func flattenClusterFleetMap(c *Client, i interface{}, res *Cluster) map[string]ClusterFleet { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterFleet{} + } + + if len(a) == 0 { + return map[string]ClusterFleet{} + } + + items := make(map[string]ClusterFleet) + for k, item := range a { + items[k] = *flattenClusterFleet(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterFleetSlice flattens the contents of ClusterFleet from a JSON +// response object. +func flattenClusterFleetSlice(c *Client, i interface{}, res *Cluster) []ClusterFleet { + a, ok := i.([]interface{}) + if !ok { + return []ClusterFleet{} + } + + if len(a) == 0 { + return []ClusterFleet{} + } + + items := make([]ClusterFleet, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterFleet(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterFleet expands an instance of ClusterFleet into a JSON +// request object. 
+func expandClusterFleet(c *Client, f *ClusterFleet, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v, err := dcl.DeriveField("projects/%s", f.Project, dcl.SelfLinkToName(f.Project)); err != nil { + return nil, fmt.Errorf("error expanding Project into project: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["project"] = v + } + + return m, nil +} + +// flattenClusterFleet flattens an instance of ClusterFleet from a JSON +// response object. +func flattenClusterFleet(c *Client, i interface{}, res *Cluster) *ClusterFleet { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterFleet{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterFleet + } + r.Project = dcl.FlattenString(m["project"]) + r.Membership = dcl.FlattenString(m["membership"]) + + return r +} + +{{- if ne $.TargetVersionName "ga" }} +// expandClusterLoggingConfigMap expands the contents of ClusterLoggingConfig into a JSON +// request object. +func expandClusterLoggingConfigMap(c *Client, f map[string]ClusterLoggingConfig, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterLoggingConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterLoggingConfigSlice expands the contents of ClusterLoggingConfig into a JSON +// request object. 
+func expandClusterLoggingConfigSlice(c *Client, f []ClusterLoggingConfig, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterLoggingConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterLoggingConfigMap flattens the contents of ClusterLoggingConfig from a JSON +// response object. +func flattenClusterLoggingConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterLoggingConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterLoggingConfig{} + } + + if len(a) == 0 { + return map[string]ClusterLoggingConfig{} + } + + items := make(map[string]ClusterLoggingConfig) + for k, item := range a { + items[k] = *flattenClusterLoggingConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterLoggingConfigSlice flattens the contents of ClusterLoggingConfig from a JSON +// response object. +func flattenClusterLoggingConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterLoggingConfig { + a, ok := i.([]interface{}) + if !ok { + return []ClusterLoggingConfig{} + } + + if len(a) == 0 { + return []ClusterLoggingConfig{} + } + + items := make([]ClusterLoggingConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterLoggingConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterLoggingConfig expands an instance of ClusterLoggingConfig into a JSON +// request object. 
+func expandClusterLoggingConfig(c *Client, f *ClusterLoggingConfig, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v, err := expandClusterLoggingConfigComponentConfig(c, f.ComponentConfig, res); err != nil { + return nil, fmt.Errorf("error expanding ComponentConfig into componentConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["componentConfig"] = v + } + + return m, nil +} + +// flattenClusterLoggingConfig flattens an instance of ClusterLoggingConfig from a JSON +// response object. +func flattenClusterLoggingConfig(c *Client, i interface{}, res *Cluster) *ClusterLoggingConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterLoggingConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterLoggingConfig + } + r.ComponentConfig = flattenClusterLoggingConfigComponentConfig(c, m["componentConfig"], res) + + return r +} + +// expandClusterLoggingConfigComponentConfigMap expands the contents of ClusterLoggingConfigComponentConfig into a JSON +// request object. +func expandClusterLoggingConfigComponentConfigMap(c *Client, f map[string]ClusterLoggingConfigComponentConfig, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterLoggingConfigComponentConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterLoggingConfigComponentConfigSlice expands the contents of ClusterLoggingConfigComponentConfig into a JSON +// request object. 
+func expandClusterLoggingConfigComponentConfigSlice(c *Client, f []ClusterLoggingConfigComponentConfig, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterLoggingConfigComponentConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterLoggingConfigComponentConfigMap flattens the contents of ClusterLoggingConfigComponentConfig from a JSON +// response object. +func flattenClusterLoggingConfigComponentConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterLoggingConfigComponentConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterLoggingConfigComponentConfig{} + } + + if len(a) == 0 { + return map[string]ClusterLoggingConfigComponentConfig{} + } + + items := make(map[string]ClusterLoggingConfigComponentConfig) + for k, item := range a { + items[k] = *flattenClusterLoggingConfigComponentConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterLoggingConfigComponentConfigSlice flattens the contents of ClusterLoggingConfigComponentConfig from a JSON +// response object. +func flattenClusterLoggingConfigComponentConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterLoggingConfigComponentConfig { + a, ok := i.([]interface{}) + if !ok { + return []ClusterLoggingConfigComponentConfig{} + } + + if len(a) == 0 { + return []ClusterLoggingConfigComponentConfig{} + } + + items := make([]ClusterLoggingConfigComponentConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterLoggingConfigComponentConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterLoggingConfigComponentConfig expands an instance of ClusterLoggingConfigComponentConfig into a JSON +// request object. 
+func expandClusterLoggingConfigComponentConfig(c *Client, f *ClusterLoggingConfigComponentConfig, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.EnableComponents; v != nil { + m["enableComponents"] = v + } + + return m, nil +} + +// flattenClusterLoggingConfigComponentConfig flattens an instance of ClusterLoggingConfigComponentConfig from a JSON +// response object. +func flattenClusterLoggingConfigComponentConfig(c *Client, i interface{}, res *Cluster) *ClusterLoggingConfigComponentConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterLoggingConfigComponentConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterLoggingConfigComponentConfig + } + r.EnableComponents = flattenClusterLoggingConfigComponentConfigEnableComponentsEnumSlice(c, m["enableComponents"], res) + + return r +} + +// expandClusterMonitoringConfigMap expands the contents of ClusterMonitoringConfig into a JSON +// request object. +func expandClusterMonitoringConfigMap(c *Client, f map[string]ClusterMonitoringConfig, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterMonitoringConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterMonitoringConfigSlice expands the contents of ClusterMonitoringConfig into a JSON +// request object. 
+func expandClusterMonitoringConfigSlice(c *Client, f []ClusterMonitoringConfig, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterMonitoringConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterMonitoringConfigMap flattens the contents of ClusterMonitoringConfig from a JSON +// response object. +func flattenClusterMonitoringConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterMonitoringConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterMonitoringConfig{} + } + + if len(a) == 0 { + return map[string]ClusterMonitoringConfig{} + } + + items := make(map[string]ClusterMonitoringConfig) + for k, item := range a { + items[k] = *flattenClusterMonitoringConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterMonitoringConfigSlice flattens the contents of ClusterMonitoringConfig from a JSON +// response object. +func flattenClusterMonitoringConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterMonitoringConfig { + a, ok := i.([]interface{}) + if !ok { + return []ClusterMonitoringConfig{} + } + + if len(a) == 0 { + return []ClusterMonitoringConfig{} + } + + items := make([]ClusterMonitoringConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterMonitoringConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterMonitoringConfig expands an instance of ClusterMonitoringConfig into a JSON +// request object. 
+func expandClusterMonitoringConfig(c *Client, f *ClusterMonitoringConfig, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v, err := expandClusterMonitoringConfigManagedPrometheusConfig(c, f.ManagedPrometheusConfig, res); err != nil { + return nil, fmt.Errorf("error expanding ManagedPrometheusConfig into managedPrometheusConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["managedPrometheusConfig"] = v + } + + return m, nil +} + +// flattenClusterMonitoringConfig flattens an instance of ClusterMonitoringConfig from a JSON +// response object. +func flattenClusterMonitoringConfig(c *Client, i interface{}, res *Cluster) *ClusterMonitoringConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterMonitoringConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterMonitoringConfig + } + r.ManagedPrometheusConfig = flattenClusterMonitoringConfigManagedPrometheusConfig(c, m["managedPrometheusConfig"], res) + + return r +} + +// expandClusterMonitoringConfigManagedPrometheusConfigMap expands the contents of ClusterMonitoringConfigManagedPrometheusConfig into a JSON +// request object. +func expandClusterMonitoringConfigManagedPrometheusConfigMap(c *Client, f map[string]ClusterMonitoringConfigManagedPrometheusConfig, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterMonitoringConfigManagedPrometheusConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterMonitoringConfigManagedPrometheusConfigSlice expands the contents of ClusterMonitoringConfigManagedPrometheusConfig into a JSON +// request object. 
+func expandClusterMonitoringConfigManagedPrometheusConfigSlice(c *Client, f []ClusterMonitoringConfigManagedPrometheusConfig, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterMonitoringConfigManagedPrometheusConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterMonitoringConfigManagedPrometheusConfigMap flattens the contents of ClusterMonitoringConfigManagedPrometheusConfig from a JSON +// response object. +func flattenClusterMonitoringConfigManagedPrometheusConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterMonitoringConfigManagedPrometheusConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterMonitoringConfigManagedPrometheusConfig{} + } + + if len(a) == 0 { + return map[string]ClusterMonitoringConfigManagedPrometheusConfig{} + } + + items := make(map[string]ClusterMonitoringConfigManagedPrometheusConfig) + for k, item := range a { + items[k] = *flattenClusterMonitoringConfigManagedPrometheusConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterMonitoringConfigManagedPrometheusConfigSlice flattens the contents of ClusterMonitoringConfigManagedPrometheusConfig from a JSON +// response object. 
+func flattenClusterMonitoringConfigManagedPrometheusConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterMonitoringConfigManagedPrometheusConfig { + a, ok := i.([]interface{}) + if !ok { + return []ClusterMonitoringConfigManagedPrometheusConfig{} + } + + if len(a) == 0 { + return []ClusterMonitoringConfigManagedPrometheusConfig{} + } + + items := make([]ClusterMonitoringConfigManagedPrometheusConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterMonitoringConfigManagedPrometheusConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterMonitoringConfigManagedPrometheusConfig expands an instance of ClusterMonitoringConfigManagedPrometheusConfig into a JSON +// request object. +func expandClusterMonitoringConfigManagedPrometheusConfig(c *Client, f *ClusterMonitoringConfigManagedPrometheusConfig, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Enabled; !dcl.IsEmptyValueIndirect(v) { + m["enabled"] = v + } + + return m, nil +} + +// flattenClusterMonitoringConfigManagedPrometheusConfig flattens an instance of ClusterMonitoringConfigManagedPrometheusConfig from a JSON +// response object. +func flattenClusterMonitoringConfigManagedPrometheusConfig(c *Client, i interface{}, res *Cluster) *ClusterMonitoringConfigManagedPrometheusConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterMonitoringConfigManagedPrometheusConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterMonitoringConfigManagedPrometheusConfig + } + r.Enabled = dcl.FlattenBool(m["enabled"]) + + return r +} + +{{- end }} +// flattenClusterStateEnumMap flattens the contents of ClusterStateEnum from a JSON +// response object. 
+func flattenClusterStateEnumMap(c *Client, i interface{}, res *Cluster) map[string]ClusterStateEnum {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]ClusterStateEnum{}
+	}
+
+	if len(a) == 0 {
+		return map[string]ClusterStateEnum{}
+	}
+
+	items := make(map[string]ClusterStateEnum)
+	for k, item := range a {
+		items[k] = *flattenClusterStateEnum(item.(interface{}))
+	}
+
+	return items
+}
+
+// flattenClusterStateEnumSlice flattens the contents of ClusterStateEnum from a JSON
+// response object.
+func flattenClusterStateEnumSlice(c *Client, i interface{}, res *Cluster) []ClusterStateEnum {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []ClusterStateEnum{}
+	}
+
+	if len(a) == 0 {
+		return []ClusterStateEnum{}
+	}
+
+	items := make([]ClusterStateEnum, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenClusterStateEnum(item.(interface{})))
+	}
+
+	return items
+}
+
+// flattenClusterStateEnum asserts that an interface is a string, and returns a
+// pointer to a *ClusterStateEnum with the same value as that string.
+func flattenClusterStateEnum(i interface{}) *ClusterStateEnum {
+	s, ok := i.(string)
+	if !ok {
+		return nil
+	}
+
+	return ClusterStateEnumRef(s)
+{{- if ne $.TargetVersionName "ga" }}
+} // non-GA builds: closes flattenClusterStateEnum here so the extra enum helpers below can follow
+
+// flattenClusterLoggingConfigComponentConfigEnableComponentsEnumMap flattens the contents of ClusterLoggingConfigComponentConfigEnableComponentsEnum from a JSON
+// response object.
+func flattenClusterLoggingConfigComponentConfigEnableComponentsEnumMap(c *Client, i interface{}, res *Cluster) map[string]ClusterLoggingConfigComponentConfigEnableComponentsEnum {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]ClusterLoggingConfigComponentConfigEnableComponentsEnum{}
+	}
+
+	if len(a) == 0 {
+		return map[string]ClusterLoggingConfigComponentConfigEnableComponentsEnum{}
+	}
+
+	items := make(map[string]ClusterLoggingConfigComponentConfigEnableComponentsEnum)
+	for k, item := range a {
+		items[k] = *flattenClusterLoggingConfigComponentConfigEnableComponentsEnum(item.(interface{}))
+	}
+
+	return items
+}
+
+// flattenClusterLoggingConfigComponentConfigEnableComponentsEnumSlice flattens the contents of ClusterLoggingConfigComponentConfigEnableComponentsEnum from a JSON
+// response object.
+func flattenClusterLoggingConfigComponentConfigEnableComponentsEnumSlice(c *Client, i interface{}, res *Cluster) []ClusterLoggingConfigComponentConfigEnableComponentsEnum {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []ClusterLoggingConfigComponentConfigEnableComponentsEnum{}
+	}
+
+	if len(a) == 0 {
+		return []ClusterLoggingConfigComponentConfigEnableComponentsEnum{}
+	}
+
+	items := make([]ClusterLoggingConfigComponentConfigEnableComponentsEnum, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenClusterLoggingConfigComponentConfigEnableComponentsEnum(item.(interface{})))
+	}
+
+	return items
+}
+
+// flattenClusterLoggingConfigComponentConfigEnableComponentsEnum asserts that an interface is a string, and returns a
+// pointer to a *ClusterLoggingConfigComponentConfigEnableComponentsEnum with the same value as that string.
+func flattenClusterLoggingConfigComponentConfigEnableComponentsEnum(i interface{}) *ClusterLoggingConfigComponentConfigEnableComponentsEnum {
+	s, ok := i.(string)
+	if !ok {
+		return nil
+	}
+
+	return ClusterLoggingConfigComponentConfigEnableComponentsEnumRef(s)
+{{- end }}
+} // GA builds: closes flattenClusterStateEnum; non-GA builds: closes the EnableComponentsEnum helper above
+
+// This function returns a matcher that checks whether a serialized resource matches this resource
+// in its parameters (as defined by the fields in a Get, which definitionally define resource
+// identity). This is useful in extracting the element from a List call.
+func (r *Cluster) matcher(c *Client) func([]byte) bool {
+	return func(b []byte) bool {
+		cr, err := unmarshalCluster(b, c, r)
+		if err != nil {
+			c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.")
+			return false
+		}
+		nr := r.urlNormalized()
+		ncr := cr.urlNormalized()
+		c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr)
+
+		if nr.Project == nil && ncr.Project == nil {
+			c.Config.Logger.Info("Both Project fields null - considering equal.")
+		} else if nr.Project == nil || ncr.Project == nil {
+			c.Config.Logger.Info("Only one Project field is null - considering unequal.")
+			return false
+		} else if *nr.Project != *ncr.Project {
+			return false
+		}
+		if nr.Location == nil && ncr.Location == nil {
+			c.Config.Logger.Info("Both Location fields null - considering equal.")
+		} else if nr.Location == nil || ncr.Location == nil {
+			c.Config.Logger.Info("Only one Location field is null - considering unequal.")
+			return false
+		} else if *nr.Location != *ncr.Location {
+			return false
+		}
+		if nr.Name == nil && ncr.Name == nil {
+			c.Config.Logger.Info("Both Name fields null - considering equal.")
+		} else if nr.Name == nil || ncr.Name == nil {
+			c.Config.Logger.Info("Only one Name field is null - considering unequal.")
+			return false
+		} else if *nr.Name != *ncr.Name {
+			return false
+		}
+		return true
+	}
+}
+
+type clusterDiff struct {
+	// The diff should include one or the other of RequiresRecreate or UpdateOp.
+	RequiresRecreate bool
+	UpdateOp         clusterApiOperation
+	FieldName        string // used for error logging
+}
+
+func convertFieldDiffsToClusterDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]clusterDiff, error) { // groups field diffs by resulting operation; the "Recreate" op name sets RequiresRecreate
+	opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff)
+	// Map each operation name to the field diffs associated with it.
+	for _, fd := range fds {
+		for _, ro := range fd.ResultingOperation {
+			if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok {
+				fieldDiffs = append(fieldDiffs, fd)
+				opNamesToFieldDiffs[ro] = fieldDiffs
+			} else {
+				config.Logger.Infof("%s required due to diff: %v", ro, fd)
+				opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd}
+			}
+		}
+	}
+	var diffs []clusterDiff
+	// For each operation name, create a clusterDiff which contains the operation.
+	for opName, fieldDiffs := range opNamesToFieldDiffs {
+		// Use the first field diff's field name for logging required recreate error.
+		diff := clusterDiff{FieldName: fieldDiffs[0].FieldName}
+		if opName == "Recreate" {
+			diff.RequiresRecreate = true
+		} else {
+			apiOp, err := convertOpNameToClusterApiOperation(opName, fieldDiffs, opts...)
+			if err != nil {
+				return diffs, err
+			}
+			diff.UpdateOp = apiOp
+		}
+		diffs = append(diffs, diff)
+	}
+	return diffs, nil
+}
+
+func convertOpNameToClusterApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (clusterApiOperation, error) {
+	switch opName {
+
+	case "updateClusterUpdateAzureClusterOperation":
+		return &updateClusterUpdateAzureClusterOperation{FieldDiffs: fieldDiffs}, nil
+
+	default:
+		return nil, fmt.Errorf("no such operation with name: %v", opName)
+	}
+}
+
+func extractClusterFields(r *Cluster) error { // materialize nil nested objects, run per-field extract hooks, write back only non-empty results
+	vAzureServicesAuthentication := r.AzureServicesAuthentication
+	if vAzureServicesAuthentication == nil {
+		// note: explicitly not the empty object.
+		vAzureServicesAuthentication = &ClusterAzureServicesAuthentication{}
+	}
+	if err := extractClusterAzureServicesAuthenticationFields(r, vAzureServicesAuthentication); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vAzureServicesAuthentication) {
+		r.AzureServicesAuthentication = vAzureServicesAuthentication
+	}
+	vNetworking := r.Networking
+	if vNetworking == nil {
+		// note: explicitly not the empty object.
+		vNetworking = &ClusterNetworking{}
+	}
+	if err := extractClusterNetworkingFields(r, vNetworking); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vNetworking) {
+		r.Networking = vNetworking
+	}
+	vControlPlane := r.ControlPlane
+	if vControlPlane == nil {
+		// note: explicitly not the empty object.
+		vControlPlane = &ClusterControlPlane{}
+	}
+	if err := extractClusterControlPlaneFields(r, vControlPlane); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vControlPlane) {
+		r.ControlPlane = vControlPlane
+	}
+	vAuthorization := r.Authorization
+	if vAuthorization == nil {
+		// note: explicitly not the empty object.
+		vAuthorization = &ClusterAuthorization{}
+	}
+	if err := extractClusterAuthorizationFields(r, vAuthorization); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vAuthorization) {
+		r.Authorization = vAuthorization
+	}
+	vWorkloadIdentityConfig := r.WorkloadIdentityConfig
+	if vWorkloadIdentityConfig == nil {
+		// note: explicitly not the empty object.
+		vWorkloadIdentityConfig = &ClusterWorkloadIdentityConfig{}
+	}
+	if err := extractClusterWorkloadIdentityConfigFields(r, vWorkloadIdentityConfig); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vWorkloadIdentityConfig) {
+		r.WorkloadIdentityConfig = vWorkloadIdentityConfig
+	}
+	vFleet := r.Fleet
+	if vFleet == nil {
+		// note: explicitly not the empty object.
+		vFleet = &ClusterFleet{}
+	}
+	if err := extractClusterFleetFields(r, vFleet); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vFleet) {
+		r.Fleet = vFleet
+	}
+{{- if ne $.TargetVersionName "ga" }}
+	vLoggingConfig := r.LoggingConfig
+	if vLoggingConfig == nil {
+		// note: explicitly not the empty object.
+		vLoggingConfig = &ClusterLoggingConfig{}
+	}
+	if err := extractClusterLoggingConfigFields(r, vLoggingConfig); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vLoggingConfig) {
+		r.LoggingConfig = vLoggingConfig
+	}
+	vMonitoringConfig := r.MonitoringConfig
+	if vMonitoringConfig == nil {
+		// note: explicitly not the empty object.
+		vMonitoringConfig = &ClusterMonitoringConfig{}
+	}
+	if err := extractClusterMonitoringConfigFields(r, vMonitoringConfig); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vMonitoringConfig) {
+		r.MonitoringConfig = vMonitoringConfig
+	}
+{{- end }}
+	return nil
+}
+func extractClusterAzureServicesAuthenticationFields(r *Cluster, o *ClusterAzureServicesAuthentication) error { // no-op extract hook
+	return nil
+}
+func extractClusterNetworkingFields(r *Cluster, o *ClusterNetworking) error {
+	return nil
+}
+func extractClusterControlPlaneFields(r *Cluster, o *ClusterControlPlane) error { // same materialize-and-hook pattern for control-plane sub-objects
+	vSshConfig := o.SshConfig
+	if vSshConfig == nil {
+		// note: explicitly not the empty object.
+		vSshConfig = &ClusterControlPlaneSshConfig{}
+	}
+	if err := extractClusterControlPlaneSshConfigFields(r, vSshConfig); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vSshConfig) {
+		o.SshConfig = vSshConfig
+	}
+	vRootVolume := o.RootVolume
+	if vRootVolume == nil {
+		// note: explicitly not the empty object.
+		vRootVolume = &ClusterControlPlaneRootVolume{}
+	}
+	if err := extractClusterControlPlaneRootVolumeFields(r, vRootVolume); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vRootVolume) {
+		o.RootVolume = vRootVolume
+	}
+	vMainVolume := o.MainVolume
+	if vMainVolume == nil {
+		// note: explicitly not the empty object.
+		vMainVolume = &ClusterControlPlaneMainVolume{}
+	}
+	if err := extractClusterControlPlaneMainVolumeFields(r, vMainVolume); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vMainVolume) {
+		o.MainVolume = vMainVolume
+	}
+	vDatabaseEncryption := o.DatabaseEncryption
+	if vDatabaseEncryption == nil {
+		// note: explicitly not the empty object.
+		vDatabaseEncryption = &ClusterControlPlaneDatabaseEncryption{}
+	}
+	if err := extractClusterControlPlaneDatabaseEncryptionFields(r, vDatabaseEncryption); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vDatabaseEncryption) {
+		o.DatabaseEncryption = vDatabaseEncryption
+	}
+	vProxyConfig := o.ProxyConfig
+	if vProxyConfig == nil {
+		// note: explicitly not the empty object.
+		vProxyConfig = &ClusterControlPlaneProxyConfig{}
+	}
+	if err := extractClusterControlPlaneProxyConfigFields(r, vProxyConfig); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vProxyConfig) {
+		o.ProxyConfig = vProxyConfig
+	}
+	return nil
+}
+func extractClusterControlPlaneSshConfigFields(r *Cluster, o *ClusterControlPlaneSshConfig) error { // no-op extract hooks through extractClusterFleetFields below
+	return nil
+}
+func extractClusterControlPlaneRootVolumeFields(r *Cluster, o *ClusterControlPlaneRootVolume) error {
+	return nil
+}
+func extractClusterControlPlaneMainVolumeFields(r *Cluster, o *ClusterControlPlaneMainVolume) error {
+	return nil
+}
+func extractClusterControlPlaneDatabaseEncryptionFields(r *Cluster, o *ClusterControlPlaneDatabaseEncryption) error {
+	return nil
+}
+func extractClusterControlPlaneProxyConfigFields(r *Cluster, o *ClusterControlPlaneProxyConfig) error {
+	return nil
+}
+func extractClusterControlPlaneReplicaPlacementsFields(r *Cluster, o *ClusterControlPlaneReplicaPlacements) error {
+	return nil
+}
+func extractClusterAuthorizationFields(r *Cluster, o *ClusterAuthorization) error {
+	return nil
+}
+func extractClusterAuthorizationAdminUsersFields(r *Cluster, o *ClusterAuthorizationAdminUsers) error {
+	return nil
+}
+func extractClusterAuthorizationAdminGroupsFields(r *Cluster, o *ClusterAuthorizationAdminGroups) error {
+	return nil
+}
+func extractClusterWorkloadIdentityConfigFields(r *Cluster, o *ClusterWorkloadIdentityConfig) error {
+	return nil
+}
+func extractClusterFleetFields(r *Cluster, o *ClusterFleet) error {
+	return nil
+}
+{{- if ne $.TargetVersionName "ga" }}
+func extractClusterLoggingConfigFields(r *Cluster, o *ClusterLoggingConfig) error {
+	vComponentConfig := o.ComponentConfig
+	if vComponentConfig == nil {
+		// note: explicitly not the empty object.
+		vComponentConfig = &ClusterLoggingConfigComponentConfig{}
+	}
+	if err := extractClusterLoggingConfigComponentConfigFields(r, vComponentConfig); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vComponentConfig) {
+		o.ComponentConfig = vComponentConfig
+	}
+	return nil
+}
+func extractClusterLoggingConfigComponentConfigFields(r *Cluster, o *ClusterLoggingConfigComponentConfig) error {
+	return nil
+}
+func extractClusterMonitoringConfigFields(r *Cluster, o *ClusterMonitoringConfig) error {
+	vManagedPrometheusConfig := o.ManagedPrometheusConfig
+	if vManagedPrometheusConfig == nil {
+		// note: explicitly not the empty object.
+		vManagedPrometheusConfig = &ClusterMonitoringConfigManagedPrometheusConfig{}
+	}
+	if err := extractClusterMonitoringConfigManagedPrometheusConfigFields(r, vManagedPrometheusConfig); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vManagedPrometheusConfig) {
+		o.ManagedPrometheusConfig = vManagedPrometheusConfig
+	}
+	return nil
+}
+func extractClusterMonitoringConfigManagedPrometheusConfigFields(r *Cluster, o *ClusterMonitoringConfigManagedPrometheusConfig) error {
+	return nil
+}
+{{- end }}
+
+func postReadExtractClusterFields(r *Cluster) error { // post-Read variant of extractClusterFields; same materialize-then-hook pattern
+	vAzureServicesAuthentication := r.AzureServicesAuthentication
+	if vAzureServicesAuthentication == nil {
+		// note: explicitly not the empty object.
+		vAzureServicesAuthentication = &ClusterAzureServicesAuthentication{}
+	}
+	if err := postReadExtractClusterAzureServicesAuthenticationFields(r, vAzureServicesAuthentication); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vAzureServicesAuthentication) {
+		r.AzureServicesAuthentication = vAzureServicesAuthentication
+	}
+	vNetworking := r.Networking
+	if vNetworking == nil {
+		// note: explicitly not the empty object.
+		vNetworking = &ClusterNetworking{}
+	}
+	if err := postReadExtractClusterNetworkingFields(r, vNetworking); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vNetworking) {
+		r.Networking = vNetworking
+	}
+	vControlPlane := r.ControlPlane
+	if vControlPlane == nil {
+		// note: explicitly not the empty object.
+		vControlPlane = &ClusterControlPlane{}
+	}
+	if err := postReadExtractClusterControlPlaneFields(r, vControlPlane); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vControlPlane) {
+		r.ControlPlane = vControlPlane
+	}
+	vAuthorization := r.Authorization
+	if vAuthorization == nil {
+		// note: explicitly not the empty object.
+		vAuthorization = &ClusterAuthorization{}
+	}
+	if err := postReadExtractClusterAuthorizationFields(r, vAuthorization); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vAuthorization) {
+		r.Authorization = vAuthorization
+	}
+	vWorkloadIdentityConfig := r.WorkloadIdentityConfig
+	if vWorkloadIdentityConfig == nil {
+		// note: explicitly not the empty object.
+		vWorkloadIdentityConfig = &ClusterWorkloadIdentityConfig{}
+	}
+	if err := postReadExtractClusterWorkloadIdentityConfigFields(r, vWorkloadIdentityConfig); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vWorkloadIdentityConfig) {
+		r.WorkloadIdentityConfig = vWorkloadIdentityConfig
+	}
+	vFleet := r.Fleet
+	if vFleet == nil {
+		// note: explicitly not the empty object.
+		vFleet = &ClusterFleet{}
+	}
+	if err := postReadExtractClusterFleetFields(r, vFleet); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vFleet) {
+		r.Fleet = vFleet
+	}
+{{- if ne $.TargetVersionName "ga" }}
+	vLoggingConfig := r.LoggingConfig
+	if vLoggingConfig == nil {
+		// note: explicitly not the empty object.
+		vLoggingConfig = &ClusterLoggingConfig{}
+	}
+	if err := postReadExtractClusterLoggingConfigFields(r, vLoggingConfig); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vLoggingConfig) {
+		r.LoggingConfig = vLoggingConfig
+	}
+	vMonitoringConfig := r.MonitoringConfig
+	if vMonitoringConfig == nil {
+		// note: explicitly not the empty object.
+		vMonitoringConfig = &ClusterMonitoringConfig{}
+	}
+	if err := postReadExtractClusterMonitoringConfigFields(r, vMonitoringConfig); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vMonitoringConfig) {
+		r.MonitoringConfig = vMonitoringConfig
+	}
+{{- end }}
+	return nil
+}
+func postReadExtractClusterAzureServicesAuthenticationFields(r *Cluster, o *ClusterAzureServicesAuthentication) error {
+	return nil
+}
+func postReadExtractClusterNetworkingFields(r *Cluster, o *ClusterNetworking) error {
+	return nil
+}
+func postReadExtractClusterControlPlaneFields(r *Cluster, o *ClusterControlPlane) error { // NOTE(review): nested calls below use the extract* hooks, not postReadExtract* — matches generator output elsewhere; confirm intentional
+	vSshConfig := o.SshConfig
+	if vSshConfig == nil {
+		// note: explicitly not the empty object.
+		vSshConfig = &ClusterControlPlaneSshConfig{}
+	}
+	if err := extractClusterControlPlaneSshConfigFields(r, vSshConfig); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vSshConfig) {
+		o.SshConfig = vSshConfig
+	}
+	vRootVolume := o.RootVolume
+	if vRootVolume == nil {
+		// note: explicitly not the empty object.
+		vRootVolume = &ClusterControlPlaneRootVolume{}
+	}
+	if err := extractClusterControlPlaneRootVolumeFields(r, vRootVolume); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vRootVolume) {
+		o.RootVolume = vRootVolume
+	}
+	vMainVolume := o.MainVolume
+	if vMainVolume == nil {
+		// note: explicitly not the empty object.
+		vMainVolume = &ClusterControlPlaneMainVolume{}
+	}
+	if err := extractClusterControlPlaneMainVolumeFields(r, vMainVolume); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vMainVolume) {
+		o.MainVolume = vMainVolume
+	}
+	vDatabaseEncryption := o.DatabaseEncryption
+	if vDatabaseEncryption == nil {
+		// note: explicitly not the empty object.
+		vDatabaseEncryption = &ClusterControlPlaneDatabaseEncryption{}
+	}
+	if err := extractClusterControlPlaneDatabaseEncryptionFields(r, vDatabaseEncryption); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vDatabaseEncryption) {
+		o.DatabaseEncryption = vDatabaseEncryption
+	}
+	vProxyConfig := o.ProxyConfig
+	if vProxyConfig == nil {
+		// note: explicitly not the empty object.
+		vProxyConfig = &ClusterControlPlaneProxyConfig{}
+	}
+	if err := extractClusterControlPlaneProxyConfigFields(r, vProxyConfig); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vProxyConfig) {
+		o.ProxyConfig = vProxyConfig
+	}
+	return nil
+}
+func postReadExtractClusterControlPlaneSshConfigFields(r *Cluster, o *ClusterControlPlaneSshConfig) error {
+	return nil
+}
+func postReadExtractClusterControlPlaneRootVolumeFields(r *Cluster, o *ClusterControlPlaneRootVolume) error {
+	return nil
+}
+func postReadExtractClusterControlPlaneMainVolumeFields(r *Cluster, o *ClusterControlPlaneMainVolume) error {
+	return nil
+}
+func postReadExtractClusterControlPlaneDatabaseEncryptionFields(r *Cluster, o *ClusterControlPlaneDatabaseEncryption) error {
+	return nil
+}
+func postReadExtractClusterControlPlaneProxyConfigFields(r *Cluster, o *ClusterControlPlaneProxyConfig) error {
+	return nil
+}
+func postReadExtractClusterControlPlaneReplicaPlacementsFields(r *Cluster, o *ClusterControlPlaneReplicaPlacements) error {
+	return nil
+}
+func postReadExtractClusterAuthorizationFields(r *Cluster, o *ClusterAuthorization) error {
+	return nil
+}
+func postReadExtractClusterAuthorizationAdminUsersFields(r *Cluster, o *ClusterAuthorizationAdminUsers) error {
+	return nil
+}
+func postReadExtractClusterAuthorizationAdminGroupsFields(r *Cluster, o *ClusterAuthorizationAdminGroups) error {
+	return nil
+}
+func postReadExtractClusterWorkloadIdentityConfigFields(r *Cluster, o *ClusterWorkloadIdentityConfig) error {
+	return nil
+}
+func postReadExtractClusterFleetFields(r *Cluster, o *ClusterFleet) error {
+	return nil
+}
+{{- if ne $.TargetVersionName "ga" }}
+func postReadExtractClusterLoggingConfigFields(r *Cluster, o *ClusterLoggingConfig) error {
+	vComponentConfig := o.ComponentConfig
+	if vComponentConfig == nil {
+		// note: explicitly not the empty object.
+		vComponentConfig = &ClusterLoggingConfigComponentConfig{}
+	}
+	if err := extractClusterLoggingConfigComponentConfigFields(r, vComponentConfig); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vComponentConfig) {
+		o.ComponentConfig = vComponentConfig
+	}
+	return nil
+}
+func postReadExtractClusterLoggingConfigComponentConfigFields(r *Cluster, o *ClusterLoggingConfigComponentConfig) error {
+	return nil
+}
+func postReadExtractClusterMonitoringConfigFields(r *Cluster, o *ClusterMonitoringConfig) error {
+	vManagedPrometheusConfig := o.ManagedPrometheusConfig
+	if vManagedPrometheusConfig == nil {
+		// note: explicitly not the empty object.
+ vManagedPrometheusConfig = &ClusterMonitoringConfigManagedPrometheusConfig{} + } + if err := extractClusterMonitoringConfigManagedPrometheusConfigFields(r, vManagedPrometheusConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vManagedPrometheusConfig) { + o.ManagedPrometheusConfig = vManagedPrometheusConfig + } + return nil +} +func postReadExtractClusterMonitoringConfigManagedPrometheusConfigFields(r *Cluster, o *ClusterMonitoringConfigManagedPrometheusConfig) error { + return nil +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/containerazure/cluster_schema.go.tmpl b/mmv1/third_party/terraform/services/containerazure/cluster_schema.go.tmpl new file mode 100644 index 000000000000..943ded3bf1e1 --- /dev/null +++ b/mmv1/third_party/terraform/services/containerazure/cluster_schema.go.tmpl @@ -0,0 +1,658 @@ +package containerazure + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +func DCLClusterSchema() *dcl.Schema { + return &dcl.Schema{ + Info: &dcl.Info{ + Title: "ContainerAzure/Cluster", + Description: "An Anthos cluster running on Azure.", + StructName: "Cluster", + Reference: &dcl.Link{ + Text: "API reference", + URL: "https://cloud.google.com/kubernetes-engine/multi-cloud/docs/reference/rest/v1/projects.locations.azureClusters", + }, + Guides: []*dcl.Link{ + &dcl.Link{ + Text: "Multicloud overview", + URL: "https://cloud.google.com/kubernetes-engine/multi-cloud/docs", + }, + }, + }, + Paths: &dcl.Paths{ + Get: &dcl.Path{ + Description: "The function used to get information about a Cluster", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "cluster", + Required: true, + Description: "A full instance of a Cluster", + }, + }, + }, + Apply: &dcl.Path{ + Description: "The function used to apply information about a Cluster", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "cluster", + Required: true, + Description: "A full instance of a Cluster", + }, + 
}, + }, + Delete: &dcl.Path{ + Description: "The function used to delete a Cluster", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "cluster", + Required: true, + Description: "A full instance of a Cluster", + }, + }, + }, + DeleteAll: &dcl.Path{ + Description: "The function used to delete all Cluster", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "project", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + dcl.PathParameters{ + Name: "location", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + }, + }, + List: &dcl.Path{ + Description: "The function used to list information about many Cluster", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "project", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + dcl.PathParameters{ + Name: "location", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + }, + }, + }, + Components: &dcl.Components{ + Schemas: map[string]*dcl.Component{ + "Cluster": &dcl.Component{ + Title: "Cluster", + ID: "projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/azureClusters/{{ "{{" }}name{{ "}}" }}", + ParentContainer: "project", + HasCreate: true, + SchemaProperty: dcl.Property{ + Type: "object", + Required: []string{ + "name", + "azureRegion", + "resourceGroupId", + "networking", + "controlPlane", + "authorization", + "project", + "location", + "fleet", + }, + Properties: map[string]*dcl.Property{ + "annotations": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "Annotations", + Description: "Optional. Annotations on the cluster. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. Keys can have 2 segments: prefix (optional) and name (required), separated by a slash (/). 
Prefix must be a DNS subdomain. Name must be 63 characters or less, begin and end with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics between.", + Immutable: true, + }, + "authorization": &dcl.Property{ + Type: "object", + GoName: "Authorization", + GoType: "ClusterAuthorization", + Description: "Configuration related to the cluster RBAC settings.", + Required: []string{ + "adminUsers", + }, + Properties: map[string]*dcl.Property{ + "adminGroups": &dcl.Property{ + Type: "array", + GoName: "AdminGroups", + Description: "Groups of users that can perform operations as a cluster admin. A managed ClusterRoleBinding will be created to grant the `cluster-admin` ClusterRole to the groups. Up to ten admin groups can be provided. For more info on RBAC, see https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "object", + GoType: "ClusterAuthorizationAdminGroups", + Required: []string{ + "group", + }, + Properties: map[string]*dcl.Property{ + "group": &dcl.Property{ + Type: "string", + GoName: "Group", + Description: "The name of the group, e.g. `my-group@domain.com`.", + }, + }, + }, + }, + "adminUsers": &dcl.Property{ + Type: "array", + GoName: "AdminUsers", + Description: "Users that can perform operations as a cluster admin. A new ClusterRoleBinding will be created to grant the cluster-admin ClusterRole to the users. Up to ten admin users can be provided. For more info on RBAC, see https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "object", + GoType: "ClusterAuthorizationAdminUsers", + Required: []string{ + "username", + }, + Properties: map[string]*dcl.Property{ + "username": &dcl.Property{ + Type: "string", + GoName: "Username", + Description: "The name of the user, e.g. 
`my-gcp-id@gmail.com`.", + }, + }, + }, + }, + }, + }, + "azureRegion": &dcl.Property{ + Type: "string", + GoName: "AzureRegion", + Description: "The Azure region where the cluster runs. Each Google Cloud region supports a subset of nearby Azure regions. You can call to list all supported Azure regions within a given Google Cloud region.", + Immutable: true, + }, + "azureServicesAuthentication": &dcl.Property{ + Type: "object", + GoName: "AzureServicesAuthentication", + GoType: "ClusterAzureServicesAuthentication", + Description: "Azure authentication configuration for management of Azure resources", + Conflicts: []string{ + "client", + }, + Required: []string{ + "tenantId", + "applicationId", + }, + Properties: map[string]*dcl.Property{ + "applicationId": &dcl.Property{ + Type: "string", + GoName: "ApplicationId", + Description: "The Azure Active Directory Application ID for Authentication configuration.", + }, + "tenantId": &dcl.Property{ + Type: "string", + GoName: "TenantId", + Description: "The Azure Active Directory Tenant ID for Authentication configuration.", + }, + }, + }, + "client": &dcl.Property{ + Type: "string", + GoName: "Client", + Description: "Name of the AzureClient. The `AzureClient` resource must reside on the same GCP project and region as the `AzureCluster`. `AzureClient` names are formatted as `projects//locations//azureClients/`. 
See Resource Names (https:cloud.google.com/apis/design/resource_names) for more details on Google Cloud resource names.", + Conflicts: []string{ + "azureServicesAuthentication", + }, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "ContainerAzure/AzureClient", + Field: "name", + }, + }, + }, + "controlPlane": &dcl.Property{ + Type: "object", + GoName: "ControlPlane", + GoType: "ClusterControlPlane", + Description: "Configuration related to the cluster control plane.", + Required: []string{ + "version", + "subnetId", + "sshConfig", + }, + Properties: map[string]*dcl.Property{ + "databaseEncryption": &dcl.Property{ + Type: "object", + GoName: "DatabaseEncryption", + GoType: "ClusterControlPlaneDatabaseEncryption", + Description: "Optional. Configuration related to application-layer secrets encryption.", + Immutable: true, + Required: []string{ + "keyId", + }, + Properties: map[string]*dcl.Property{ + "keyId": &dcl.Property{ + Type: "string", + GoName: "KeyId", + Description: "The ARM ID of the Azure Key Vault key to encrypt / decrypt data. For example: `/subscriptions//resourceGroups//providers/Microsoft.KeyVault/vaults//keys/` Encryption will always take the latest version of the key and hence specific version is not supported.", + Immutable: true, + }, + }, + }, + "mainVolume": &dcl.Property{ + Type: "object", + GoName: "MainVolume", + GoType: "ClusterControlPlaneMainVolume", + Description: "Optional. Configuration related to the main volume provisioned for each control plane replica. The main volume is in charge of storing all of the cluster's etcd state. When unspecified, it defaults to a 8-GiB Azure Disk.", + Immutable: true, + ServerDefault: true, + Properties: map[string]*dcl.Property{ + "sizeGib": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "SizeGib", + Description: "Optional. The size of the disk, in GiBs. When unspecified, a default value is provided. 
 See the specific reference in the parent resource.",
+											Immutable:     true,
+											ServerDefault: true,
+										},
+									},
+								},
+								"proxyConfig": &dcl.Property{
+									Type:        "object",
+									GoName:      "ProxyConfig",
+									GoType:      "ClusterControlPlaneProxyConfig",
+									Description: "Proxy configuration for outbound HTTP(S) traffic.",
+									Immutable:   true,
+									Required: []string{
+										"resourceGroupId",
+										"secretId",
+									},
+									Properties: map[string]*dcl.Property{
+										"resourceGroupId": &dcl.Property{
+											Type:        "string",
+											GoName:      "ResourceGroupId",
+											Description: "The ARM ID of the resource group containing the proxy keyvault. Resource group ids are formatted as `/subscriptions/<subscription-id>/resourceGroups/<resource-group-name>`",
+											Immutable:   true,
+										},
+										"secretId": &dcl.Property{
+											Type:        "string",
+											GoName:      "SecretId",
+											Description: "The URL of the proxy setting secret with its version. Secret ids are formatted as `https://<key-vault-name>.vault.azure.net/secrets/<secret-name>/<secret-version>`.",
+											Immutable:   true,
+										},
+									},
+								},
+								"replicaPlacements": &dcl.Property{
+									Type:        "array",
+									GoName:      "ReplicaPlacements",
+									Description: "Configuration for where to place the control plane replicas. Up to three replica placement instances can be specified. If replica_placements is set, the replica placement instances will be applied to the three control plane replicas as evenly as possible.",
+									Immutable:   true,
+									SendEmpty:   true,
+									ListType:    "list",
+									Items: &dcl.Property{
+										Type:   "object",
+										GoType: "ClusterControlPlaneReplicaPlacements",
+										Required: []string{
+											"subnetId",
+											"azureAvailabilityZone",
+										},
+										Properties: map[string]*dcl.Property{
+											"azureAvailabilityZone": &dcl.Property{
+												Type:        "string",
+												GoName:      "AzureAvailabilityZone",
+												Description: "For a given replica, the Azure availability zone where to provision the control plane VM and the ETCD disk.",
+												Immutable:   true,
+											},
+											"subnetId": &dcl.Property{
+												Type:        "string",
+												GoName:      "SubnetId",
+												Description: "For a given replica, the ARM ID of the subnet where the control plane VM is deployed.
Make sure it's a subnet under the virtual network in the cluster configuration.", + Immutable: true, + }, + }, + }, + }, + "rootVolume": &dcl.Property{ + Type: "object", + GoName: "RootVolume", + GoType: "ClusterControlPlaneRootVolume", + Description: "Optional. Configuration related to the root volume provisioned for each control plane replica. When unspecified, it defaults to 32-GiB Azure Disk.", + Immutable: true, + ServerDefault: true, + Properties: map[string]*dcl.Property{ + "sizeGib": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "SizeGib", + Description: "Optional. The size of the disk, in GiBs. When unspecified, a default value is provided. See the specific reference in the parent resource.", + Immutable: true, + ServerDefault: true, + }, + }, + }, + "sshConfig": &dcl.Property{ + Type: "object", + GoName: "SshConfig", + GoType: "ClusterControlPlaneSshConfig", + Description: "SSH configuration for how to access the underlying control plane machines.", + Required: []string{ + "authorizedKey", + }, + Properties: map[string]*dcl.Property{ + "authorizedKey": &dcl.Property{ + Type: "string", + GoName: "AuthorizedKey", + Description: "The SSH public key data for VMs managed by Anthos. This accepts the authorized_keys file format used in OpenSSH according to the sshd(8) manual page.", + }, + }, + }, + "subnetId": &dcl.Property{ + Type: "string", + GoName: "SubnetId", + Description: "The ARM ID of the subnet where the control plane VMs are deployed. Example: `/subscriptions//resourceGroups//providers/Microsoft.Network/virtualNetworks//subnets/default`.", + Immutable: true, + }, + "tags": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "Tags", + Description: "Optional. 
A set of tags to apply to all underlying control plane Azure resources.", + Immutable: true, + }, + "version": &dcl.Property{ + Type: "string", + GoName: "Version", + Description: "The Kubernetes version to run on control plane replicas (e.g. `1.19.10-gke.1000`). You can list all supported versions on a given Google Cloud region by calling GetAzureServerConfig.", + }, + "vmSize": &dcl.Property{ + Type: "string", + GoName: "VmSize", + Description: "Optional. The Azure VM size name. Example: `Standard_DS2_v2`. For available VM sizes, see https://docs.microsoft.com/en-us/azure/virtual-machines/vm-naming-conventions. When unspecified, it defaults to `Standard_DS2_v2`.", + ServerDefault: true, + }, + }, + }, + "createTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "CreateTime", + ReadOnly: true, + Description: "Output only. The time at which this cluster was created.", + Immutable: true, + }, + "description": &dcl.Property{ + Type: "string", + GoName: "Description", + Description: "Optional. A human readable description of this cluster. Cannot be longer than 255 UTF-8 encoded bytes.", + }, + "endpoint": &dcl.Property{ + Type: "string", + GoName: "Endpoint", + ReadOnly: true, + Description: "Output only. The endpoint of the cluster's API server.", + Immutable: true, + }, + "etag": &dcl.Property{ + Type: "string", + GoName: "Etag", + ReadOnly: true, + Description: "Allows clients to perform consistent read-modify-writes through optimistic concurrency control. 
May be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.", + Immutable: true, + }, + "fleet": &dcl.Property{ + Type: "object", + GoName: "Fleet", + GoType: "ClusterFleet", + Description: "Fleet configuration.", + Immutable: true, + Required: []string{ + "project", + }, + Properties: map[string]*dcl.Property{ + "membership": &dcl.Property{ + Type: "string", + GoName: "Membership", + ReadOnly: true, + Description: "The name of the managed Hub Membership resource associated to this cluster. Membership names are formatted as projects//locations/global/membership/.", + Immutable: true, + }, + "project": &dcl.Property{ + Type: "string", + GoName: "Project", + Description: "The number of the Fleet host project where this cluster will be registered.", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Cloudresourcemanager/Project", + Field: "name", + Parent: true, + }, + }, + HasLongForm: true, + }, + }, + }, + "location": &dcl.Property{ + Type: "string", + GoName: "Location", + Description: "The location for the resource", + Immutable: true, + Parameter: true, + }, +{{- if ne $.TargetVersionName "ga" }} + "loggingConfig": &dcl.Property{ + Type: "object", + GoName: "LoggingConfig", + GoType: "ClusterLoggingConfig", + Description: "Logging configuration.", + ServerDefault: true, + Properties: map[string]*dcl.Property{ + "componentConfig": &dcl.Property{ + Type: "object", + GoName: "ComponentConfig", + GoType: "ClusterLoggingConfigComponentConfig", + Description: "Configuration of the logging components.", + ServerDefault: true, + Properties: map[string]*dcl.Property{ + "enableComponents": &dcl.Property{ + Type: "array", + GoName: "EnableComponents", + Description: "Components of the logging configuration to be enabled.", + ServerDefault: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: 
"ClusterLoggingConfigComponentConfigEnableComponentsEnum", + Enum: []string{ + "COMPONENT_UNSPECIFIED", + "SYSTEM_COMPONENTS", + "WORKLOADS", + }, + }, + }, + }, + }, + }, + }, + "monitoringConfig": &dcl.Property{ + Type: "object", + GoName: "MonitoringConfig", + GoType: "ClusterMonitoringConfig", + Description: "Monitoring configuration.", + ServerDefault: true, + Properties: map[string]*dcl.Property{ + "managedPrometheusConfig": &dcl.Property{ + Type: "object", + GoName: "ManagedPrometheusConfig", + GoType: "ClusterMonitoringConfigManagedPrometheusConfig", + Description: "Configuration of the Google Cloud Managed Service for Prometheus.", + ServerDefault: true, + Properties: map[string]*dcl.Property{ + "enabled": &dcl.Property{ + Type: "boolean", + GoName: "Enabled", + Description: "Configuration of the enable Managed Collection.", + ServerDefault: true, + }, + }, + }, + }, + }, +{{- end }} + "name": &dcl.Property{ + Type: "string", + GoName: "Name", + Description: "The name of this resource.", + Immutable: true, + HasLongForm: true, + }, + "networking": &dcl.Property{ + Type: "object", + GoName: "Networking", + GoType: "ClusterNetworking", + Description: "Cluster-wide networking configuration.", + Immutable: true, + Required: []string{ + "virtualNetworkId", + "podAddressCidrBlocks", + "serviceAddressCidrBlocks", + }, + Properties: map[string]*dcl.Property{ + "podAddressCidrBlocks": &dcl.Property{ + Type: "array", + GoName: "PodAddressCidrBlocks", + Description: "The IP address range of the pods in this cluster, in CIDR notation (e.g. `10.96.0.0/14`). All pods in the cluster get assigned a unique RFC1918 IPv4 address from these ranges. Only a single range is supported. 
This field cannot be changed after creation.", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + "serviceAddressCidrBlocks": &dcl.Property{ + Type: "array", + GoName: "ServiceAddressCidrBlocks", + Description: "The IP address range for services in this cluster, in CIDR notation (e.g. `10.96.0.0/14`). All services in the cluster get assigned a unique RFC1918 IPv4 address from these ranges. Only a single range is supported. This field cannot be changed after creating a cluster.", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + "virtualNetworkId": &dcl.Property{ + Type: "string", + GoName: "VirtualNetworkId", + Description: "The Azure Resource Manager (ARM) ID of the VNet associated with your cluster. All components in the cluster (i.e. control plane and node pools) run on a single VNet. Example: `/subscriptions/*/resourceGroups/*/providers/Microsoft.Network/virtualNetworks/*` This field cannot be changed after creation.", + Immutable: true, + }, + }, + }, + "project": &dcl.Property{ + Type: "string", + GoName: "Project", + Description: "The project for the resource", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Cloudresourcemanager/Project", + Field: "name", + Parent: true, + }, + }, + Parameter: true, + }, + "reconciling": &dcl.Property{ + Type: "boolean", + GoName: "Reconciling", + ReadOnly: true, + Description: "Output only. If set, there are currently changes in flight to the cluster.", + Immutable: true, + }, + "resourceGroupId": &dcl.Property{ + Type: "string", + GoName: "ResourceGroupId", + Description: "The ARM ID of the resource group where the cluster resources are deployed. 
For example: `/subscriptions/*/resourceGroups/*`", + Immutable: true, + }, + "state": &dcl.Property{ + Type: "string", + GoName: "State", + GoType: "ClusterStateEnum", + ReadOnly: true, + Description: "Output only. The current state of the cluster. Possible values: STATE_UNSPECIFIED, PROVISIONING, RUNNING, RECONCILING, STOPPING, ERROR, DEGRADED", + Immutable: true, + Enum: []string{ + "STATE_UNSPECIFIED", + "PROVISIONING", + "RUNNING", + "RECONCILING", + "STOPPING", + "ERROR", + "DEGRADED", + }, + }, + "uid": &dcl.Property{ + Type: "string", + GoName: "Uid", + ReadOnly: true, + Description: "Output only. A globally unique identifier for the cluster.", + Immutable: true, + }, + "updateTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "UpdateTime", + ReadOnly: true, + Description: "Output only. The time at which this cluster was last updated.", + Immutable: true, + }, + "workloadIdentityConfig": &dcl.Property{ + Type: "object", + GoName: "WorkloadIdentityConfig", + GoType: "ClusterWorkloadIdentityConfig", + ReadOnly: true, + Description: "Output only. 
Workload Identity settings.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "identityProvider": &dcl.Property{ + Type: "string", + GoName: "IdentityProvider", + Description: "The ID of the OIDC Identity Provider (IdP) associated to the Workload Identity Pool.", + Immutable: true, + }, + "issuerUri": &dcl.Property{ + Type: "string", + GoName: "IssuerUri", + Description: "The OIDC issuer URL for this cluster.", + Immutable: true, + }, + "workloadPool": &dcl.Property{ + Type: "string", + GoName: "WorkloadPool", + Description: "The Workload Identity Pool associated to the cluster.", + Immutable: true, + }, + }, + }, + }, + }, + }, + }, + }, + } +} diff --git a/mmv1/third_party/terraform/services/containerazure/node_pool.go.tmpl b/mmv1/third_party/terraform/services/containerazure/node_pool.go.tmpl new file mode 100644 index 000000000000..fbd7714e9269 --- /dev/null +++ b/mmv1/third_party/terraform/services/containerazure/node_pool.go.tmpl @@ -0,0 +1,772 @@ +package containerazure + +import ( + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "time" + +{{- if ne $.TargetVersionName "ga" }} + "google.golang.org/api/googleapi" +{{- end }} + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +{{- if eq $.TargetVersionName "ga" }} + "google.golang.org/api/googleapi" +{{- end }} +) + +type NodePool struct { + Name *string `json:"name"` + Version *string `json:"version"` + Config *NodePoolConfig `json:"config"` + SubnetId *string `json:"subnetId"` + Autoscaling *NodePoolAutoscaling `json:"autoscaling"` + State *NodePoolStateEnum `json:"state"` + Uid *string `json:"uid"` + Reconciling *bool `json:"reconciling"` + CreateTime *string `json:"createTime"` + UpdateTime *string `json:"updateTime"` + Etag *string `json:"etag"` + Annotations map[string]string `json:"annotations"` + MaxPodsConstraint *NodePoolMaxPodsConstraint `json:"maxPodsConstraint"` + Management *NodePoolManagement `json:"management"` + AzureAvailabilityZone *string 
`json:"azureAvailabilityZone"` + Project *string `json:"project"` + Location *string `json:"location"` + Cluster *string `json:"cluster"` +} + +func (r *NodePool) String() string { + return dcl.SprintResource(r) +} + +// The enum NodePoolStateEnum. +type NodePoolStateEnum string + +// NodePoolStateEnumRef returns a *NodePoolStateEnum with the value of string s +// If the empty string is provided, nil is returned. +func NodePoolStateEnumRef(s string) *NodePoolStateEnum { + v := NodePoolStateEnum(s) + return &v +} + +func (v NodePoolStateEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"STATE_UNSPECIFIED", "PROVISIONING", "RUNNING", "RECONCILING", "STOPPING", "ERROR", "DEGRADED"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "NodePoolStateEnum", + Value: string(v), + Valid: []string{}, + } +} + +type NodePoolConfig struct { + empty bool `json:"-"` + VmSize *string `json:"vmSize"` + RootVolume *NodePoolConfigRootVolume `json:"rootVolume"` + Tags map[string]string `json:"tags"` + Labels map[string]string `json:"labels"` + SshConfig *NodePoolConfigSshConfig `json:"sshConfig"` +{{- if ne $.TargetVersionName "ga" }} + ImageType *string `json:"imageType"` +{{- end }} + ProxyConfig *NodePoolConfigProxyConfig `json:"proxyConfig"` +} + +type jsonNodePoolConfig NodePoolConfig + +func (r *NodePoolConfig) UnmarshalJSON(data []byte) error { + var res jsonNodePoolConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyNodePoolConfig + } else { + + r.VmSize = res.VmSize + + r.RootVolume = res.RootVolume + + r.Tags = res.Tags + + r.Labels = res.Labels + + r.SshConfig = res.SshConfig +{{- if ne $.TargetVersionName "ga" }} + + r.ImageType = res.ImageType +{{- end }} + + r.ProxyConfig = res.ProxyConfig + + } + return nil +} + +// This object is used to assert a 
desired state where this NodePoolConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyNodePoolConfig *NodePoolConfig = &NodePoolConfig{empty: true} + +func (r *NodePoolConfig) Empty() bool { + return r.empty +} + +func (r *NodePoolConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *NodePoolConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type NodePoolConfigRootVolume struct { + empty bool `json:"-"` + SizeGib *int64 `json:"sizeGib"` +} + +type jsonNodePoolConfigRootVolume NodePoolConfigRootVolume + +func (r *NodePoolConfigRootVolume) UnmarshalJSON(data []byte) error { + var res jsonNodePoolConfigRootVolume + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyNodePoolConfigRootVolume + } else { + + r.SizeGib = res.SizeGib + + } + return nil +} + +// This object is used to assert a desired state where this NodePoolConfigRootVolume is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyNodePoolConfigRootVolume *NodePoolConfigRootVolume = &NodePoolConfigRootVolume{empty: true} + +func (r *NodePoolConfigRootVolume) Empty() bool { + return r.empty +} + +func (r *NodePoolConfigRootVolume) String() string { + return dcl.SprintResource(r) +} + +func (r *NodePoolConfigRootVolume) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type NodePoolConfigSshConfig struct { + empty bool `json:"-"` + AuthorizedKey *string `json:"authorizedKey"` +} + +type jsonNodePoolConfigSshConfig NodePoolConfigSshConfig + +func (r *NodePoolConfigSshConfig) UnmarshalJSON(data []byte) error { + var res jsonNodePoolConfigSshConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyNodePoolConfigSshConfig + } else { + + r.AuthorizedKey = res.AuthorizedKey + + } + return nil +} + +// This object is used to assert a desired state where this NodePoolConfigSshConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyNodePoolConfigSshConfig *NodePoolConfigSshConfig = &NodePoolConfigSshConfig{empty: true} + +func (r *NodePoolConfigSshConfig) Empty() bool { + return r.empty +} + +func (r *NodePoolConfigSshConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *NodePoolConfigSshConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type NodePoolConfigProxyConfig struct { + empty bool `json:"-"` + ResourceGroupId *string `json:"resourceGroupId"` + SecretId *string `json:"secretId"` +} + +type jsonNodePoolConfigProxyConfig NodePoolConfigProxyConfig + +func (r *NodePoolConfigProxyConfig) UnmarshalJSON(data []byte) error { + var res jsonNodePoolConfigProxyConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyNodePoolConfigProxyConfig + } else { + + r.ResourceGroupId = res.ResourceGroupId + + r.SecretId = res.SecretId + + } + return nil +} + +// This object is used to assert a desired state where this NodePoolConfigProxyConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyNodePoolConfigProxyConfig *NodePoolConfigProxyConfig = &NodePoolConfigProxyConfig{empty: true} + +func (r *NodePoolConfigProxyConfig) Empty() bool { + return r.empty +} + +func (r *NodePoolConfigProxyConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *NodePoolConfigProxyConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type NodePoolAutoscaling struct { + empty bool `json:"-"` + MinNodeCount *int64 `json:"minNodeCount"` + MaxNodeCount *int64 `json:"maxNodeCount"` +} + +type jsonNodePoolAutoscaling NodePoolAutoscaling + +func (r *NodePoolAutoscaling) UnmarshalJSON(data []byte) error { + var res jsonNodePoolAutoscaling + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyNodePoolAutoscaling + } else { + + r.MinNodeCount = res.MinNodeCount + + r.MaxNodeCount = res.MaxNodeCount + + } + return nil +} + +// This object is used to assert a desired state where this NodePoolAutoscaling is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyNodePoolAutoscaling *NodePoolAutoscaling = &NodePoolAutoscaling{empty: true} + +func (r *NodePoolAutoscaling) Empty() bool { + return r.empty +} + +func (r *NodePoolAutoscaling) String() string { + return dcl.SprintResource(r) +} + +func (r *NodePoolAutoscaling) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type NodePoolMaxPodsConstraint struct { + empty bool `json:"-"` + MaxPodsPerNode *int64 `json:"maxPodsPerNode"` +} + +type jsonNodePoolMaxPodsConstraint NodePoolMaxPodsConstraint + +func (r *NodePoolMaxPodsConstraint) UnmarshalJSON(data []byte) error { + var res jsonNodePoolMaxPodsConstraint + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyNodePoolMaxPodsConstraint + } else { + + r.MaxPodsPerNode = res.MaxPodsPerNode + + } + return nil +} + +// This object is used to assert a desired state where this NodePoolMaxPodsConstraint is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyNodePoolMaxPodsConstraint *NodePoolMaxPodsConstraint = &NodePoolMaxPodsConstraint{empty: true} + +func (r *NodePoolMaxPodsConstraint) Empty() bool { + return r.empty +} + +func (r *NodePoolMaxPodsConstraint) String() string { + return dcl.SprintResource(r) +} + +func (r *NodePoolMaxPodsConstraint) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type NodePoolManagement struct { + empty bool `json:"-"` + AutoRepair *bool `json:"autoRepair"` +} + +type jsonNodePoolManagement NodePoolManagement + +func (r *NodePoolManagement) UnmarshalJSON(data []byte) error { + var res jsonNodePoolManagement + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyNodePoolManagement + } else { + + r.AutoRepair = res.AutoRepair + + } + return nil +} + +// This object is used to assert a desired state where this NodePoolManagement is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyNodePoolManagement *NodePoolManagement = &NodePoolManagement{empty: true} + +func (r *NodePoolManagement) Empty() bool { + return r.empty +} + +func (r *NodePoolManagement) String() string { + return dcl.SprintResource(r) +} + +func (r *NodePoolManagement) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +// Describe returns a simple description of this resource to ensure that automated tools +// can identify it. 
+func (r *NodePool) Describe() dcl.ServiceTypeVersion { + return dcl.ServiceTypeVersion{ + Service: "container_azure", + Type: "NodePool", +{{- if ne $.TargetVersionName "ga" }} + Version: "beta", +{{- else }} + Version: "containerazure", +{{- end }} + } +} + +func (r *NodePool) ID() (string, error) { + if err := extractNodePoolFields(r); err != nil { + return "", err + } + nr := r.urlNormalized() + params := map[string]interface{}{ + "name": dcl.ValueOrEmptyString(nr.Name), + "version": dcl.ValueOrEmptyString(nr.Version), + "config": dcl.ValueOrEmptyString(nr.Config), + "subnet_id": dcl.ValueOrEmptyString(nr.SubnetId), + "autoscaling": dcl.ValueOrEmptyString(nr.Autoscaling), + "state": dcl.ValueOrEmptyString(nr.State), + "uid": dcl.ValueOrEmptyString(nr.Uid), + "reconciling": dcl.ValueOrEmptyString(nr.Reconciling), + "create_time": dcl.ValueOrEmptyString(nr.CreateTime), + "update_time": dcl.ValueOrEmptyString(nr.UpdateTime), + "etag": dcl.ValueOrEmptyString(nr.Etag), + "annotations": dcl.ValueOrEmptyString(nr.Annotations), + "max_pods_constraint": dcl.ValueOrEmptyString(nr.MaxPodsConstraint), + "management": dcl.ValueOrEmptyString(nr.Management), + "azure_availability_zone": dcl.ValueOrEmptyString(nr.AzureAvailabilityZone), + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "cluster": dcl.ValueOrEmptyString(nr.Cluster), + } + return dcl.Nprintf("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/azureClusters/{{ "{{" }}cluster{{ "}}" }}/azureNodePools/{{ "{{" }}name{{ "}}" }}", params), nil +} + +const NodePoolMaxPage = -1 + +type NodePoolList struct { + Items []*NodePool + + nextToken string + + pageSize int32 + + resource *NodePool +} + +func (l *NodePoolList) HasNext() bool { + return l.nextToken != "" +} + +func (l *NodePoolList) Next(ctx context.Context, c *Client) error { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if !l.HasNext() 
{ + return fmt.Errorf("no next page") + } + items, token, err := c.listNodePool(ctx, l.resource, l.nextToken, l.pageSize) + if err != nil { + return err + } + l.Items = items + l.nextToken = token + return err +} + +func (c *Client) ListNodePool(ctx context.Context, project, location, cluster string) (*NodePoolList, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + return c.ListNodePoolWithMaxResults(ctx, project, location, cluster, NodePoolMaxPage) + +} + +func (c *Client) ListNodePoolWithMaxResults(ctx context.Context, project, location, cluster string, pageSize int32) (*NodePoolList, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // Create a resource object so that we can use proper url normalization methods. + r := &NodePool{ + Project: &project, + Location: &location, + Cluster: &cluster, + } + items, token, err := c.listNodePool(ctx, r, "", pageSize) + if err != nil { + return nil, err + } + return &NodePoolList{ + Items: items, + nextToken: token, + pageSize: pageSize, + resource: r, + }, nil +} + +func (c *Client) GetNodePool(ctx context.Context, r *NodePool) (*NodePool, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // This is *purposefully* supressing errors. + // This function is used with url-normalized values + not URL normalized values. + // URL Normalized values will throw unintentional errors, since those values are not of the proper parent form. 
+ extractNodePoolFields(r) + + b, err := c.getNodePoolRaw(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + return nil, &googleapi.Error{ + Code: 404, + Message: err.Error(), + } + } + return nil, err + } + result, err := unmarshalNodePool(b, c, r) + if err != nil { + return nil, err + } + result.Project = r.Project + result.Location = r.Location + result.Cluster = r.Cluster + result.Name = r.Name + + c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) + result, err = canonicalizeNodePoolNewState(c, result, r) + if err != nil { + return nil, err + } + if err := postReadExtractNodePoolFields(result); err != nil { + return result, err + } + c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result) + + return result, nil +} + +func (c *Client) DeleteNodePool(ctx context.Context, r *NodePool) error { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if r == nil { + return fmt.Errorf("NodePool resource is nil") + } + c.Config.Logger.InfoWithContext(ctx, "Deleting NodePool...") + deleteOp := deleteNodePoolOperation{} + return deleteOp.do(ctx, r, c) +} + +// DeleteAllNodePool deletes all resources that the filter functions returns true on. 
+func (c *Client) DeleteAllNodePool(ctx context.Context, project, location, cluster string, filter func(*NodePool) bool) error { + listObj, err := c.ListNodePool(ctx, project, location, cluster) + if err != nil { + return err + } + + err = c.deleteAllNodePool(ctx, filter, listObj.Items) + if err != nil { + return err + } + for listObj.HasNext() { + err = listObj.Next(ctx, c) + if err != nil { + return nil + } + err = c.deleteAllNodePool(ctx, filter, listObj.Items) + if err != nil { + return err + } + } + return nil +} + +func (c *Client) ApplyNodePool(ctx context.Context, rawDesired *NodePool, opts ...dcl.ApplyOption) (*NodePool, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + ctx = dcl.ContextWithRequestID(ctx) + var resultNewState *NodePool + err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + newState, err := applyNodePoolHelper(c, ctx, rawDesired, opts...) + resultNewState = newState + if err != nil { + // If the error is 409, there is conflict in resource update. + // Here we want to apply changes based on latest state. + if dcl.IsConflictError(err) { + return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} + } + return nil, err + } + return nil, nil + }, c.Config.RetryProvider) + return resultNewState, err +} + +func applyNodePoolHelper(c *Client, ctx context.Context, rawDesired *NodePool, opts ...dcl.ApplyOption) (*NodePool, error) { + c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyNodePool...") + c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) + + // 1.1: Validation of user-specified fields in desired state. + if err := rawDesired.validate(); err != nil { + return nil, err + } + + if err := extractNodePoolFields(rawDesired); err != nil { + return nil, err + } + + initial, desired, fieldDiffs, err := c.nodePoolDiffsForRawDesired(ctx, rawDesired, opts...) 
+ if err != nil { + return nil, fmt.Errorf("failed to create a diff: %w", err) + } + + diffs, err := convertFieldDiffsToNodePoolDiffs(c.Config, fieldDiffs, opts) + if err != nil { + return nil, err + } + + // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). + + // 2.3: Lifecycle Directive Check + var create bool + lp := dcl.FetchLifecycleParams(opts) + if initial == nil { + if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} + } + create = true + } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), + } + } else { + for _, d := range diffs { + if d.RequiresRecreate { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), + } + } + if dcl.HasLifecycleParam(lp, dcl.BlockModification) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} + } + } + } + + // 2.4 Imperative Request Planning + var ops []nodePoolApiOperation + if create { + ops = append(ops, &createNodePoolOperation{}) + } else { + for _, d := range diffs { + ops = append(ops, d.UpdateOp) + } + } + c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) + + // 2.5 Request Actuation + for _, op := range ops { + c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) + if err := op.do(ctx, desired, c); err != nil { + c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", op, op, err) + return nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) + } + return applyNodePoolDiff(c, ctx, desired, rawDesired, ops, opts...) 
+} + +func applyNodePoolDiff(c *Client, ctx context.Context, desired *NodePool, rawDesired *NodePool, ops []nodePoolApiOperation, opts ...dcl.ApplyOption) (*NodePool, error) { + // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") + rawNew, err := c.GetNodePool(ctx, desired) + if err != nil { + return nil, err + } + // Get additional values from the first response. + // These values should be merged into the newState above. + if len(ops) > 0 { + lastOp := ops[len(ops)-1] + if o, ok := lastOp.(*createNodePoolOperation); ok { + if r, hasR := o.FirstResponse(); hasR { + + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") + + fullResp, err := unmarshalMapNodePool(r, c, rawDesired) + if err != nil { + return nil, err + } + + rawNew, err = canonicalizeNodePoolNewState(c, rawNew, fullResp) + if err != nil { + return nil, err + } + } + } + } + + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) + // 3.2b Canonicalization of raw new state using raw desired state + newState, err := canonicalizeNodePoolNewState(c, rawNew, rawDesired) + if err != nil { + return rawNew, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) + // 3.3 Comparison of the new state and raw desired state. + // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE + newDesired, err := canonicalizeNodePoolDesiredState(rawDesired, newState) + if err != nil { + return newState, err + } + + if err := postReadExtractNodePoolFields(newState); err != nil { + return newState, err + } + + // Need to ensure any transformations made here match acceptably in differ. 
+ if err := postReadExtractNodePoolFields(newDesired); err != nil { + return newState, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) + newDiffs, err := diffNodePool(c, newDesired, newState) + if err != nil { + return newState, err + } + + if len(newDiffs) == 0 { + c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") + } else { + c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) + diffMessages := make([]string, len(newDiffs)) + for i, d := range newDiffs { + diffMessages[i] = fmt.Sprintf("%v", d) + } + return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} + } + c.Config.Logger.InfoWithContext(ctx, "Done Apply.") + return newState, nil +} diff --git a/mmv1/third_party/terraform/services/containerazure/node_pool_internal.go.tmpl b/mmv1/third_party/terraform/services/containerazure/node_pool_internal.go.tmpl new file mode 100644 index 000000000000..1c1013d0fdf1 --- /dev/null +++ b/mmv1/third_party/terraform/services/containerazure/node_pool_internal.go.tmpl @@ -0,0 +1,3346 @@ +package containerazure + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "strings" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource/operations" +) + +func (r *NodePool) validate() error { + + if err := dcl.Required(r, "name"); err != nil { + return err + } + if err := dcl.Required(r, "version"); err != nil { + return err + } + if err := dcl.Required(r, "config"); err != nil { + return err + } + if err := dcl.Required(r, "subnetId"); err != nil { + return err + } + if err := dcl.Required(r, "autoscaling"); err != nil { + return err + } + if err := dcl.Required(r, "maxPodsConstraint"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Project, "Project"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Location, "Location"); 
err != nil { + return err + } + if err := dcl.RequiredParameter(r.Cluster, "Cluster"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.Config) { + if err := r.Config.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.Autoscaling) { + if err := r.Autoscaling.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.MaxPodsConstraint) { + if err := r.MaxPodsConstraint.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.Management) { + if err := r.Management.validate(); err != nil { + return err + } + } + return nil +} +func (r *NodePoolConfig) validate() error { + if err := dcl.Required(r, "sshConfig"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.RootVolume) { + if err := r.RootVolume.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.SshConfig) { + if err := r.SshConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.ProxyConfig) { + if err := r.ProxyConfig.validate(); err != nil { + return err + } + } + return nil +} +func (r *NodePoolConfigRootVolume) validate() error { + return nil +} +func (r *NodePoolConfigSshConfig) validate() error { + if err := dcl.Required(r, "authorizedKey"); err != nil { + return err + } + return nil +} +func (r *NodePoolConfigProxyConfig) validate() error { + if err := dcl.Required(r, "resourceGroupId"); err != nil { + return err + } + if err := dcl.Required(r, "secretId"); err != nil { + return err + } + return nil +} +func (r *NodePoolAutoscaling) validate() error { + if err := dcl.Required(r, "minNodeCount"); err != nil { + return err + } + if err := dcl.Required(r, "maxNodeCount"); err != nil { + return err + } + return nil +} +func (r *NodePoolMaxPodsConstraint) validate() error { + if err := dcl.Required(r, "maxPodsPerNode"); err != nil { + return err + } + return nil +} +func (r *NodePoolManagement) validate() error { + return nil +} +func (r *NodePool) basePath() 
string { + params := map[string]interface{}{ + "location": dcl.ValueOrEmptyString(r.Location), + } + return dcl.Nprintf("https://{{ "{{" }}location{{ "}}" }}-gkemulticloud.googleapis.com/v1", params) +} + +func (r *NodePool) getURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "cluster": dcl.ValueOrEmptyString(nr.Cluster), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/azureClusters/{{ "{{" }}cluster{{ "}}" }}/azureNodePools/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +func (r *NodePool) listURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "cluster": dcl.ValueOrEmptyString(nr.Cluster), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/azureClusters/{{ "{{" }}cluster{{ "}}" }}/azureNodePools", nr.basePath(), userBasePath, params), nil + +} + +func (r *NodePool) createURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "cluster": dcl.ValueOrEmptyString(nr.Cluster), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/azureClusters/{{ "{{" }}cluster{{ "}}" }}/azureNodePools?azureNodePoolId={{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil + +} + +func (r *NodePool) deleteURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + 
"location": dcl.ValueOrEmptyString(nr.Location), + "cluster": dcl.ValueOrEmptyString(nr.Cluster), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/azureClusters/{{ "{{" }}cluster{{ "}}" }}/azureNodePools/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +// nodePoolApiOperation represents a mutable operation in the underlying REST +// API such as Create, Update, or Delete. +type nodePoolApiOperation interface { + do(context.Context, *NodePool, *Client) error +} + +// newUpdateNodePoolUpdateAzureNodePoolRequest creates a request for an +// NodePool resource's UpdateAzureNodePool update type by filling in the update +// fields based on the intended state of the resource. +func newUpdateNodePoolUpdateAzureNodePoolRequest(ctx context.Context, f *NodePool, c *Client) (map[string]interface{}, error) { + req := map[string]interface{}{} + res := f + _ = res + + if v := f.Version; !dcl.IsEmptyValueIndirect(v) { + req["version"] = v + } + if v, err := expandNodePoolConfig(c, f.Config, res); err != nil { + return nil, fmt.Errorf("error expanding Config into config: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["config"] = v + } + if v, err := expandNodePoolAutoscaling(c, f.Autoscaling, res); err != nil { + return nil, fmt.Errorf("error expanding Autoscaling into autoscaling: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["autoscaling"] = v + } + if v := f.Annotations; !dcl.IsEmptyValueIndirect(v) { + req["annotations"] = v + } + if v, err := expandNodePoolManagement(c, f.Management, res); err != nil { + return nil, fmt.Errorf("error expanding Management into management: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["management"] = v + } + b, err := c.getNodePoolRaw(ctx, f) + if err != nil { + return nil, err + } + var m map[string]interface{} + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + rawEtag, 
err := dcl.GetMapEntry( + m, + []string{"etag"}, + ) + if err != nil { + c.Config.Logger.WarningWithContextf(ctx, "Failed to fetch from JSON Path: %v", err) + } else { + req["etag"] = rawEtag.(string) + } + return req, nil +} + +// marshalUpdateNodePoolUpdateAzureNodePoolRequest converts the update into +// the final JSON request body. +func marshalUpdateNodePoolUpdateAzureNodePoolRequest(c *Client, m map[string]interface{}) ([]byte, error) { + + return json.Marshal(m) +} + +type updateNodePoolUpdateAzureNodePoolOperation struct { + // If the update operation has the REQUIRES_APPLY_OPTIONS trait, this will be populated. + // Usually it will be nil - this is to prevent us from accidentally depending on apply + // options, which should usually be unnecessary. + ApplyOptions []dcl.ApplyOption + FieldDiffs []*dcl.FieldDiff +} + +// do creates a request and sends it to the appropriate URL. In most operations, +// do will transcribe a subset of the resource into a request object and send a +// PUT request to a single URL. 
+ +func (op *updateNodePoolUpdateAzureNodePoolOperation) do(ctx context.Context, r *NodePool, c *Client) error { + _, err := c.GetNodePool(ctx, r) + if err != nil { + return err + } + + u, err := r.updateURL(c.Config.BasePath, "UpdateAzureNodePool") + if err != nil { + return err + } + mask := dcl.UpdateMask(op.FieldDiffs) + u, err = dcl.AddQueryParams(u, map[string]string{"updateMask": mask}) + if err != nil { + return err + } + + req, err := newUpdateNodePoolUpdateAzureNodePoolRequest(ctx, r, c) + if err != nil { + return err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created update: %#v", req) + body, err := marshalUpdateNodePoolUpdateAzureNodePoolRequest(c, req) + if err != nil { + return err + } + resp, err := dcl.SendRequest(ctx, c.Config, "PATCH", u, bytes.NewBuffer(body), c.Config.RetryProvider) + if err != nil { + return err + } + + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + err = o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET") + + if err != nil { + return err + } + + return nil +} + +func (c *Client) listNodePoolRaw(ctx context.Context, r *NodePool, pageToken string, pageSize int32) ([]byte, error) { + u, err := r.urlNormalized().listURL(c.Config.BasePath) + if err != nil { + return nil, err + } + + m := make(map[string]string) + if pageToken != "" { + m["pageToken"] = pageToken + } + + if pageSize != NodePoolMaxPage { + m["pageSize"] = fmt.Sprintf("%v", pageSize) + } + + u, err = dcl.AddQueryParams(u, m) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + return ioutil.ReadAll(resp.Response.Body) +} + +type listNodePoolOperation struct { + AzureNodePools []map[string]interface{} `json:"azureNodePools"` + Token string `json:"nextPageToken"` +} + +func (c *Client) 
listNodePool(ctx context.Context, r *NodePool, pageToken string, pageSize int32) ([]*NodePool, string, error) { + b, err := c.listNodePoolRaw(ctx, r, pageToken, pageSize) + if err != nil { + return nil, "", err + } + + var m listNodePoolOperation + if err := json.Unmarshal(b, &m); err != nil { + return nil, "", err + } + + var l []*NodePool + for _, v := range m.AzureNodePools { + res, err := unmarshalMapNodePool(v, c, r) + if err != nil { + return nil, m.Token, err + } + res.Project = r.Project + res.Location = r.Location + res.Cluster = r.Cluster + l = append(l, res) + } + + return l, m.Token, nil +} + +func (c *Client) deleteAllNodePool(ctx context.Context, f func(*NodePool) bool, resources []*NodePool) error { + var errors []string + for _, res := range resources { + if f(res) { + // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. + err := c.DeleteNodePool(ctx, res) + if err != nil { + errors = append(errors, err.Error()) + } + } + } + if len(errors) > 0 { + return fmt.Errorf("%v", strings.Join(errors, "\n")) + } else { + return nil + } +} + +type deleteNodePoolOperation struct{} + +func (op *deleteNodePoolOperation) do(ctx context.Context, r *NodePool, c *Client) error { + r, err := c.GetNodePool(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + c.Config.Logger.InfoWithContextf(ctx, "NodePool not found, returning. Original error: %v", err) + return nil + } + c.Config.Logger.WarningWithContextf(ctx, "GetNodePool checking for existence. error: %v", err) + return err + } + + u, err := r.deleteURL(c.Config.BasePath) + if err != nil { + return err + } + + // Delete should never have a body + body := &bytes.Buffer{} + resp, err := dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider) + if err != nil { + return err + } + + // wait for object to be deleted. 
+ var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { + return err + } + + // We saw a race condition where for some successful delete operation, the Get calls returned resources for a short duration. + // This is the reason we are adding retry to handle that case. + retriesRemaining := 10 + dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + _, err := c.GetNodePool(ctx, r) + if dcl.IsNotFound(err) { + return nil, nil + } + if retriesRemaining > 0 { + retriesRemaining-- + return &dcl.RetryDetails{}, dcl.OperationNotDone{} + } + return nil, dcl.NotDeletedError{ExistingResource: r} + }, c.Config.RetryProvider) + return nil +} + +// Create operations are similar to Update operations, although they do not have +// specific request objects. The Create request object is the json encoding of +// the resource, which is modified by res.marshal to form the base request body. +type createNodePoolOperation struct { + response map[string]interface{} +} + +func (op *createNodePoolOperation) FirstResponse() (map[string]interface{}, bool) { + return op.response, len(op.response) > 0 +} + +func (op *createNodePoolOperation) do(ctx context.Context, r *NodePool, c *Client) error { + c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r) + u, err := r.createURL(c.Config.BasePath) + if err != nil { + return err + } + + req, err := r.marshal(c) + if err != nil { + return err + } + resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider) + if err != nil { + return err + } + // wait for object to be created. 
+ var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { + c.Config.Logger.Warningf("Creation failed after waiting for operation: %v", err) + return err + } + c.Config.Logger.InfoWithContextf(ctx, "Successfully waited for operation") + op.response, _ = o.FirstResponse() + + if _, err := c.GetNodePool(ctx, r); err != nil { + c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) + return err + } + + return nil +} + +func (c *Client) getNodePoolRaw(ctx context.Context, r *NodePool) ([]byte, error) { + + u, err := r.getURL(c.Config.BasePath) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + b, err := ioutil.ReadAll(resp.Response.Body) + if err != nil { + return nil, err + } + + return b, nil +} + +func (c *Client) nodePoolDiffsForRawDesired(ctx context.Context, rawDesired *NodePool, opts ...dcl.ApplyOption) (initial, desired *NodePool, diffs []*dcl.FieldDiff, err error) { + c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") + // First, let us see if the user provided a state hint. If they did, we will start fetching based on that. 
+ var fetchState *NodePool + if sh := dcl.FetchStateHint(opts); sh != nil { + if r, ok := sh.(*NodePool); !ok { + c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected NodePool, got %T", sh) + } else { + fetchState = r + } + } + if fetchState == nil { + fetchState = rawDesired + } + + // 1.2: Retrieval of raw initial state from API + rawInitial, err := c.GetNodePool(ctx, fetchState) + if rawInitial == nil { + if !dcl.IsNotFound(err) { + c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a NodePool resource already exists: %s", err) + return nil, nil, nil, fmt.Errorf("failed to retrieve NodePool resource: %v", err) + } + c.Config.Logger.InfoWithContext(ctx, "Found that NodePool resource did not exist.") + // Perform canonicalization to pick up defaults. + desired, err = canonicalizeNodePoolDesiredState(rawDesired, rawInitial) + return nil, desired, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Found initial state for NodePool: %v", rawInitial) + c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for NodePool: %v", rawDesired) + + // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. + if err := extractNodePoolFields(rawInitial); err != nil { + return nil, nil, nil, err + } + + // 1.3: Canonicalize raw initial state into initial state. + initial, err = canonicalizeNodePoolInitialState(rawInitial, rawDesired) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for NodePool: %v", initial) + + // 1.4: Canonicalize raw desired state into desired state. + desired, err = canonicalizeNodePoolDesiredState(rawDesired, rawInitial, opts...) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for NodePool: %v", desired) + + // 2.1: Comparison of initial and desired state. 
+ diffs, err = diffNodePool(c, desired, initial, opts...) + return initial, desired, diffs, err +} + +func canonicalizeNodePoolInitialState(rawInitial, rawDesired *NodePool) (*NodePool, error) { + // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. + return rawInitial, nil +} + +/* +* Canonicalizers +* +* These are responsible for converting either a user-specified config or a +* GCP API response to a standard format that can be used for difference checking. +* */ + +func canonicalizeNodePoolDesiredState(rawDesired, rawInitial *NodePool, opts ...dcl.ApplyOption) (*NodePool, error) { + + if rawInitial == nil { + // Since the initial state is empty, the desired state is all we have. + // We canonicalize the remaining nested objects with nil to pick up defaults. + rawDesired.Config = canonicalizeNodePoolConfig(rawDesired.Config, nil, opts...) + rawDesired.Autoscaling = canonicalizeNodePoolAutoscaling(rawDesired.Autoscaling, nil, opts...) + rawDesired.MaxPodsConstraint = canonicalizeNodePoolMaxPodsConstraint(rawDesired.MaxPodsConstraint, nil, opts...) + rawDesired.Management = canonicalizeNodePoolManagement(rawDesired.Management, nil, opts...) + + return rawDesired, nil + } + canonicalDesired := &NodePool{} + if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawInitial.Name) { + canonicalDesired.Name = rawInitial.Name + } else { + canonicalDesired.Name = rawDesired.Name + } + if dcl.StringCanonicalize(rawDesired.Version, rawInitial.Version) { + canonicalDesired.Version = rawInitial.Version + } else { + canonicalDesired.Version = rawDesired.Version + } + canonicalDesired.Config = canonicalizeNodePoolConfig(rawDesired.Config, rawInitial.Config, opts...) 
+ if dcl.StringCanonicalize(rawDesired.SubnetId, rawInitial.SubnetId) { + canonicalDesired.SubnetId = rawInitial.SubnetId + } else { + canonicalDesired.SubnetId = rawDesired.SubnetId + } + canonicalDesired.Autoscaling = canonicalizeNodePoolAutoscaling(rawDesired.Autoscaling, rawInitial.Autoscaling, opts...) + if dcl.IsZeroValue(rawDesired.Annotations) || (dcl.IsEmptyValueIndirect(rawDesired.Annotations) && dcl.IsEmptyValueIndirect(rawInitial.Annotations)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + canonicalDesired.Annotations = rawInitial.Annotations + } else { + canonicalDesired.Annotations = rawDesired.Annotations + } + canonicalDesired.MaxPodsConstraint = canonicalizeNodePoolMaxPodsConstraint(rawDesired.MaxPodsConstraint, rawInitial.MaxPodsConstraint, opts...) + canonicalDesired.Management = canonicalizeNodePoolManagement(rawDesired.Management, rawInitial.Management, opts...) + if dcl.StringCanonicalize(rawDesired.AzureAvailabilityZone, rawInitial.AzureAvailabilityZone) { + canonicalDesired.AzureAvailabilityZone = rawInitial.AzureAvailabilityZone + } else { + canonicalDesired.AzureAvailabilityZone = rawDesired.AzureAvailabilityZone + } + if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) { + canonicalDesired.Project = rawInitial.Project + } else { + canonicalDesired.Project = rawDesired.Project + } + if dcl.NameToSelfLink(rawDesired.Location, rawInitial.Location) { + canonicalDesired.Location = rawInitial.Location + } else { + canonicalDesired.Location = rawDesired.Location + } + if dcl.NameToSelfLink(rawDesired.Cluster, rawInitial.Cluster) { + canonicalDesired.Cluster = rawInitial.Cluster + } else { + canonicalDesired.Cluster = rawDesired.Cluster + } + return canonicalDesired, nil +} + +func canonicalizeNodePoolNewState(c *Client, rawNew, rawDesired *NodePool) (*NodePool, error) { + + if dcl.IsEmptyValueIndirect(rawNew.Name) && dcl.IsEmptyValueIndirect(rawDesired.Name) { + rawNew.Name = 
rawDesired.Name + } else { + if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawNew.Name) { + rawNew.Name = rawDesired.Name + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Version) && dcl.IsEmptyValueIndirect(rawDesired.Version) { + rawNew.Version = rawDesired.Version + } else { + if dcl.StringCanonicalize(rawDesired.Version, rawNew.Version) { + rawNew.Version = rawDesired.Version + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Config) && dcl.IsEmptyValueIndirect(rawDesired.Config) { + rawNew.Config = rawDesired.Config + } else { + rawNew.Config = canonicalizeNewNodePoolConfig(c, rawDesired.Config, rawNew.Config) + } + + if dcl.IsEmptyValueIndirect(rawNew.SubnetId) && dcl.IsEmptyValueIndirect(rawDesired.SubnetId) { + rawNew.SubnetId = rawDesired.SubnetId + } else { + if dcl.StringCanonicalize(rawDesired.SubnetId, rawNew.SubnetId) { + rawNew.SubnetId = rawDesired.SubnetId + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Autoscaling) && dcl.IsEmptyValueIndirect(rawDesired.Autoscaling) { + rawNew.Autoscaling = rawDesired.Autoscaling + } else { + rawNew.Autoscaling = canonicalizeNewNodePoolAutoscaling(c, rawDesired.Autoscaling, rawNew.Autoscaling) + } + + if dcl.IsEmptyValueIndirect(rawNew.State) && dcl.IsEmptyValueIndirect(rawDesired.State) { + rawNew.State = rawDesired.State + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.Uid) && dcl.IsEmptyValueIndirect(rawDesired.Uid) { + rawNew.Uid = rawDesired.Uid + } else { + if dcl.StringCanonicalize(rawDesired.Uid, rawNew.Uid) { + rawNew.Uid = rawDesired.Uid + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Reconciling) && dcl.IsEmptyValueIndirect(rawDesired.Reconciling) { + rawNew.Reconciling = rawDesired.Reconciling + } else { + if dcl.BoolCanonicalize(rawDesired.Reconciling, rawNew.Reconciling) { + rawNew.Reconciling = rawDesired.Reconciling + } + } + + if dcl.IsEmptyValueIndirect(rawNew.CreateTime) && dcl.IsEmptyValueIndirect(rawDesired.CreateTime) { + rawNew.CreateTime = rawDesired.CreateTime + } else { + } + + if 
dcl.IsEmptyValueIndirect(rawNew.UpdateTime) && dcl.IsEmptyValueIndirect(rawDesired.UpdateTime) { + rawNew.UpdateTime = rawDesired.UpdateTime + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.Etag) && dcl.IsEmptyValueIndirect(rawDesired.Etag) { + rawNew.Etag = rawDesired.Etag + } else { + if dcl.StringCanonicalize(rawDesired.Etag, rawNew.Etag) { + rawNew.Etag = rawDesired.Etag + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Annotations) && dcl.IsEmptyValueIndirect(rawDesired.Annotations) { + rawNew.Annotations = rawDesired.Annotations + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.MaxPodsConstraint) && dcl.IsEmptyValueIndirect(rawDesired.MaxPodsConstraint) { + rawNew.MaxPodsConstraint = rawDesired.MaxPodsConstraint + } else { + rawNew.MaxPodsConstraint = canonicalizeNewNodePoolMaxPodsConstraint(c, rawDesired.MaxPodsConstraint, rawNew.MaxPodsConstraint) + } + + if dcl.IsEmptyValueIndirect(rawNew.Management) && dcl.IsEmptyValueIndirect(rawDesired.Management) { + rawNew.Management = rawDesired.Management + } else { + rawNew.Management = canonicalizeNewNodePoolManagement(c, rawDesired.Management, rawNew.Management) + } + + if dcl.IsEmptyValueIndirect(rawNew.AzureAvailabilityZone) && dcl.IsEmptyValueIndirect(rawDesired.AzureAvailabilityZone) { + rawNew.AzureAvailabilityZone = rawDesired.AzureAvailabilityZone + } else { + if dcl.StringCanonicalize(rawDesired.AzureAvailabilityZone, rawNew.AzureAvailabilityZone) { + rawNew.AzureAvailabilityZone = rawDesired.AzureAvailabilityZone + } + } + + rawNew.Project = rawDesired.Project + + rawNew.Location = rawDesired.Location + + rawNew.Cluster = rawDesired.Cluster + + return rawNew, nil +} + +func canonicalizeNodePoolConfig(des, initial *NodePoolConfig, opts ...dcl.ApplyOption) *NodePoolConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &NodePoolConfig{} + + if dcl.StringCanonicalize(des.VmSize, initial.VmSize) || 
dcl.IsZeroValue(des.VmSize) { + cDes.VmSize = initial.VmSize + } else { + cDes.VmSize = des.VmSize + } + cDes.RootVolume = canonicalizeNodePoolConfigRootVolume(des.RootVolume, initial.RootVolume, opts...) + if dcl.IsZeroValue(des.Tags) || (dcl.IsEmptyValueIndirect(des.Tags) && dcl.IsEmptyValueIndirect(initial.Tags)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Tags = initial.Tags + } else { + cDes.Tags = des.Tags + } + if dcl.IsZeroValue(des.Labels) || (dcl.IsEmptyValueIndirect(des.Labels) && dcl.IsEmptyValueIndirect(initial.Labels)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Labels = initial.Labels + } else { + cDes.Labels = des.Labels + } + cDes.SshConfig = canonicalizeNodePoolConfigSshConfig(des.SshConfig, initial.SshConfig, opts...) +{{- if ne $.TargetVersionName "ga" }} + if dcl.StringCanonicalize(des.ImageType, initial.ImageType) || dcl.IsZeroValue(des.ImageType) { + cDes.ImageType = initial.ImageType + } else { + cDes.ImageType = des.ImageType + } +{{- end }} + cDes.ProxyConfig = canonicalizeNodePoolConfigProxyConfig(des.ProxyConfig, initial.ProxyConfig, opts...) + + return cDes +} + +func canonicalizeNodePoolConfigSlice(des, initial []NodePoolConfig, opts ...dcl.ApplyOption) []NodePoolConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]NodePoolConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeNodePoolConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]NodePoolConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeNodePoolConfig(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewNodePoolConfig(c *Client, des, nw *NodePoolConfig) *NodePoolConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for NodePoolConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.VmSize, nw.VmSize) { + nw.VmSize = des.VmSize + } + nw.RootVolume = canonicalizeNewNodePoolConfigRootVolume(c, des.RootVolume, nw.RootVolume) + nw.SshConfig = canonicalizeNewNodePoolConfigSshConfig(c, des.SshConfig, nw.SshConfig) +{{- if ne $.TargetVersionName "ga" }} + if dcl.StringCanonicalize(des.ImageType, nw.ImageType) { + nw.ImageType = des.ImageType + } +{{- end }} + nw.ProxyConfig = canonicalizeNewNodePoolConfigProxyConfig(c, des.ProxyConfig, nw.ProxyConfig) + + return nw +} + +func canonicalizeNewNodePoolConfigSet(c *Client, des, nw []NodePoolConfig) []NodePoolConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []NodePoolConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareNodePoolConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewNodePoolConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewNodePoolConfigSlice(c *Client, des, nw []NodePoolConfig) []NodePoolConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []NodePoolConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewNodePoolConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeNodePoolConfigRootVolume(des, initial *NodePoolConfigRootVolume, opts ...dcl.ApplyOption) *NodePoolConfigRootVolume { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &NodePoolConfigRootVolume{} + + if dcl.IsZeroValue(des.SizeGib) || (dcl.IsEmptyValueIndirect(des.SizeGib) && dcl.IsEmptyValueIndirect(initial.SizeGib)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.SizeGib = initial.SizeGib + } else { + cDes.SizeGib = des.SizeGib + } + + return cDes +} + +func canonicalizeNodePoolConfigRootVolumeSlice(des, initial []NodePoolConfigRootVolume, opts ...dcl.ApplyOption) []NodePoolConfigRootVolume { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]NodePoolConfigRootVolume, 0, len(des)) + for _, d := range des { + cd := canonicalizeNodePoolConfigRootVolume(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]NodePoolConfigRootVolume, 0, len(des)) + for i, d := range des { + cd := canonicalizeNodePoolConfigRootVolume(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewNodePoolConfigRootVolume(c *Client, des, nw *NodePoolConfigRootVolume) *NodePoolConfigRootVolume { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for NodePoolConfigRootVolume while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewNodePoolConfigRootVolumeSet(c *Client, des, nw []NodePoolConfigRootVolume) []NodePoolConfigRootVolume { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []NodePoolConfigRootVolume + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareNodePoolConfigRootVolumeNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewNodePoolConfigRootVolume(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewNodePoolConfigRootVolumeSlice(c *Client, des, nw []NodePoolConfigRootVolume) []NodePoolConfigRootVolume { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []NodePoolConfigRootVolume + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewNodePoolConfigRootVolume(c, &d, &n)) + } + + return items +} + +func canonicalizeNodePoolConfigSshConfig(des, initial *NodePoolConfigSshConfig, opts ...dcl.ApplyOption) *NodePoolConfigSshConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &NodePoolConfigSshConfig{} + + if dcl.StringCanonicalize(des.AuthorizedKey, initial.AuthorizedKey) || dcl.IsZeroValue(des.AuthorizedKey) { + cDes.AuthorizedKey = initial.AuthorizedKey + } else { + cDes.AuthorizedKey = des.AuthorizedKey + } + + return cDes +} + +func canonicalizeNodePoolConfigSshConfigSlice(des, initial []NodePoolConfigSshConfig, opts ...dcl.ApplyOption) []NodePoolConfigSshConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]NodePoolConfigSshConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeNodePoolConfigSshConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]NodePoolConfigSshConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeNodePoolConfigSshConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewNodePoolConfigSshConfig(c *Client, des, nw *NodePoolConfigSshConfig) *NodePoolConfigSshConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for NodePoolConfigSshConfig while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.AuthorizedKey, nw.AuthorizedKey) { + nw.AuthorizedKey = des.AuthorizedKey + } + + return nw +} + +func canonicalizeNewNodePoolConfigSshConfigSet(c *Client, des, nw []NodePoolConfigSshConfig) []NodePoolConfigSshConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []NodePoolConfigSshConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareNodePoolConfigSshConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewNodePoolConfigSshConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewNodePoolConfigSshConfigSlice(c *Client, des, nw []NodePoolConfigSshConfig) []NodePoolConfigSshConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []NodePoolConfigSshConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewNodePoolConfigSshConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeNodePoolConfigProxyConfig(des, initial *NodePoolConfigProxyConfig, opts ...dcl.ApplyOption) *NodePoolConfigProxyConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &NodePoolConfigProxyConfig{} + + if dcl.StringCanonicalize(des.ResourceGroupId, initial.ResourceGroupId) || dcl.IsZeroValue(des.ResourceGroupId) { + cDes.ResourceGroupId = initial.ResourceGroupId + } else { + cDes.ResourceGroupId = des.ResourceGroupId + } + if dcl.StringCanonicalize(des.SecretId, initial.SecretId) || dcl.IsZeroValue(des.SecretId) { + cDes.SecretId = initial.SecretId + } else { + cDes.SecretId = des.SecretId + } + + return cDes +} + +func canonicalizeNodePoolConfigProxyConfigSlice(des, initial []NodePoolConfigProxyConfig, opts ...dcl.ApplyOption) []NodePoolConfigProxyConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]NodePoolConfigProxyConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeNodePoolConfigProxyConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]NodePoolConfigProxyConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeNodePoolConfigProxyConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewNodePoolConfigProxyConfig(c *Client, des, nw *NodePoolConfigProxyConfig) *NodePoolConfigProxyConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for NodePoolConfigProxyConfig while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.ResourceGroupId, nw.ResourceGroupId) { + nw.ResourceGroupId = des.ResourceGroupId + } + if dcl.StringCanonicalize(des.SecretId, nw.SecretId) { + nw.SecretId = des.SecretId + } + + return nw +} + +func canonicalizeNewNodePoolConfigProxyConfigSet(c *Client, des, nw []NodePoolConfigProxyConfig) []NodePoolConfigProxyConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []NodePoolConfigProxyConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareNodePoolConfigProxyConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewNodePoolConfigProxyConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewNodePoolConfigProxyConfigSlice(c *Client, des, nw []NodePoolConfigProxyConfig) []NodePoolConfigProxyConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []NodePoolConfigProxyConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewNodePoolConfigProxyConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeNodePoolAutoscaling(des, initial *NodePoolAutoscaling, opts ...dcl.ApplyOption) *NodePoolAutoscaling { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &NodePoolAutoscaling{} + + if dcl.IsZeroValue(des.MinNodeCount) || (dcl.IsEmptyValueIndirect(des.MinNodeCount) && dcl.IsEmptyValueIndirect(initial.MinNodeCount)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.MinNodeCount = initial.MinNodeCount + } else { + cDes.MinNodeCount = des.MinNodeCount + } + if dcl.IsZeroValue(des.MaxNodeCount) || (dcl.IsEmptyValueIndirect(des.MaxNodeCount) && dcl.IsEmptyValueIndirect(initial.MaxNodeCount)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.MaxNodeCount = initial.MaxNodeCount + } else { + cDes.MaxNodeCount = des.MaxNodeCount + } + + return cDes +} + +func canonicalizeNodePoolAutoscalingSlice(des, initial []NodePoolAutoscaling, opts ...dcl.ApplyOption) []NodePoolAutoscaling { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]NodePoolAutoscaling, 0, len(des)) + for _, d := range des { + cd := canonicalizeNodePoolAutoscaling(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]NodePoolAutoscaling, 0, len(des)) + for i, d := range des { + cd := canonicalizeNodePoolAutoscaling(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewNodePoolAutoscaling(c *Client, des, nw *NodePoolAutoscaling) *NodePoolAutoscaling { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for NodePoolAutoscaling while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewNodePoolAutoscalingSet(c *Client, des, nw []NodePoolAutoscaling) []NodePoolAutoscaling { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []NodePoolAutoscaling + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareNodePoolAutoscalingNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewNodePoolAutoscaling(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewNodePoolAutoscalingSlice(c *Client, des, nw []NodePoolAutoscaling) []NodePoolAutoscaling { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []NodePoolAutoscaling + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewNodePoolAutoscaling(c, &d, &n)) + } + + return items +} + +func canonicalizeNodePoolMaxPodsConstraint(des, initial *NodePoolMaxPodsConstraint, opts ...dcl.ApplyOption) *NodePoolMaxPodsConstraint { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &NodePoolMaxPodsConstraint{} + + if dcl.IsZeroValue(des.MaxPodsPerNode) || (dcl.IsEmptyValueIndirect(des.MaxPodsPerNode) && dcl.IsEmptyValueIndirect(initial.MaxPodsPerNode)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.MaxPodsPerNode = initial.MaxPodsPerNode + } else { + cDes.MaxPodsPerNode = des.MaxPodsPerNode + } + + return cDes +} + +func canonicalizeNodePoolMaxPodsConstraintSlice(des, initial []NodePoolMaxPodsConstraint, opts ...dcl.ApplyOption) []NodePoolMaxPodsConstraint { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]NodePoolMaxPodsConstraint, 0, len(des)) + for _, d := range des { + cd := canonicalizeNodePoolMaxPodsConstraint(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]NodePoolMaxPodsConstraint, 0, len(des)) + for i, d := range des { + cd := canonicalizeNodePoolMaxPodsConstraint(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewNodePoolMaxPodsConstraint(c *Client, des, nw *NodePoolMaxPodsConstraint) *NodePoolMaxPodsConstraint { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for NodePoolMaxPodsConstraint while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewNodePoolMaxPodsConstraintSet(c *Client, des, nw []NodePoolMaxPodsConstraint) []NodePoolMaxPodsConstraint { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []NodePoolMaxPodsConstraint + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareNodePoolMaxPodsConstraintNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewNodePoolMaxPodsConstraint(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewNodePoolMaxPodsConstraintSlice(c *Client, des, nw []NodePoolMaxPodsConstraint) []NodePoolMaxPodsConstraint { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []NodePoolMaxPodsConstraint + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewNodePoolMaxPodsConstraint(c, &d, &n)) + } + + return items +} + +func canonicalizeNodePoolManagement(des, initial *NodePoolManagement, opts ...dcl.ApplyOption) *NodePoolManagement { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &NodePoolManagement{} + + if dcl.BoolCanonicalize(des.AutoRepair, initial.AutoRepair) || dcl.IsZeroValue(des.AutoRepair) { + cDes.AutoRepair = initial.AutoRepair + } else { + cDes.AutoRepair = des.AutoRepair + } + + return cDes +} + +func canonicalizeNodePoolManagementSlice(des, initial []NodePoolManagement, opts ...dcl.ApplyOption) []NodePoolManagement { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]NodePoolManagement, 0, len(des)) + for _, d := range des { + cd := canonicalizeNodePoolManagement(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]NodePoolManagement, 0, len(des)) + for i, d := range des { + cd := canonicalizeNodePoolManagement(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewNodePoolManagement(c *Client, des, nw *NodePoolManagement) *NodePoolManagement { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for NodePoolManagement while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.BoolCanonicalize(des.AutoRepair, nw.AutoRepair) { + nw.AutoRepair = des.AutoRepair + } + + return nw +} + +func canonicalizeNewNodePoolManagementSet(c *Client, des, nw []NodePoolManagement) []NodePoolManagement { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []NodePoolManagement + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareNodePoolManagementNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewNodePoolManagement(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewNodePoolManagementSlice(c *Client, des, nw []NodePoolManagement) []NodePoolManagement { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []NodePoolManagement + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewNodePoolManagement(c, &d, &n)) + } + + return items +} + +// The differ returns a list of diffs, along with a list of operations that should be taken +// to remedy them. Right now, it does not attempt to consolidate operations - if several +// fields can be fixed with a patch update, it will perform the patch several times. +// Diffs on some fields will be ignored if the `desired` state has an empty (nil) +// value. This empty value indicates that the user does not care about the state for +// the field. Empty fields on the actual object will cause diffs. 
+// TODO(magic-modules-eng): for efficiency in some resources, add batching. +func diffNodePool(c *Client, desired, actual *NodePool, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) { + if desired == nil || actual == nil { + return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual) + } + + c.Config.Logger.Infof("Diff function called with desired state: %v", desired) + c.Config.Logger.Infof("Diff function called with actual state: %v", actual) + + var fn dcl.FieldName + var newDiffs []*dcl.FieldDiff + // New style diffs. + if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Version, actual.Version, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAzureNodePoolOperation")}, fn.AddNest("Version")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Config, actual.Config, dcl.DiffInfo{ObjectFunction: compareNodePoolConfigNewStyle, EmptyObject: EmptyNodePoolConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Config")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.SubnetId, actual.SubnetId, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SubnetId")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Autoscaling, actual.Autoscaling, dcl.DiffInfo{ObjectFunction: compareNodePoolAutoscalingNewStyle, EmptyObject: EmptyNodePoolAutoscaling, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Autoscaling")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.State, actual.State, dcl.DiffInfo{OutputOnly: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("State")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Uid, actual.Uid, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Uid")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Reconciling, actual.Reconciling, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Reconciling")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.CreateTime, actual.CreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Etag, actual.Etag, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Etag")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Annotations, actual.Annotations, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAzureNodePoolOperation")}, fn.AddNest("Annotations")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.MaxPodsConstraint, actual.MaxPodsConstraint, dcl.DiffInfo{ObjectFunction: compareNodePoolMaxPodsConstraintNewStyle, EmptyObject: EmptyNodePoolMaxPodsConstraint, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MaxPodsConstraint")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Management, actual.Management, dcl.DiffInfo{ObjectFunction: compareNodePoolManagementNewStyle, EmptyObject: EmptyNodePoolManagement, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Management")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.AzureAvailabilityZone, actual.AzureAvailabilityZone, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AzureAvailabilityZone")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Location, actual.Location, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Location")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Cluster, actual.Cluster, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Cluster")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if len(newDiffs) > 0 { + c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs) + } + return newDiffs, nil +} +func compareNodePoolConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*NodePoolConfig) + if !ok { + desiredNotPointer, ok := d.(NodePoolConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolConfig or *NodePoolConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*NodePoolConfig) + if !ok { + actualNotPointer, ok := a.(NodePoolConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.VmSize, actual.VmSize, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("VmSize")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.RootVolume, actual.RootVolume, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareNodePoolConfigRootVolumeNewStyle, EmptyObject: EmptyNodePoolConfigRootVolume, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("RootVolume")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Tags, actual.Tags, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Tags")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Labels, actual.Labels, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Labels")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.SshConfig, actual.SshConfig, dcl.DiffInfo{ObjectFunction: compareNodePoolConfigSshConfigNewStyle, EmptyObject: EmptyNodePoolConfigSshConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SshConfig")); len(ds) != 0 || err != nil { +{{- if ne $.TargetVersionName "ga" }} + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ImageType, actual.ImageType, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ImageType")); len(ds) != 0 || err != nil { +{{- end }} + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ProxyConfig, actual.ProxyConfig, dcl.DiffInfo{ObjectFunction: compareNodePoolConfigProxyConfigNewStyle, EmptyObject: EmptyNodePoolConfigProxyConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ProxyConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareNodePoolConfigRootVolumeNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*NodePoolConfigRootVolume) + if !ok { + desiredNotPointer, ok := d.(NodePoolConfigRootVolume) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolConfigRootVolume or *NodePoolConfigRootVolume", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*NodePoolConfigRootVolume) + if !ok { + actualNotPointer, ok := a.(NodePoolConfigRootVolume) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolConfigRootVolume", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.SizeGib, actual.SizeGib, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SizeGib")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareNodePoolConfigSshConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*NodePoolConfigSshConfig) + if !ok { + desiredNotPointer, ok := d.(NodePoolConfigSshConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolConfigSshConfig or *NodePoolConfigSshConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*NodePoolConfigSshConfig) + if !ok { + actualNotPointer, ok := a.(NodePoolConfigSshConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolConfigSshConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.AuthorizedKey, actual.AuthorizedKey, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAzureNodePoolOperation")}, fn.AddNest("AuthorizedKey")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareNodePoolConfigProxyConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*NodePoolConfigProxyConfig) + if !ok { + desiredNotPointer, ok := d.(NodePoolConfigProxyConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolConfigProxyConfig or *NodePoolConfigProxyConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*NodePoolConfigProxyConfig) + if !ok { + actualNotPointer, ok := a.(NodePoolConfigProxyConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolConfigProxyConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.ResourceGroupId, actual.ResourceGroupId, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ResourceGroupId")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.SecretId, actual.SecretId, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SecretId")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareNodePoolAutoscalingNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*NodePoolAutoscaling) + if !ok { + desiredNotPointer, ok := d.(NodePoolAutoscaling) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolAutoscaling or *NodePoolAutoscaling", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*NodePoolAutoscaling) + if !ok { + actualNotPointer, ok := a.(NodePoolAutoscaling) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolAutoscaling", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.MinNodeCount, actual.MinNodeCount, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAzureNodePoolOperation")}, fn.AddNest("MinNodeCount")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.MaxNodeCount, actual.MaxNodeCount, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAzureNodePoolOperation")}, fn.AddNest("MaxNodeCount")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareNodePoolMaxPodsConstraintNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*NodePoolMaxPodsConstraint) + if !ok { + desiredNotPointer, ok := d.(NodePoolMaxPodsConstraint) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolMaxPodsConstraint or *NodePoolMaxPodsConstraint", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*NodePoolMaxPodsConstraint) + if !ok { + actualNotPointer, ok := a.(NodePoolMaxPodsConstraint) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolMaxPodsConstraint", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.MaxPodsPerNode, actual.MaxPodsPerNode, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MaxPodsPerNode")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareNodePoolManagementNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*NodePoolManagement) + if !ok { + desiredNotPointer, ok := d.(NodePoolManagement) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolManagement or *NodePoolManagement", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*NodePoolManagement) + if !ok { + actualNotPointer, ok := a.(NodePoolManagement) + if !ok { + return nil, fmt.Errorf("obj %v is not a NodePoolManagement", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.AutoRepair, actual.AutoRepair, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAzureNodePoolOperation")}, fn.AddNest("AutoRepair")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +// urlNormalized returns a copy of the resource struct with values normalized +// for URL substitutions. 
For instance, it converts long-form self-links to +// short-form so they can be substituted in. +func (r *NodePool) urlNormalized() *NodePool { + normalized := dcl.Copy(*r).(NodePool) + normalized.Name = dcl.SelfLinkToName(r.Name) + normalized.Version = dcl.SelfLinkToName(r.Version) + normalized.SubnetId = dcl.SelfLinkToName(r.SubnetId) + normalized.Uid = dcl.SelfLinkToName(r.Uid) + normalized.Etag = dcl.SelfLinkToName(r.Etag) + normalized.AzureAvailabilityZone = dcl.SelfLinkToName(r.AzureAvailabilityZone) + normalized.Project = dcl.SelfLinkToName(r.Project) + normalized.Location = dcl.SelfLinkToName(r.Location) + normalized.Cluster = dcl.SelfLinkToName(r.Cluster) + return &normalized +} + +func (r *NodePool) updateURL(userBasePath, updateName string) (string, error) { + nr := r.urlNormalized() + if updateName == "UpdateAzureNodePool" { + fields := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "cluster": dcl.ValueOrEmptyString(nr.Cluster), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/azureClusters/{{ "{{" }}cluster{{ "}}" }}/azureNodePools/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, fields), nil + + } + + return "", fmt.Errorf("unknown update name: %s", updateName) +} + +// marshal encodes the NodePool resource into JSON for a Create request, and +// performs transformations from the resource schema to the API schema if +// necessary. +func (r *NodePool) marshal(c *Client) ([]byte, error) { + m, err := expandNodePool(c, r) + if err != nil { + return nil, fmt.Errorf("error marshalling NodePool: %w", err) + } + + return json.Marshal(m) +} + +// unmarshalNodePool decodes JSON responses into the NodePool resource schema. 
+func unmarshalNodePool(b []byte, c *Client, res *NodePool) (*NodePool, error) { + var m map[string]interface{} + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + return unmarshalMapNodePool(m, c, res) +} + +func unmarshalMapNodePool(m map[string]interface{}, c *Client, res *NodePool) (*NodePool, error) { + + flattened := flattenNodePool(c, m, res) + if flattened == nil { + return nil, fmt.Errorf("attempted to flatten empty json object") + } + return flattened, nil +} + +// expandNodePool expands NodePool into a JSON request object. +func expandNodePool(c *Client, f *NodePool) (map[string]interface{}, error) { + m := make(map[string]interface{}) + res := f + _ = res + if v, err := dcl.DeriveField("projects/%s/locations/%s/azureClusters/%s/azureNodePools/%s", f.Name, dcl.SelfLinkToName(f.Project), dcl.SelfLinkToName(f.Location), dcl.SelfLinkToName(f.Cluster), dcl.SelfLinkToName(f.Name)); err != nil { + return nil, fmt.Errorf("error expanding Name into name: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["name"] = v + } + if v := f.Version; dcl.ValueShouldBeSent(v) { + m["version"] = v + } + if v, err := expandNodePoolConfig(c, f.Config, res); err != nil { + return nil, fmt.Errorf("error expanding Config into config: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["config"] = v + } + if v := f.SubnetId; dcl.ValueShouldBeSent(v) { + m["subnetId"] = v + } + if v, err := expandNodePoolAutoscaling(c, f.Autoscaling, res); err != nil { + return nil, fmt.Errorf("error expanding Autoscaling into autoscaling: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["autoscaling"] = v + } + if v := f.Annotations; dcl.ValueShouldBeSent(v) { + m["annotations"] = v + } + if v, err := expandNodePoolMaxPodsConstraint(c, f.MaxPodsConstraint, res); err != nil { + return nil, fmt.Errorf("error expanding MaxPodsConstraint into maxPodsConstraint: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["maxPodsConstraint"] = v + } + if v, err := 
expandNodePoolManagement(c, f.Management, res); err != nil { + return nil, fmt.Errorf("error expanding Management into management: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["management"] = v + } + if v := f.AzureAvailabilityZone; dcl.ValueShouldBeSent(v) { + m["azureAvailabilityZone"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Project into project: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["project"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Location into location: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["location"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Cluster into cluster: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["cluster"] = v + } + + return m, nil +} + +// flattenNodePool flattens NodePool from a JSON request object into the +// NodePool type. +func flattenNodePool(c *Client, i interface{}, res *NodePool) *NodePool { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + if len(m) == 0 { + return nil + } + + resultRes := &NodePool{} + resultRes.Name = dcl.FlattenString(m["name"]) + resultRes.Version = dcl.FlattenString(m["version"]) + resultRes.Config = flattenNodePoolConfig(c, m["config"], res) + resultRes.SubnetId = dcl.FlattenString(m["subnetId"]) + resultRes.Autoscaling = flattenNodePoolAutoscaling(c, m["autoscaling"], res) + resultRes.State = flattenNodePoolStateEnum(m["state"]) + resultRes.Uid = dcl.FlattenString(m["uid"]) + resultRes.Reconciling = dcl.FlattenBool(m["reconciling"]) + resultRes.CreateTime = dcl.FlattenString(m["createTime"]) + resultRes.UpdateTime = dcl.FlattenString(m["updateTime"]) + resultRes.Etag = dcl.FlattenString(m["etag"]) + resultRes.Annotations = dcl.FlattenKeyValuePairs(m["annotations"]) + resultRes.MaxPodsConstraint = flattenNodePoolMaxPodsConstraint(c, m["maxPodsConstraint"], res) + 
resultRes.Management = flattenNodePoolManagement(c, m["management"], res) + resultRes.AzureAvailabilityZone = dcl.FlattenString(m["azureAvailabilityZone"]) + resultRes.Project = dcl.FlattenString(m["project"]) + resultRes.Location = dcl.FlattenString(m["location"]) + resultRes.Cluster = dcl.FlattenString(m["cluster"]) + + return resultRes +} + +// expandNodePoolConfigMap expands the contents of NodePoolConfig into a JSON +// request object. +func expandNodePoolConfigMap(c *Client, f map[string]NodePoolConfig, res *NodePool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandNodePoolConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandNodePoolConfigSlice expands the contents of NodePoolConfig into a JSON +// request object. +func expandNodePoolConfigSlice(c *Client, f []NodePoolConfig, res *NodePool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandNodePoolConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenNodePoolConfigMap flattens the contents of NodePoolConfig from a JSON +// response object. +func flattenNodePoolConfigMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolConfig{} + } + + if len(a) == 0 { + return map[string]NodePoolConfig{} + } + + items := make(map[string]NodePoolConfig) + for k, item := range a { + items[k] = *flattenNodePoolConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenNodePoolConfigSlice flattens the contents of NodePoolConfig from a JSON +// response object. 
+func flattenNodePoolConfigSlice(c *Client, i interface{}, res *NodePool) []NodePoolConfig { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolConfig{} + } + + if len(a) == 0 { + return []NodePoolConfig{} + } + + items := make([]NodePoolConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandNodePoolConfig expands an instance of NodePoolConfig into a JSON +// request object. +func expandNodePoolConfig(c *Client, f *NodePoolConfig, res *NodePool) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.VmSize; !dcl.IsEmptyValueIndirect(v) { + m["vmSize"] = v + } + if v, err := expandNodePoolConfigRootVolume(c, f.RootVolume, res); err != nil { + return nil, fmt.Errorf("error expanding RootVolume into rootVolume: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["rootVolume"] = v + } + if v := f.Tags; !dcl.IsEmptyValueIndirect(v) { + m["tags"] = v + } + if v := f.Labels; !dcl.IsEmptyValueIndirect(v) { + m["labels"] = v + } + if v, err := expandNodePoolConfigSshConfig(c, f.SshConfig, res); err != nil { + return nil, fmt.Errorf("error expanding SshConfig into sshConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["sshConfig"] = v + } +{{- if ne $.TargetVersionName "ga" }} + if v := f.ImageType; !dcl.IsEmptyValueIndirect(v) { + m["imageType"] = v + } +{{- end }} + if v, err := expandNodePoolConfigProxyConfig(c, f.ProxyConfig, res); err != nil { + return nil, fmt.Errorf("error expanding ProxyConfig into proxyConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["proxyConfig"] = v + } + + return m, nil +} + +// flattenNodePoolConfig flattens an instance of NodePoolConfig from a JSON +// response object. 
+func flattenNodePoolConfig(c *Client, i interface{}, res *NodePool) *NodePoolConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &NodePoolConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyNodePoolConfig + } + r.VmSize = dcl.FlattenString(m["vmSize"]) + r.RootVolume = flattenNodePoolConfigRootVolume(c, m["rootVolume"], res) + r.Tags = dcl.FlattenKeyValuePairs(m["tags"]) + r.Labels = dcl.FlattenKeyValuePairs(m["labels"]) + r.SshConfig = flattenNodePoolConfigSshConfig(c, m["sshConfig"], res) +{{- if ne $.TargetVersionName "ga" }} + r.ImageType = dcl.FlattenString(m["imageType"]) +{{- end }} + r.ProxyConfig = flattenNodePoolConfigProxyConfig(c, m["proxyConfig"], res) + + return r +} + +// expandNodePoolConfigRootVolumeMap expands the contents of NodePoolConfigRootVolume into a JSON +// request object. +func expandNodePoolConfigRootVolumeMap(c *Client, f map[string]NodePoolConfigRootVolume, res *NodePool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandNodePoolConfigRootVolume(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandNodePoolConfigRootVolumeSlice expands the contents of NodePoolConfigRootVolume into a JSON +// request object. +func expandNodePoolConfigRootVolumeSlice(c *Client, f []NodePoolConfigRootVolume, res *NodePool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandNodePoolConfigRootVolume(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenNodePoolConfigRootVolumeMap flattens the contents of NodePoolConfigRootVolume from a JSON +// response object. 
+func flattenNodePoolConfigRootVolumeMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolConfigRootVolume { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolConfigRootVolume{} + } + + if len(a) == 0 { + return map[string]NodePoolConfigRootVolume{} + } + + items := make(map[string]NodePoolConfigRootVolume) + for k, item := range a { + items[k] = *flattenNodePoolConfigRootVolume(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenNodePoolConfigRootVolumeSlice flattens the contents of NodePoolConfigRootVolume from a JSON +// response object. +func flattenNodePoolConfigRootVolumeSlice(c *Client, i interface{}, res *NodePool) []NodePoolConfigRootVolume { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolConfigRootVolume{} + } + + if len(a) == 0 { + return []NodePoolConfigRootVolume{} + } + + items := make([]NodePoolConfigRootVolume, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolConfigRootVolume(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandNodePoolConfigRootVolume expands an instance of NodePoolConfigRootVolume into a JSON +// request object. +func expandNodePoolConfigRootVolume(c *Client, f *NodePoolConfigRootVolume, res *NodePool) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.SizeGib; !dcl.IsEmptyValueIndirect(v) { + m["sizeGib"] = v + } + + return m, nil +} + +// flattenNodePoolConfigRootVolume flattens an instance of NodePoolConfigRootVolume from a JSON +// response object. 
+func flattenNodePoolConfigRootVolume(c *Client, i interface{}, res *NodePool) *NodePoolConfigRootVolume { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &NodePoolConfigRootVolume{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyNodePoolConfigRootVolume + } + r.SizeGib = dcl.FlattenInteger(m["sizeGib"]) + + return r +} + +// expandNodePoolConfigSshConfigMap expands the contents of NodePoolConfigSshConfig into a JSON +// request object. +func expandNodePoolConfigSshConfigMap(c *Client, f map[string]NodePoolConfigSshConfig, res *NodePool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandNodePoolConfigSshConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandNodePoolConfigSshConfigSlice expands the contents of NodePoolConfigSshConfig into a JSON +// request object. +func expandNodePoolConfigSshConfigSlice(c *Client, f []NodePoolConfigSshConfig, res *NodePool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandNodePoolConfigSshConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenNodePoolConfigSshConfigMap flattens the contents of NodePoolConfigSshConfig from a JSON +// response object. 
+func flattenNodePoolConfigSshConfigMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolConfigSshConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolConfigSshConfig{} + } + + if len(a) == 0 { + return map[string]NodePoolConfigSshConfig{} + } + + items := make(map[string]NodePoolConfigSshConfig) + for k, item := range a { + items[k] = *flattenNodePoolConfigSshConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenNodePoolConfigSshConfigSlice flattens the contents of NodePoolConfigSshConfig from a JSON +// response object. +func flattenNodePoolConfigSshConfigSlice(c *Client, i interface{}, res *NodePool) []NodePoolConfigSshConfig { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolConfigSshConfig{} + } + + if len(a) == 0 { + return []NodePoolConfigSshConfig{} + } + + items := make([]NodePoolConfigSshConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolConfigSshConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandNodePoolConfigSshConfig expands an instance of NodePoolConfigSshConfig into a JSON +// request object. +func expandNodePoolConfigSshConfig(c *Client, f *NodePoolConfigSshConfig, res *NodePool) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.AuthorizedKey; !dcl.IsEmptyValueIndirect(v) { + m["authorizedKey"] = v + } + + return m, nil +} + +// flattenNodePoolConfigSshConfig flattens an instance of NodePoolConfigSshConfig from a JSON +// response object. 
+func flattenNodePoolConfigSshConfig(c *Client, i interface{}, res *NodePool) *NodePoolConfigSshConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &NodePoolConfigSshConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyNodePoolConfigSshConfig + } + r.AuthorizedKey = dcl.FlattenString(m["authorizedKey"]) + + return r +} + +// expandNodePoolConfigProxyConfigMap expands the contents of NodePoolConfigProxyConfig into a JSON +// request object. +func expandNodePoolConfigProxyConfigMap(c *Client, f map[string]NodePoolConfigProxyConfig, res *NodePool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandNodePoolConfigProxyConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandNodePoolConfigProxyConfigSlice expands the contents of NodePoolConfigProxyConfig into a JSON +// request object. +func expandNodePoolConfigProxyConfigSlice(c *Client, f []NodePoolConfigProxyConfig, res *NodePool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandNodePoolConfigProxyConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenNodePoolConfigProxyConfigMap flattens the contents of NodePoolConfigProxyConfig from a JSON +// response object. 
+func flattenNodePoolConfigProxyConfigMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolConfigProxyConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolConfigProxyConfig{} + } + + if len(a) == 0 { + return map[string]NodePoolConfigProxyConfig{} + } + + items := make(map[string]NodePoolConfigProxyConfig) + for k, item := range a { + items[k] = *flattenNodePoolConfigProxyConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenNodePoolConfigProxyConfigSlice flattens the contents of NodePoolConfigProxyConfig from a JSON +// response object. +func flattenNodePoolConfigProxyConfigSlice(c *Client, i interface{}, res *NodePool) []NodePoolConfigProxyConfig { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolConfigProxyConfig{} + } + + if len(a) == 0 { + return []NodePoolConfigProxyConfig{} + } + + items := make([]NodePoolConfigProxyConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolConfigProxyConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandNodePoolConfigProxyConfig expands an instance of NodePoolConfigProxyConfig into a JSON +// request object. +func expandNodePoolConfigProxyConfig(c *Client, f *NodePoolConfigProxyConfig, res *NodePool) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.ResourceGroupId; !dcl.IsEmptyValueIndirect(v) { + m["resourceGroupId"] = v + } + if v := f.SecretId; !dcl.IsEmptyValueIndirect(v) { + m["secretId"] = v + } + + return m, nil +} + +// flattenNodePoolConfigProxyConfig flattens an instance of NodePoolConfigProxyConfig from a JSON +// response object. 
+func flattenNodePoolConfigProxyConfig(c *Client, i interface{}, res *NodePool) *NodePoolConfigProxyConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &NodePoolConfigProxyConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyNodePoolConfigProxyConfig + } + r.ResourceGroupId = dcl.FlattenString(m["resourceGroupId"]) + r.SecretId = dcl.FlattenString(m["secretId"]) + + return r +} + +// expandNodePoolAutoscalingMap expands the contents of NodePoolAutoscaling into a JSON +// request object. +func expandNodePoolAutoscalingMap(c *Client, f map[string]NodePoolAutoscaling, res *NodePool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandNodePoolAutoscaling(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandNodePoolAutoscalingSlice expands the contents of NodePoolAutoscaling into a JSON +// request object. +func expandNodePoolAutoscalingSlice(c *Client, f []NodePoolAutoscaling, res *NodePool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandNodePoolAutoscaling(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenNodePoolAutoscalingMap flattens the contents of NodePoolAutoscaling from a JSON +// response object. 
+func flattenNodePoolAutoscalingMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolAutoscaling { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolAutoscaling{} + } + + if len(a) == 0 { + return map[string]NodePoolAutoscaling{} + } + + items := make(map[string]NodePoolAutoscaling) + for k, item := range a { + items[k] = *flattenNodePoolAutoscaling(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenNodePoolAutoscalingSlice flattens the contents of NodePoolAutoscaling from a JSON +// response object. +func flattenNodePoolAutoscalingSlice(c *Client, i interface{}, res *NodePool) []NodePoolAutoscaling { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolAutoscaling{} + } + + if len(a) == 0 { + return []NodePoolAutoscaling{} + } + + items := make([]NodePoolAutoscaling, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolAutoscaling(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandNodePoolAutoscaling expands an instance of NodePoolAutoscaling into a JSON +// request object. +func expandNodePoolAutoscaling(c *Client, f *NodePoolAutoscaling, res *NodePool) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.MinNodeCount; !dcl.IsEmptyValueIndirect(v) { + m["minNodeCount"] = v + } + if v := f.MaxNodeCount; !dcl.IsEmptyValueIndirect(v) { + m["maxNodeCount"] = v + } + + return m, nil +} + +// flattenNodePoolAutoscaling flattens an instance of NodePoolAutoscaling from a JSON +// response object. 
+func flattenNodePoolAutoscaling(c *Client, i interface{}, res *NodePool) *NodePoolAutoscaling { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &NodePoolAutoscaling{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyNodePoolAutoscaling + } + r.MinNodeCount = dcl.FlattenInteger(m["minNodeCount"]) + r.MaxNodeCount = dcl.FlattenInteger(m["maxNodeCount"]) + + return r +} + +// expandNodePoolMaxPodsConstraintMap expands the contents of NodePoolMaxPodsConstraint into a JSON +// request object. +func expandNodePoolMaxPodsConstraintMap(c *Client, f map[string]NodePoolMaxPodsConstraint, res *NodePool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandNodePoolMaxPodsConstraint(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandNodePoolMaxPodsConstraintSlice expands the contents of NodePoolMaxPodsConstraint into a JSON +// request object. +func expandNodePoolMaxPodsConstraintSlice(c *Client, f []NodePoolMaxPodsConstraint, res *NodePool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandNodePoolMaxPodsConstraint(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenNodePoolMaxPodsConstraintMap flattens the contents of NodePoolMaxPodsConstraint from a JSON +// response object. 
+func flattenNodePoolMaxPodsConstraintMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolMaxPodsConstraint { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolMaxPodsConstraint{} + } + + if len(a) == 0 { + return map[string]NodePoolMaxPodsConstraint{} + } + + items := make(map[string]NodePoolMaxPodsConstraint) + for k, item := range a { + items[k] = *flattenNodePoolMaxPodsConstraint(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenNodePoolMaxPodsConstraintSlice flattens the contents of NodePoolMaxPodsConstraint from a JSON +// response object. +func flattenNodePoolMaxPodsConstraintSlice(c *Client, i interface{}, res *NodePool) []NodePoolMaxPodsConstraint { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolMaxPodsConstraint{} + } + + if len(a) == 0 { + return []NodePoolMaxPodsConstraint{} + } + + items := make([]NodePoolMaxPodsConstraint, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolMaxPodsConstraint(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandNodePoolMaxPodsConstraint expands an instance of NodePoolMaxPodsConstraint into a JSON +// request object. +func expandNodePoolMaxPodsConstraint(c *Client, f *NodePoolMaxPodsConstraint, res *NodePool) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.MaxPodsPerNode; !dcl.IsEmptyValueIndirect(v) { + m["maxPodsPerNode"] = v + } + + return m, nil +} + +// flattenNodePoolMaxPodsConstraint flattens an instance of NodePoolMaxPodsConstraint from a JSON +// response object. 
+func flattenNodePoolMaxPodsConstraint(c *Client, i interface{}, res *NodePool) *NodePoolMaxPodsConstraint { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &NodePoolMaxPodsConstraint{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyNodePoolMaxPodsConstraint + } + r.MaxPodsPerNode = dcl.FlattenInteger(m["maxPodsPerNode"]) + + return r +} + +// expandNodePoolManagementMap expands the contents of NodePoolManagement into a JSON +// request object. +func expandNodePoolManagementMap(c *Client, f map[string]NodePoolManagement, res *NodePool) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandNodePoolManagement(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandNodePoolManagementSlice expands the contents of NodePoolManagement into a JSON +// request object. +func expandNodePoolManagementSlice(c *Client, f []NodePoolManagement, res *NodePool) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandNodePoolManagement(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenNodePoolManagementMap flattens the contents of NodePoolManagement from a JSON +// response object. 
+func flattenNodePoolManagementMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolManagement { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolManagement{} + } + + if len(a) == 0 { + return map[string]NodePoolManagement{} + } + + items := make(map[string]NodePoolManagement) + for k, item := range a { + items[k] = *flattenNodePoolManagement(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenNodePoolManagementSlice flattens the contents of NodePoolManagement from a JSON +// response object. +func flattenNodePoolManagementSlice(c *Client, i interface{}, res *NodePool) []NodePoolManagement { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolManagement{} + } + + if len(a) == 0 { + return []NodePoolManagement{} + } + + items := make([]NodePoolManagement, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolManagement(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandNodePoolManagement expands an instance of NodePoolManagement into a JSON +// request object. +func expandNodePoolManagement(c *Client, f *NodePoolManagement, res *NodePool) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.AutoRepair; !dcl.IsEmptyValueIndirect(v) { + m["autoRepair"] = v + } + + return m, nil +} + +// flattenNodePoolManagement flattens an instance of NodePoolManagement from a JSON +// response object. +func flattenNodePoolManagement(c *Client, i interface{}, res *NodePool) *NodePoolManagement { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &NodePoolManagement{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyNodePoolManagement + } + r.AutoRepair = dcl.FlattenBool(m["autoRepair"]) + + return r +} + +// flattenNodePoolStateEnumMap flattens the contents of NodePoolStateEnum from a JSON +// response object. 
+func flattenNodePoolStateEnumMap(c *Client, i interface{}, res *NodePool) map[string]NodePoolStateEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]NodePoolStateEnum{} + } + + if len(a) == 0 { + return map[string]NodePoolStateEnum{} + } + + items := make(map[string]NodePoolStateEnum) + for k, item := range a { + items[k] = *flattenNodePoolStateEnum(item.(interface{})) + } + + return items +} + +// flattenNodePoolStateEnumSlice flattens the contents of NodePoolStateEnum from a JSON +// response object. +func flattenNodePoolStateEnumSlice(c *Client, i interface{}, res *NodePool) []NodePoolStateEnum { + a, ok := i.([]interface{}) + if !ok { + return []NodePoolStateEnum{} + } + + if len(a) == 0 { + return []NodePoolStateEnum{} + } + + items := make([]NodePoolStateEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenNodePoolStateEnum(item.(interface{}))) + } + + return items +} + +// flattenNodePoolStateEnum asserts that an interface is a string, and returns a +// pointer to a *NodePoolStateEnum with the same value as that string. +func flattenNodePoolStateEnum(i interface{}) *NodePoolStateEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return NodePoolStateEnumRef(s) +} + +// This function returns a matcher that checks whether a serialized resource matches this resource +// in its parameters (as defined by the fields in a Get, which definitionally define resource +// identity). This is useful in extracting the element from a List call. 
+func (r *NodePool) matcher(c *Client) func([]byte) bool { + return func(b []byte) bool { + cr, err := unmarshalNodePool(b, c, r) + if err != nil { + c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.") + return false + } + nr := r.urlNormalized() + ncr := cr.urlNormalized() + c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr) + + if nr.Project == nil && ncr.Project == nil { + c.Config.Logger.Info("Both Project fields null - considering equal.") + } else if nr.Project == nil || ncr.Project == nil { + c.Config.Logger.Info("Only one Project field is null - considering unequal.") + return false + } else if *nr.Project != *ncr.Project { + return false + } + if nr.Location == nil && ncr.Location == nil { + c.Config.Logger.Info("Both Location fields null - considering equal.") + } else if nr.Location == nil || ncr.Location == nil { + c.Config.Logger.Info("Only one Location field is null - considering unequal.") + return false + } else if *nr.Location != *ncr.Location { + return false + } + if nr.Cluster == nil && ncr.Cluster == nil { + c.Config.Logger.Info("Both Cluster fields null - considering equal.") + } else if nr.Cluster == nil || ncr.Cluster == nil { + c.Config.Logger.Info("Only one Cluster field is null - considering unequal.") + return false + } else if *nr.Cluster != *ncr.Cluster { + return false + } + if nr.Name == nil && ncr.Name == nil { + c.Config.Logger.Info("Both Name fields null - considering equal.") + } else if nr.Name == nil || ncr.Name == nil { + c.Config.Logger.Info("Only one Name field is null - considering unequal.") + return false + } else if *nr.Name != *ncr.Name { + return false + } + return true + } +} + +type nodePoolDiff struct { + // The diff should include one or the other of RequiresRecreate or UpdateOp. 
+ RequiresRecreate bool + UpdateOp nodePoolApiOperation + FieldName string // used for error logging +} + +func convertFieldDiffsToNodePoolDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]nodePoolDiff, error) { + opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff) + // Map each operation name to the field diffs associated with it. + for _, fd := range fds { + for _, ro := range fd.ResultingOperation { + if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok { + fieldDiffs = append(fieldDiffs, fd) + opNamesToFieldDiffs[ro] = fieldDiffs + } else { + config.Logger.Infof("%s required due to diff: %v", ro, fd) + opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd} + } + } + } + var diffs []nodePoolDiff + // For each operation name, create a nodePoolDiff which contains the operation. + for opName, fieldDiffs := range opNamesToFieldDiffs { + // Use the first field diff's field name for logging required recreate error. + diff := nodePoolDiff{FieldName: fieldDiffs[0].FieldName} + if opName == "Recreate" { + diff.RequiresRecreate = true + } else { + apiOp, err := convertOpNameToNodePoolApiOperation(opName, fieldDiffs, opts...) + if err != nil { + return diffs, err + } + diff.UpdateOp = apiOp + } + diffs = append(diffs, diff) + } + return diffs, nil +} + +func convertOpNameToNodePoolApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (nodePoolApiOperation, error) { + switch opName { + + case "updateNodePoolUpdateAzureNodePoolOperation": + return &updateNodePoolUpdateAzureNodePoolOperation{FieldDiffs: fieldDiffs}, nil + + default: + return nil, fmt.Errorf("no such operation with name: %v", opName) + } +} + +func extractNodePoolFields(r *NodePool) error { + vConfig := r.Config + if vConfig == nil { + // note: explicitly not the empty object. 
+ vConfig = &NodePoolConfig{} + } + if err := extractNodePoolConfigFields(r, vConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vConfig) { + r.Config = vConfig + } + vAutoscaling := r.Autoscaling + if vAutoscaling == nil { + // note: explicitly not the empty object. + vAutoscaling = &NodePoolAutoscaling{} + } + if err := extractNodePoolAutoscalingFields(r, vAutoscaling); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAutoscaling) { + r.Autoscaling = vAutoscaling + } + vMaxPodsConstraint := r.MaxPodsConstraint + if vMaxPodsConstraint == nil { + // note: explicitly not the empty object. + vMaxPodsConstraint = &NodePoolMaxPodsConstraint{} + } + if err := extractNodePoolMaxPodsConstraintFields(r, vMaxPodsConstraint); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMaxPodsConstraint) { + r.MaxPodsConstraint = vMaxPodsConstraint + } + vManagement := r.Management + if vManagement == nil { + // note: explicitly not the empty object. + vManagement = &NodePoolManagement{} + } + if err := extractNodePoolManagementFields(r, vManagement); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vManagement) { + r.Management = vManagement + } + return nil +} +func extractNodePoolConfigFields(r *NodePool, o *NodePoolConfig) error { + vRootVolume := o.RootVolume + if vRootVolume == nil { + // note: explicitly not the empty object. + vRootVolume = &NodePoolConfigRootVolume{} + } + if err := extractNodePoolConfigRootVolumeFields(r, vRootVolume); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vRootVolume) { + o.RootVolume = vRootVolume + } + vSshConfig := o.SshConfig + if vSshConfig == nil { + // note: explicitly not the empty object. 
+ vSshConfig = &NodePoolConfigSshConfig{} + } + if err := extractNodePoolConfigSshConfigFields(r, vSshConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSshConfig) { + o.SshConfig = vSshConfig + } + vProxyConfig := o.ProxyConfig + if vProxyConfig == nil { + // note: explicitly not the empty object. + vProxyConfig = &NodePoolConfigProxyConfig{} + } + if err := extractNodePoolConfigProxyConfigFields(r, vProxyConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vProxyConfig) { + o.ProxyConfig = vProxyConfig + } + return nil +} +func extractNodePoolConfigRootVolumeFields(r *NodePool, o *NodePoolConfigRootVolume) error { + return nil +} +func extractNodePoolConfigSshConfigFields(r *NodePool, o *NodePoolConfigSshConfig) error { + return nil +} +func extractNodePoolConfigProxyConfigFields(r *NodePool, o *NodePoolConfigProxyConfig) error { + return nil +} +func extractNodePoolAutoscalingFields(r *NodePool, o *NodePoolAutoscaling) error { + return nil +} +func extractNodePoolMaxPodsConstraintFields(r *NodePool, o *NodePoolMaxPodsConstraint) error { + return nil +} +func extractNodePoolManagementFields(r *NodePool, o *NodePoolManagement) error { + return nil +} + +func postReadExtractNodePoolFields(r *NodePool) error { + vConfig := r.Config + if vConfig == nil { + // note: explicitly not the empty object. + vConfig = &NodePoolConfig{} + } + if err := postReadExtractNodePoolConfigFields(r, vConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vConfig) { + r.Config = vConfig + } + vAutoscaling := r.Autoscaling + if vAutoscaling == nil { + // note: explicitly not the empty object. + vAutoscaling = &NodePoolAutoscaling{} + } + if err := postReadExtractNodePoolAutoscalingFields(r, vAutoscaling); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAutoscaling) { + r.Autoscaling = vAutoscaling + } + vMaxPodsConstraint := r.MaxPodsConstraint + if vMaxPodsConstraint == nil { + // note: explicitly not the empty object. 
+ vMaxPodsConstraint = &NodePoolMaxPodsConstraint{} + } + if err := postReadExtractNodePoolMaxPodsConstraintFields(r, vMaxPodsConstraint); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMaxPodsConstraint) { + r.MaxPodsConstraint = vMaxPodsConstraint + } + vManagement := r.Management + if vManagement == nil { + // note: explicitly not the empty object. + vManagement = &NodePoolManagement{} + } + if err := postReadExtractNodePoolManagementFields(r, vManagement); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vManagement) { + r.Management = vManagement + } + return nil +} +func postReadExtractNodePoolConfigFields(r *NodePool, o *NodePoolConfig) error { + vRootVolume := o.RootVolume + if vRootVolume == nil { + // note: explicitly not the empty object. + vRootVolume = &NodePoolConfigRootVolume{} + } + if err := extractNodePoolConfigRootVolumeFields(r, vRootVolume); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vRootVolume) { + o.RootVolume = vRootVolume + } + vSshConfig := o.SshConfig + if vSshConfig == nil { + // note: explicitly not the empty object. + vSshConfig = &NodePoolConfigSshConfig{} + } + if err := extractNodePoolConfigSshConfigFields(r, vSshConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSshConfig) { + o.SshConfig = vSshConfig + } + vProxyConfig := o.ProxyConfig + if vProxyConfig == nil { + // note: explicitly not the empty object. 
+ vProxyConfig = &NodePoolConfigProxyConfig{} + } + if err := extractNodePoolConfigProxyConfigFields(r, vProxyConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vProxyConfig) { + o.ProxyConfig = vProxyConfig + } + return nil +} +func postReadExtractNodePoolConfigRootVolumeFields(r *NodePool, o *NodePoolConfigRootVolume) error { + return nil +} +func postReadExtractNodePoolConfigSshConfigFields(r *NodePool, o *NodePoolConfigSshConfig) error { + return nil +} +func postReadExtractNodePoolConfigProxyConfigFields(r *NodePool, o *NodePoolConfigProxyConfig) error { + return nil +} +func postReadExtractNodePoolAutoscalingFields(r *NodePool, o *NodePoolAutoscaling) error { + return nil +} +func postReadExtractNodePoolMaxPodsConstraintFields(r *NodePool, o *NodePoolMaxPodsConstraint) error { + return nil +} +func postReadExtractNodePoolManagementFields(r *NodePool, o *NodePoolManagement) error { + return nil +} diff --git a/mmv1/third_party/terraform/services/containerazure/node_pool_schema.go.tmpl b/mmv1/third_party/terraform/services/containerazure/node_pool_schema.go.tmpl new file mode 100644 index 000000000000..2a7a424f1343 --- /dev/null +++ b/mmv1/third_party/terraform/services/containerazure/node_pool_schema.go.tmpl @@ -0,0 +1,417 @@ +package containerazure + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +func DCLNodePoolSchema() *dcl.Schema { + return &dcl.Schema{ + Info: &dcl.Info{ + Title: "ContainerAzure/NodePool", + Description: "An Anthos node pool running on Azure.", + StructName: "NodePool", + Reference: &dcl.Link{ + Text: "API reference", + URL: "https://cloud.google.com/kubernetes-engine/multi-cloud/docs/reference/rest/v1/projects.locations.azureClusters.azureNodePools", + }, + Guides: []*dcl.Link{ + &dcl.Link{ + Text: "Multicloud overview", + URL: "https://cloud.google.com/kubernetes-engine/multi-cloud/docs", + }, + }, + }, + Paths: &dcl.Paths{ + Get: &dcl.Path{ + Description: "The function 
used to get information about a NodePool", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "nodePool", + Required: true, + Description: "A full instance of a NodePool", + }, + }, + }, + Apply: &dcl.Path{ + Description: "The function used to apply information about a NodePool", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "nodePool", + Required: true, + Description: "A full instance of a NodePool", + }, + }, + }, + Delete: &dcl.Path{ + Description: "The function used to delete a NodePool", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "nodePool", + Required: true, + Description: "A full instance of a NodePool", + }, + }, + }, + DeleteAll: &dcl.Path{ + Description: "The function used to delete all NodePool", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "project", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + dcl.PathParameters{ + Name: "location", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + dcl.PathParameters{ + Name: "cluster", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + }, + }, + List: &dcl.Path{ + Description: "The function used to list information about many NodePool", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "project", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + dcl.PathParameters{ + Name: "location", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + dcl.PathParameters{ + Name: "cluster", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + }, + }, + }, + Components: &dcl.Components{ + Schemas: map[string]*dcl.Component{ + "NodePool": &dcl.Component{ + Title: "NodePool", + ID: "projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/azureClusters/{{ "{{" }}cluster{{ "}}" }}/azureNodePools/{{ "{{" }}name{{ "}}" }}", 
+ ParentContainer: "project", + HasCreate: true, + SchemaProperty: dcl.Property{ + Type: "object", + Required: []string{ + "name", + "version", + "config", + "subnetId", + "autoscaling", + "maxPodsConstraint", + "project", + "location", + "cluster", + }, + Properties: map[string]*dcl.Property{ + "annotations": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "Annotations", + Description: "Optional. Annotations on the node pool. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. Keys can have 2 segments: prefix (optional) and name (required), separated by a slash (/). Prefix must be a DNS subdomain. Name must be 63 characters or less, begin and end with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics between.", + }, + "autoscaling": &dcl.Property{ + Type: "object", + GoName: "Autoscaling", + GoType: "NodePoolAutoscaling", + Description: "Autoscaler configuration for this node pool.", + Required: []string{ + "minNodeCount", + "maxNodeCount", + }, + Properties: map[string]*dcl.Property{ + "maxNodeCount": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "MaxNodeCount", + Description: "Maximum number of nodes in the node pool. Must be >= min_node_count.", + }, + "minNodeCount": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "MinNodeCount", + Description: "Minimum number of nodes in the node pool. Must be >= 1 and <= max_node_count.", + }, + }, + }, + "azureAvailabilityZone": &dcl.Property{ + Type: "string", + GoName: "AzureAvailabilityZone", + Description: "Optional. The Azure availability zone of the nodes in this nodepool. 
When unspecified, it defaults to `1`.", + Immutable: true, + ServerDefault: true, + }, + "cluster": &dcl.Property{ + Type: "string", + GoName: "Cluster", + Description: "The azureCluster for the resource", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Gkemulticloud/Cluster", + Field: "name", + Parent: true, + }, + }, + Parameter: true, + }, + "config": &dcl.Property{ + Type: "object", + GoName: "Config", + GoType: "NodePoolConfig", + Description: "The node configuration of the node pool.", + Required: []string{ + "sshConfig", + }, + Properties: map[string]*dcl.Property{ +{{- if ne $.TargetVersionName "ga" }} + "imageType": &dcl.Property{ + Type: "string", + GoName: "ImageType", + Description: "The OS image type to use on node pool instances.", + Immutable: true, + ServerDefault: true, + }, +{{- end }} + "labels": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "Labels", + Description: "Optional. The initial labels assigned to nodes of this node pool. An object containing a list of \"key\": value pairs. Example: { \"name\": \"wrench\", \"mass\": \"1.3kg\", \"count\": \"3\" }.", + Immutable: true, + }, + "proxyConfig": &dcl.Property{ + Type: "object", + GoName: "ProxyConfig", + GoType: "NodePoolConfigProxyConfig", + Description: "Proxy configuration for outbound HTTP(S) traffic.", + Immutable: true, + Required: []string{ + "resourceGroupId", + "secretId", + }, + Properties: map[string]*dcl.Property{ + "resourceGroupId": &dcl.Property{ + Type: "string", + GoName: "ResourceGroupId", + Description: "The ARM ID the of the resource group containing proxy keyvault. Resource group ids are formatted as `/subscriptions//resourceGroups/`", + Immutable: true, + }, + "secretId": &dcl.Property{ + Type: "string", + GoName: "SecretId", + Description: "The URL the of the proxy setting secret with its version. 
Secret ids are formatted as `https:.vault.azure.net/secrets//`.", + Immutable: true, + }, + }, + }, + "rootVolume": &dcl.Property{ + Type: "object", + GoName: "RootVolume", + GoType: "NodePoolConfigRootVolume", + Description: "Optional. Configuration related to the root volume provisioned for each node pool machine. When unspecified, it defaults to a 32-GiB Azure Disk.", + Immutable: true, + ServerDefault: true, + Properties: map[string]*dcl.Property{ + "sizeGib": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "SizeGib", + Description: "Optional. The size of the disk, in GiBs. When unspecified, a default value is provided. See the specific reference in the parent resource.", + Immutable: true, + ServerDefault: true, + }, + }, + }, + "sshConfig": &dcl.Property{ + Type: "object", + GoName: "SshConfig", + GoType: "NodePoolConfigSshConfig", + Description: "SSH configuration for how to access the node pool machines.", + Required: []string{ + "authorizedKey", + }, + Properties: map[string]*dcl.Property{ + "authorizedKey": &dcl.Property{ + Type: "string", + GoName: "AuthorizedKey", + Description: "The SSH public key data for VMs managed by Anthos. This accepts the authorized_keys file format used in OpenSSH according to the sshd(8) manual page.", + }, + }, + }, + "tags": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "Tags", + Description: "Optional. A set of tags to apply to all underlying Azure resources for this node pool. This currently only includes Virtual Machine Scale Sets. Specify at most 50 pairs containing alphanumerics, spaces, and symbols (.+-=_:@/). Keys can be up to 127 Unicode characters. Values can be up to 255 Unicode characters.", + Immutable: true, + }, + "vmSize": &dcl.Property{ + Type: "string", + GoName: "VmSize", + Description: "Optional. The Azure VM size name. Example: `Standard_DS2_v2`. See (/anthos/clusters/docs/azure/reference/supported-vms) for options. 
When unspecified, it defaults to `Standard_DS2_v2`.", + Immutable: true, + ServerDefault: true, + }, + }, + }, + "createTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "CreateTime", + ReadOnly: true, + Description: "Output only. The time at which this node pool was created.", + Immutable: true, + }, + "etag": &dcl.Property{ + Type: "string", + GoName: "Etag", + ReadOnly: true, + Description: "Allows clients to perform consistent read-modify-writes through optimistic concurrency control. May be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.", + Immutable: true, + }, + "location": &dcl.Property{ + Type: "string", + GoName: "Location", + Description: "The location for the resource", + Immutable: true, + Parameter: true, + }, + "management": &dcl.Property{ + Type: "object", + GoName: "Management", + GoType: "NodePoolManagement", + Description: "The Management configuration for this node pool.", + Properties: map[string]*dcl.Property{ + "autoRepair": &dcl.Property{ + Type: "boolean", + GoName: "AutoRepair", + Description: "Optional. 
Whether or not the nodes will be automatically repaired.", + }, + }, + }, + "maxPodsConstraint": &dcl.Property{ + Type: "object", + GoName: "MaxPodsConstraint", + GoType: "NodePoolMaxPodsConstraint", + Description: "The constraint on the maximum number of pods that can be run simultaneously on a node in the node pool.", + Immutable: true, + Required: []string{ + "maxPodsPerNode", + }, + Properties: map[string]*dcl.Property{ + "maxPodsPerNode": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "MaxPodsPerNode", + Description: "The maximum number of pods to schedule on a single node.", + Immutable: true, + }, + }, + }, + "name": &dcl.Property{ + Type: "string", + GoName: "Name", + Description: "The name of this resource.", + Immutable: true, + HasLongForm: true, + }, + "project": &dcl.Property{ + Type: "string", + GoName: "Project", + Description: "The project for the resource", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Cloudresourcemanager/Project", + Field: "name", + Parent: true, + }, + }, + Parameter: true, + }, + "reconciling": &dcl.Property{ + Type: "boolean", + GoName: "Reconciling", + ReadOnly: true, + Description: "Output only. If set, there are currently pending changes to the node pool.", + Immutable: true, + }, + "state": &dcl.Property{ + Type: "string", + GoName: "State", + GoType: "NodePoolStateEnum", + ReadOnly: true, + Description: "Output only. The current state of the node pool. Possible values: STATE_UNSPECIFIED, PROVISIONING, RUNNING, RECONCILING, STOPPING, ERROR, DEGRADED", + Immutable: true, + Enum: []string{ + "STATE_UNSPECIFIED", + "PROVISIONING", + "RUNNING", + "RECONCILING", + "STOPPING", + "ERROR", + "DEGRADED", + }, + }, + "subnetId": &dcl.Property{ + Type: "string", + GoName: "SubnetId", + Description: "The ARM ID of the subnet where the node pool VMs run. 
Make sure it's a subnet under the virtual network in the cluster configuration.", + Immutable: true, + }, + "uid": &dcl.Property{ + Type: "string", + GoName: "Uid", + ReadOnly: true, + Description: "Output only. A globally unique identifier for the node pool.", + Immutable: true, + }, + "updateTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "UpdateTime", + ReadOnly: true, + Description: "Output only. The time at which this node pool was last updated.", + Immutable: true, + }, + "version": &dcl.Property{ + Type: "string", + GoName: "Version", + Description: "The Kubernetes version (e.g. `1.19.10-gke.1000`) running on this node pool.", + }, + }, + }, + }, + }, + }, + } +} diff --git a/mmv1/third_party/terraform/services/containerazure/provider_dcl_client_creation.go b/mmv1/third_party/terraform/services/containerazure/provider_dcl_client_creation.go new file mode 100644 index 000000000000..e20d1a835114 --- /dev/null +++ b/mmv1/third_party/terraform/services/containerazure/provider_dcl_client_creation.go @@ -0,0 +1,30 @@ +package containerazure + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "time" +) + +func NewDCLContainerAzureClient(config *transport_tpg.Config, userAgent, billingProject string, timeout time.Duration) *Client { + configOptions := []dcl.ConfigOption{ + dcl.WithHTTPClient(config.Client), + dcl.WithUserAgent(userAgent), + dcl.WithLogger(dcl.DCLLogger{}), + dcl.WithBasePath(config.ContainerAzureBasePath), + } + + if timeout != 0 { + configOptions = append(configOptions, dcl.WithTimeout(timeout)) + } + + if config.UserProjectOverride { + configOptions = append(configOptions, dcl.WithUserProjectOverride()) + if billingProject != "" { + configOptions = append(configOptions, dcl.WithBillingProject(billingProject)) + } + } + + dclConfig := dcl.NewConfig(configOptions...) 
+ return NewClient(dclConfig) +} diff --git a/mmv1/third_party/terraform/services/containerazure/resource_container_azure_client.go b/mmv1/third_party/terraform/services/containerazure/resource_container_azure_client.go new file mode 100644 index 000000000000..9892900b9e40 --- /dev/null +++ b/mmv1/third_party/terraform/services/containerazure/resource_container_azure_client.go @@ -0,0 +1,272 @@ +package containerazure + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceContainerAzureClient() *schema.Resource { + return &schema.Resource{ + Create: resourceContainerAzureClientCreate, + Read: resourceContainerAzureClientRead, + Delete: resourceContainerAzureClientDelete, + + Importer: &schema.ResourceImporter{ + State: resourceContainerAzureClientImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + ), + + Schema: map[string]*schema.Schema{ + "application_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The Azure Active Directory Application ID.", + }, + + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The location for the resource", + }, + + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The name of this resource.", + }, + + "tenant_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The 
Azure Active Directory Tenant ID.", + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The project for the resource", + }, + + "certificate": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The PEM encoded x509 certificate.", + }, + + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time at which this resource was created.", + }, + + "uid": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. A globally unique identifier for the client.", + }, + }, + } +} + +func resourceContainerAzureClientCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &AzureClient{ + ApplicationId: dcl.String(d.Get("application_id").(string)), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + TenantId: dcl.String(d.Get("tenant_id").(string)), + Project: dcl.String(project), + } + + id, err := obj.ID() + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := 
client.ApplyClient(context.Background(), obj, directive...) + + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating Client: %s", err) + } + + log.Printf("[DEBUG] Finished creating Client %q: %#v", d.Id(), res) + + return resourceContainerAzureClientRead(d, meta) +} + +func resourceContainerAzureClientRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &AzureClient{ + ApplicationId: dcl.String(d.Get("application_id").(string)), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + TenantId: dcl.String(d.Get("tenant_id").(string)), + Project: dcl.String(project), + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetClient(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("ContainerAzureClient %q", d.Id()) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("application_id", res.ApplicationId); err != nil { + return fmt.Errorf("error setting application_id in state: %s", err) + } + if err = d.Set("location", res.Location); err != nil { + 
return fmt.Errorf("error setting location in state: %s", err) + } + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + if err = d.Set("tenant_id", res.TenantId); err != nil { + return fmt.Errorf("error setting tenant_id in state: %s", err) + } + if err = d.Set("project", res.Project); err != nil { + return fmt.Errorf("error setting project in state: %s", err) + } + if err = d.Set("certificate", res.Certificate); err != nil { + return fmt.Errorf("error setting certificate in state: %s", err) + } + if err = d.Set("create_time", res.CreateTime); err != nil { + return fmt.Errorf("error setting create_time in state: %s", err) + } + if err = d.Set("uid", res.Uid); err != nil { + return fmt.Errorf("error setting uid in state: %s", err) + } + + return nil +} + +func resourceContainerAzureClientDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &AzureClient{ + ApplicationId: dcl.String(d.Get("application_id").(string)), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + TenantId: dcl.String(d.Get("tenant_id").(string)), + Project: dcl.String(project), + } + + log.Printf("[DEBUG] Deleting Client %q", d.Id()) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = 
bp + } + if err := client.DeleteClient(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting Client: %s", err) + } + + log.Printf("[DEBUG] Finished deleting Client %q", d.Id()) + return nil +} + +func resourceContainerAzureClientImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + if err := tpgresource.ParseImportId([]string{ + "projects/(?P<project>[^/]+)/locations/(?P<location>[^/]+)/azureClients/(?P<name>[^/]+)", + "(?P<project>[^/]+)/(?P<location>[^/]+)/(?P<name>[^/]+)", + "(?P<location>[^/]+)/(?P<name>[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/azureClients/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} diff --git a/mmv1/third_party/terraform/services/containerazure/resource_container_azure_client_generated_test.go.tmpl b/mmv1/third_party/terraform/services/containerazure/resource_container_azure_client_generated_test.go.tmpl new file mode 100644 index 000000000000..55a1e33739d4 --- /dev/null +++ b/mmv1/third_party/terraform/services/containerazure/resource_container_azure_client_generated_test.go.tmpl @@ -0,0 +1,98 @@ +package containerazure_test + +import ( + "context" + "fmt" + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" +{{- if eq $.TargetVersionName "ga" }} + containerazure "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containerazure" +{{- else }} + containerazure "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containerazure/beta" +{{- end }} + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + 
"github.com/hashicorp/terraform-provider-google/google/envvar" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func TestAccContainerAzureClient_BasicHandWritten(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "azure_app": "00000000-0000-0000-0000-17aad2f0f61f", + "azure_tenant": "00000000-0000-0000-0000-17aad2f0f61f", + "project_name": envvar.GetTestProjectFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerAzureClientDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerAzureClient_BasicHandWritten(context), + }, + { + ResourceName: "google_container_azure_client.primary", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccContainerAzureClient_BasicHandWritten(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_container_azure_client" "primary" { + application_id = "%{azure_app}" + location = "us-west1" + name = "tf-test-client-name%{random_suffix}" + tenant_id = "%{azure_tenant}" + project = "%{project_name}" +} + +`, context) +} + +func testAccCheckContainerAzureClientDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_container_azure_client" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + billingProject := "" + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + obj := &containerazure.AzureClient{ + ApplicationId: dcl.String(rs.Primary.Attributes["application_id"]), + Location: dcl.String(rs.Primary.Attributes["location"]), + Name: dcl.String(rs.Primary.Attributes["name"]), + 
TenantId: dcl.String(rs.Primary.Attributes["tenant_id"]), + Project: dcl.StringOrNil(rs.Primary.Attributes["project"]), + Certificate: dcl.StringOrNil(rs.Primary.Attributes["certificate"]), + CreateTime: dcl.StringOrNil(rs.Primary.Attributes["create_time"]), + Uid: dcl.StringOrNil(rs.Primary.Attributes["uid"]), + } + + client := transport_tpg.NewDCLContainerAzureClient(config, config.UserAgent, billingProject, 0) + _, err := client.GetClient(context.Background(), obj) + if err == nil { + return fmt.Errorf("google_container_azure_client still exists %v", obj) + } + } + return nil + } +} diff --git a/mmv1/third_party/terraform/services/containerazure/resource_container_azure_cluster.go.tmpl b/mmv1/third_party/terraform/services/containerazure/resource_container_azure_cluster.go.tmpl new file mode 100644 index 000000000000..0e40dc78cefd --- /dev/null +++ b/mmv1/third_party/terraform/services/containerazure/resource_container_azure_cluster.go.tmpl @@ -0,0 +1,1441 @@ +package containerazure + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" +{{- if eq $.TargetVersionName "ga" }} + containerazure "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containerazure" +{{- else }} + containerazure "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containerazure/beta" +{{- end }} + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceContainerAzureCluster() *schema.Resource { + return &schema.Resource{ + Create: resourceContainerAzureClusterCreate, + Read: resourceContainerAzureClusterRead, + Update: 
resourceContainerAzureClusterUpdate, + Delete: resourceContainerAzureClusterDelete, + + Importer: &schema.ResourceImporter{ + State: resourceContainerAzureClusterImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + tpgresource.SetAnnotationsDiff, + ), + + Schema: map[string]*schema.Schema{ + "authorization": { + Type: schema.TypeList, + Required: true, + Description: "Configuration related to the cluster RBAC settings.", + MaxItems: 1, + Elem: ContainerAzureClusterAuthorizationSchema(), + }, + + "azure_region": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The Azure region where the cluster runs. Each Google Cloud region supports a subset of nearby Azure regions. You can call to list all supported Azure regions within a given Google Cloud region.", + }, + + "control_plane": { + Type: schema.TypeList, + Required: true, + Description: "Configuration related to the cluster control plane.", + MaxItems: 1, + Elem: ContainerAzureClusterControlPlaneSchema(), + }, + + "fleet": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Fleet configuration.", + MaxItems: 1, + Elem: ContainerAzureClusterFleetSchema(), + }, + + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The location for the resource", + }, + + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The name of this resource.", + }, + + "networking": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Cluster-wide networking configuration.", + MaxItems: 1, + Elem: ContainerAzureClusterNetworkingSchema(), + }, + + "resource_group_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The ARM ID 
of the resource group where the cluster resources are deployed. For example: `/subscriptions/*/resourceGroups/*`", + }, + + "azure_services_authentication": { + Type: schema.TypeList, + Optional: true, + Description: "Azure authentication configuration for management of Azure resources", + MaxItems: 1, + Elem: ContainerAzureClusterAzureServicesAuthenticationSchema(), + ConflictsWith: []string{"client"}, + }, + + "client": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Name of the AzureClient. The `AzureClient` resource must reside on the same GCP project and region as the `AzureCluster`. `AzureClient` names are formatted as `projects//locations//azureClients/`. See Resource Names (https:cloud.google.com/apis/design/resource_names) for more details on Google Cloud resource names.", + ConflictsWith: []string{"azure_services_authentication"}, + }, + + "description": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. A human readable description of this cluster. Cannot be longer than 255 UTF-8 encoded bytes.", + }, + + "effective_annotations": { + Type: schema.TypeMap, + Computed: true, + ForceNew: true, + Description: "All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services.", + }, + +{{- if ne $.TargetVersionName "ga" }} + "logging_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: "Logging configuration.", + MaxItems: 1, + Elem: ContainerAzureClusterLoggingConfigSchema(), + }, + +{{- end }} + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The project for the resource", + }, + + "annotations": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. Annotations on the cluster. 
This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. Keys can have 2 segments: prefix (optional) and name (required), separated by a slash (/). Prefix must be a DNS subdomain. Name must be 63 characters or less, begin and end with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics between.\n\n**Note**: This field is non-authoritative, and will only manage the annotations present in your configuration.\nPlease refer to the field `effective_annotations` for all of the annotations present on the resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time at which this cluster was created.", + }, + + "endpoint": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The endpoint of the cluster's API server.", + }, + + "etag": { + Type: schema.TypeString, + Computed: true, + Description: "Allows clients to perform consistent read-modify-writes through optimistic concurrency control. May be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.", + }, + + "reconciling": { + Type: schema.TypeBool, + Computed: true, + Description: "Output only. If set, there are currently changes in flight to the cluster.", + }, + + "state": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The current state of the cluster. Possible values: STATE_UNSPECIFIED, PROVISIONING, RUNNING, RECONCILING, STOPPING, ERROR, DEGRADED", + }, + + "uid": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. A globally unique identifier for the cluster.", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. 
The time at which this cluster was last updated.", + }, + + "workload_identity_config": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. Workload Identity settings.", + Elem: ContainerAzureClusterWorkloadIdentityConfigSchema(), + }, + }, + } +} + +func ContainerAzureClusterAuthorizationSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "admin_users": { + Type: schema.TypeList, + Required: true, + Description: "Users that can perform operations as a cluster admin. A new ClusterRoleBinding will be created to grant the cluster-admin ClusterRole to the users. Up to ten admin users can be provided. For more info on RBAC, see https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles", + Elem: ContainerAzureClusterAuthorizationAdminUsersSchema(), + }, + + "admin_groups": { + Type: schema.TypeList, + Optional: true, + Description: "Groups of users that can perform operations as a cluster admin. A managed ClusterRoleBinding will be created to grant the `cluster-admin` ClusterRole to the groups. Up to ten admin groups can be provided. For more info on RBAC, see https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles", + Elem: ContainerAzureClusterAuthorizationAdminGroupsSchema(), + }, + }, + } +} + +func ContainerAzureClusterAuthorizationAdminUsersSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "username": { + Type: schema.TypeString, + Required: true, + Description: "The name of the user, e.g. `my-gcp-id@gmail.com`.", + }, + }, + } +} + +func ContainerAzureClusterAuthorizationAdminGroupsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "group": { + Type: schema.TypeString, + Required: true, + Description: "The name of the group, e.g. 
`my-group@domain.com`.", + }, + }, + } +} + +func ContainerAzureClusterControlPlaneSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ssh_config": { + Type: schema.TypeList, + Required: true, + Description: "SSH configuration for how to access the underlying control plane machines.", + MaxItems: 1, + Elem: ContainerAzureClusterControlPlaneSshConfigSchema(), + }, + + "subnet_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The ARM ID of the subnet where the control plane VMs are deployed. Example: `/subscriptions//resourceGroups//providers/Microsoft.Network/virtualNetworks//subnets/default`.", + }, + + "version": { + Type: schema.TypeString, + Required: true, + Description: "The Kubernetes version to run on control plane replicas (e.g. `1.19.10-gke.1000`). You can list all supported versions on a given Google Cloud region by calling GetAzureServerConfig.", + }, + + "database_encryption": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Configuration related to application-layer secrets encryption.", + MaxItems: 1, + Elem: ContainerAzureClusterControlPlaneDatabaseEncryptionSchema(), + }, + + "main_volume": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. Configuration related to the main volume provisioned for each control plane replica. The main volume is in charge of storing all of the cluster's etcd state. 
When unspecified, it defaults to a 8-GiB Azure Disk.", + MaxItems: 1, + Elem: ContainerAzureClusterControlPlaneMainVolumeSchema(), + }, + + "proxy_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Proxy configuration for outbound HTTP(S) traffic.", + MaxItems: 1, + Elem: ContainerAzureClusterControlPlaneProxyConfigSchema(), + }, + + "replica_placements": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Configuration for where to place the control plane replicas. Up to three replica placement instances can be specified. If replica_placements is set, the replica placement instances will be applied to the three control plane replicas as evenly as possible.", + Elem: ContainerAzureClusterControlPlaneReplicaPlacementsSchema(), + }, + + "root_volume": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. Configuration related to the root volume provisioned for each control plane replica. When unspecified, it defaults to 32-GiB Azure Disk.", + MaxItems: 1, + Elem: ContainerAzureClusterControlPlaneRootVolumeSchema(), + }, + + "tags": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. A set of tags to apply to all underlying control plane Azure resources.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "vm_size": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: "Optional. The Azure VM size name. Example: `Standard_DS2_v2`. For available VM sizes, see https://docs.microsoft.com/en-us/azure/virtual-machines/vm-naming-conventions. When unspecified, it defaults to `Standard_DS2_v2`.", + }, + }, + } +} + +func ContainerAzureClusterControlPlaneSshConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "authorized_key": { + Type: schema.TypeString, + Required: true, + Description: "The SSH public key data for VMs managed by Anthos. 
This accepts the authorized_keys file format used in OpenSSH according to the sshd(8) manual page.", + }, + }, + } +} + +func ContainerAzureClusterControlPlaneDatabaseEncryptionSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The ARM ID of the Azure Key Vault key to encrypt / decrypt data. For example: `/subscriptions//resourceGroups//providers/Microsoft.KeyVault/vaults//keys/` Encryption will always take the latest version of the key and hence specific version is not supported.", + }, + }, + } +} + +func ContainerAzureClusterControlPlaneMainVolumeSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "size_gib": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The size of the disk, in GiBs. When unspecified, a default value is provided. See the specific reference in the parent resource.", + }, + }, + } +} + +func ContainerAzureClusterControlPlaneProxyConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "resource_group_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The ARM ID the of the resource group containing proxy keyvault. Resource group ids are formatted as `/subscriptions//resourceGroups/`", + }, + + "secret_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The URL the of the proxy setting secret with its version. 
Secret ids are formatted as `https:.vault.azure.net/secrets//`.", + }, + }, + } +} + +func ContainerAzureClusterControlPlaneReplicaPlacementsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "azure_availability_zone": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "For a given replica, the Azure availability zone where to provision the control plane VM and the ETCD disk.", + }, + + "subnet_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "For a given replica, the ARM ID of the subnet where the control plane VM is deployed. Make sure it's a subnet under the virtual network in the cluster configuration.", + }, + }, + } +} + +func ContainerAzureClusterControlPlaneRootVolumeSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "size_gib": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The size of the disk, in GiBs. When unspecified, a default value is provided. See the specific reference in the parent resource.", + }, + }, + } +} + +func ContainerAzureClusterFleetSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The number of the Fleet host project where this cluster will be registered.", + }, + + "membership": { + Type: schema.TypeString, + Computed: true, + Description: "The name of the managed Hub Membership resource associated to this cluster. 
Membership names are formatted as projects//locations/global/membership/.", + }, + }, + } +} + +func ContainerAzureClusterNetworkingSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "pod_address_cidr_blocks": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "The IP address range of the pods in this cluster, in CIDR notation (e.g. `10.96.0.0/14`). All pods in the cluster get assigned a unique RFC1918 IPv4 address from these ranges. Only a single range is supported. This field cannot be changed after creation.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "service_address_cidr_blocks": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "The IP address range for services in this cluster, in CIDR notation (e.g. `10.96.0.0/14`). All services in the cluster get assigned a unique RFC1918 IPv4 address from these ranges. Only a single range is supported. This field cannot be changed after creating a cluster.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "virtual_network_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The Azure Resource Manager (ARM) ID of the VNet associated with your cluster. All components in the cluster (i.e. control plane and node pools) run on a single VNet. 
Example: `/subscriptions/*/resourceGroups/*/providers/Microsoft.Network/virtualNetworks/*` This field cannot be changed after creation.", + }, + }, + } +} + +func ContainerAzureClusterAzureServicesAuthenticationSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "application_id": { + Type: schema.TypeString, + Required: true, + Description: "The Azure Active Directory Application ID for Authentication configuration.", + }, + + "tenant_id": { + Type: schema.TypeString, + Required: true, + Description: "The Azure Active Directory Tenant ID for Authentication configuration.", +{{- if ne $.TargetVersionName "ga" }} + }, + }, + } +} + +func ContainerAzureClusterLoggingConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "component_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: "Configuration of the logging components.", + MaxItems: 1, + Elem: ContainerAzureClusterLoggingConfigComponentConfigSchema(), + }, + }, + } +} + +func ContainerAzureClusterLoggingConfigComponentConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable_components": { + Type: schema.TypeList, + Computed: true, + Optional: true, + DiffSuppressFunc: tpgresource.CaseDiffSuppress, + Description: "Components of the logging configuration to be enabled.", + Elem: &schema.Schema{Type: schema.TypeString}, +{{- end }} + }, + }, + } +} + +func ContainerAzureClusterWorkloadIdentityConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "identity_provider": { + Type: schema.TypeString, + Computed: true, + Description: "The ID of the OIDC Identity Provider (IdP) associated to the Workload Identity Pool.", + }, + + "issuer_uri": { + Type: schema.TypeString, + Computed: true, + Description: "The OIDC issuer URL for this cluster.", + }, + + "workload_pool": { + Type: schema.TypeString, + Computed: 
true, + Description: "The Workload Identity Pool associated to the cluster.", + }, + }, + } +} + +func resourceContainerAzureClusterCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &containerazure.Cluster{ + Authorization: expandContainerAzureClusterAuthorization(d.Get("authorization")), + AzureRegion: dcl.String(d.Get("azure_region").(string)), + ControlPlane: expandContainerAzureClusterControlPlane(d.Get("control_plane")), + Fleet: expandContainerAzureClusterFleet(d.Get("fleet")), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Networking: expandContainerAzureClusterNetworking(d.Get("networking")), + ResourceGroupId: dcl.String(d.Get("resource_group_id").(string)), + AzureServicesAuthentication: expandContainerAzureClusterAzureServicesAuthentication(d.Get("azure_services_authentication")), + Client: dcl.String(d.Get("client").(string)), + Description: dcl.String(d.Get("description").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), +{{- if ne $.TargetVersionName "ga" }} + LoggingConfig: expandContainerAzureClusterLoggingConfig(d.Get("logging_config")), +{{- end }} + Project: dcl.String(project), + } + + id, err := obj.ID() + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := transport_tpg.NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, 
client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyCluster(context.Background(), obj, directive...) + + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating Cluster: %s", err) + } + + log.Printf("[DEBUG] Finished creating Cluster %q: %#v", d.Id(), res) + + return resourceContainerAzureClusterRead(d, meta) +} + +func resourceContainerAzureClusterRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &containerazure.Cluster{ + Authorization: expandContainerAzureClusterAuthorization(d.Get("authorization")), + AzureRegion: dcl.String(d.Get("azure_region").(string)), + ControlPlane: expandContainerAzureClusterControlPlane(d.Get("control_plane")), + Fleet: expandContainerAzureClusterFleet(d.Get("fleet")), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Networking: expandContainerAzureClusterNetworking(d.Get("networking")), + ResourceGroupId: dcl.String(d.Get("resource_group_id").(string)), + AzureServicesAuthentication: expandContainerAzureClusterAzureServicesAuthentication(d.Get("azure_services_authentication")), + Client: dcl.String(d.Get("client").(string)), + Description: dcl.String(d.Get("description").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), +{{- if ne $.TargetVersionName "ga" }} + LoggingConfig: expandContainerAzureClusterLoggingConfig(d.Get("logging_config")), +{{- end }} + Project: dcl.String(project), + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + 
return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := transport_tpg.NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetCluster(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("ContainerAzureCluster %q", d.Id()) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("authorization", flattenContainerAzureClusterAuthorization(res.Authorization)); err != nil { + return fmt.Errorf("error setting authorization in state: %s", err) + } + if err = d.Set("azure_region", res.AzureRegion); err != nil { + return fmt.Errorf("error setting azure_region in state: %s", err) + } + if err = d.Set("control_plane", flattenContainerAzureClusterControlPlane(res.ControlPlane)); err != nil { + return fmt.Errorf("error setting control_plane in state: %s", err) + } + if err = d.Set("fleet", flattenContainerAzureClusterFleet(res.Fleet)); err != nil { + return fmt.Errorf("error setting fleet in state: %s", err) + } + if err = d.Set("location", res.Location); err != nil { + return fmt.Errorf("error setting location in state: %s", err) + } + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + if err = d.Set("networking", flattenContainerAzureClusterNetworking(res.Networking)); err != nil { + return fmt.Errorf("error setting networking in state: %s", err) + } + if err = d.Set("resource_group_id", res.ResourceGroupId); err != nil { + return fmt.Errorf("error setting resource_group_id in state: %s", err) + } + if err = 
d.Set("azure_services_authentication", flattenContainerAzureClusterAzureServicesAuthentication(res.AzureServicesAuthentication)); err != nil { + return fmt.Errorf("error setting azure_services_authentication in state: %s", err) + } + if err = d.Set("client", res.Client); err != nil { + return fmt.Errorf("error setting client in state: %s", err) + } + if err = d.Set("description", res.Description); err != nil { + return fmt.Errorf("error setting description in state: %s", err) + } + if err = d.Set("effective_annotations", res.Annotations); err != nil { + return fmt.Errorf("error setting effective_annotations in state: %s", err) +{{- if ne $.TargetVersionName "ga" }} + } + if err = d.Set("logging_config", flattenContainerAzureClusterLoggingConfig(res.LoggingConfig)); err != nil { + return fmt.Errorf("error setting logging_config in state: %s", err) +{{- end }} + } + if err = d.Set("project", res.Project); err != nil { + return fmt.Errorf("error setting project in state: %s", err) + } + if err = d.Set("annotations", flattenContainerAzureClusterAnnotations(res.Annotations, d)); err != nil { + return fmt.Errorf("error setting annotations in state: %s", err) + } + if err = d.Set("create_time", res.CreateTime); err != nil { + return fmt.Errorf("error setting create_time in state: %s", err) + } + if err = d.Set("endpoint", res.Endpoint); err != nil { + return fmt.Errorf("error setting endpoint in state: %s", err) + } + if err = d.Set("etag", res.Etag); err != nil { + return fmt.Errorf("error setting etag in state: %s", err) + } + if err = d.Set("reconciling", res.Reconciling); err != nil { + return fmt.Errorf("error setting reconciling in state: %s", err) + } + if err = d.Set("state", res.State); err != nil { + return fmt.Errorf("error setting state in state: %s", err) + } + if err = d.Set("uid", res.Uid); err != nil { + return fmt.Errorf("error setting uid in state: %s", err) + } + if err = d.Set("update_time", res.UpdateTime); err != nil { + return fmt.Errorf("error 
setting update_time in state: %s", err) + } + if err = d.Set("workload_identity_config", flattenContainerAzureClusterWorkloadIdentityConfig(res.WorkloadIdentityConfig)); err != nil { + return fmt.Errorf("error setting workload_identity_config in state: %s", err) + } + + return nil +} +func resourceContainerAzureClusterUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &containerazure.Cluster{ + Authorization: expandContainerAzureClusterAuthorization(d.Get("authorization")), + AzureRegion: dcl.String(d.Get("azure_region").(string)), + ControlPlane: expandContainerAzureClusterControlPlane(d.Get("control_plane")), + Fleet: expandContainerAzureClusterFleet(d.Get("fleet")), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Networking: expandContainerAzureClusterNetworking(d.Get("networking")), + ResourceGroupId: dcl.String(d.Get("resource_group_id").(string)), + AzureServicesAuthentication: expandContainerAzureClusterAzureServicesAuthentication(d.Get("azure_services_authentication")), + Client: dcl.String(d.Get("client").(string)), + Description: dcl.String(d.Get("description").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), +{{- if ne $.TargetVersionName "ga" }} + LoggingConfig: expandContainerAzureClusterLoggingConfig(d.Get("logging_config")), +{{- end }} + Project: dcl.String(project), + } + directive := tpgdclresource.UpdateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := transport_tpg.NewDCLContainerAzureClient(config, userAgent, billingProject, 
d.Timeout(schema.TimeoutUpdate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyCluster(context.Background(), obj, directive...) + + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually update + d.SetId("") + return fmt.Errorf("Error updating Cluster: %s", err) + } + + log.Printf("[DEBUG] Finished updating Cluster %q: %#v", d.Id(), res) + + return resourceContainerAzureClusterRead(d, meta) +} + +func resourceContainerAzureClusterDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &containerazure.Cluster{ + Authorization: expandContainerAzureClusterAuthorization(d.Get("authorization")), + AzureRegion: dcl.String(d.Get("azure_region").(string)), + ControlPlane: expandContainerAzureClusterControlPlane(d.Get("control_plane")), + Fleet: expandContainerAzureClusterFleet(d.Get("fleet")), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Networking: expandContainerAzureClusterNetworking(d.Get("networking")), + ResourceGroupId: dcl.String(d.Get("resource_group_id").(string)), + AzureServicesAuthentication: expandContainerAzureClusterAzureServicesAuthentication(d.Get("azure_services_authentication")), + Client: dcl.String(d.Get("client").(string)), + Description: dcl.String(d.Get("description").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), +{{- if ne $.TargetVersionName "ga" }} + LoggingConfig: expandContainerAzureClusterLoggingConfig(d.Get("logging_config")), +{{- end }} + Project: dcl.String(project), + } + + 
log.Printf("[DEBUG] Deleting Cluster %q", d.Id()) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := transport_tpg.NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + if err := client.DeleteCluster(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting Cluster: %s", err) + } + + log.Printf("[DEBUG] Finished deleting Cluster %q", d.Id()) + return nil +} + +func resourceContainerAzureClusterImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/azureClusters/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/azureClusters/{{ "{{" }}name{{ "}}" }}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func expandContainerAzureClusterAuthorization(o interface{}) *containerazure.ClusterAuthorization { + if o == nil { + return containerazure.EmptyClusterAuthorization + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return containerazure.EmptyClusterAuthorization + } + obj := 
objArr[0].(map[string]interface{}) + return &containerazure.ClusterAuthorization{ + AdminUsers: expandContainerAzureClusterAuthorizationAdminUsersArray(obj["admin_users"]), + AdminGroups: expandContainerAzureClusterAuthorizationAdminGroupsArray(obj["admin_groups"]), + } +} + +func flattenContainerAzureClusterAuthorization(obj *containerazure.ClusterAuthorization) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "admin_users": flattenContainerAzureClusterAuthorizationAdminUsersArray(obj.AdminUsers), + "admin_groups": flattenContainerAzureClusterAuthorizationAdminGroupsArray(obj.AdminGroups), + } + + return []interface{}{transformed} + +} +func expandContainerAzureClusterAuthorizationAdminUsersArray(o interface{}) []containerazure.ClusterAuthorizationAdminUsers { + if o == nil { + return make([]containerazure.ClusterAuthorizationAdminUsers, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return make([]containerazure.ClusterAuthorizationAdminUsers, 0) + } + + items := make([]containerazure.ClusterAuthorizationAdminUsers, 0, len(objs)) + for _, item := range objs { + i := expandContainerAzureClusterAuthorizationAdminUsers(item) + items = append(items, *i) + } + + return items +} + +func expandContainerAzureClusterAuthorizationAdminUsers(o interface{}) *containerazure.ClusterAuthorizationAdminUsers { + if o == nil { + return containerazure.EmptyClusterAuthorizationAdminUsers + } + + obj := o.(map[string]interface{}) + return &containerazure.ClusterAuthorizationAdminUsers{ + Username: dcl.String(obj["username"].(string)), + } +} + +func flattenContainerAzureClusterAuthorizationAdminUsersArray(objs []containerazure.ClusterAuthorizationAdminUsers) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenContainerAzureClusterAuthorizationAdminUsers(&item) + items = append(items, i) + } + + return items +} + +func 
flattenContainerAzureClusterAuthorizationAdminUsers(obj *containerazure.ClusterAuthorizationAdminUsers) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "username": obj.Username, + } + + return transformed + +} +func expandContainerAzureClusterAuthorizationAdminGroupsArray(o interface{}) []containerazure.ClusterAuthorizationAdminGroups { + if o == nil { + return make([]containerazure.ClusterAuthorizationAdminGroups, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return make([]containerazure.ClusterAuthorizationAdminGroups, 0) + } + + items := make([]containerazure.ClusterAuthorizationAdminGroups, 0, len(objs)) + for _, item := range objs { + i := expandContainerAzureClusterAuthorizationAdminGroups(item) + items = append(items, *i) + } + + return items +} + +func expandContainerAzureClusterAuthorizationAdminGroups(o interface{}) *containerazure.ClusterAuthorizationAdminGroups { + if o == nil { + return containerazure.EmptyClusterAuthorizationAdminGroups + } + + obj := o.(map[string]interface{}) + return &containerazure.ClusterAuthorizationAdminGroups{ + Group: dcl.String(obj["group"].(string)), + } +} + +func flattenContainerAzureClusterAuthorizationAdminGroupsArray(objs []containerazure.ClusterAuthorizationAdminGroups) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenContainerAzureClusterAuthorizationAdminGroups(&item) + items = append(items, i) + } + + return items +} + +func flattenContainerAzureClusterAuthorizationAdminGroups(obj *containerazure.ClusterAuthorizationAdminGroups) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "group": obj.Group, + } + + return transformed + +} + +func expandContainerAzureClusterControlPlane(o interface{}) *containerazure.ClusterControlPlane { + if o == nil { + return containerazure.EmptyClusterControlPlane + 
} + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return containerazure.EmptyClusterControlPlane + } + obj := objArr[0].(map[string]interface{}) + return &containerazure.ClusterControlPlane{ + SshConfig: expandContainerAzureClusterControlPlaneSshConfig(obj["ssh_config"]), + SubnetId: dcl.String(obj["subnet_id"].(string)), + Version: dcl.String(obj["version"].(string)), + DatabaseEncryption: expandContainerAzureClusterControlPlaneDatabaseEncryption(obj["database_encryption"]), + MainVolume: expandContainerAzureClusterControlPlaneMainVolume(obj["main_volume"]), + ProxyConfig: expandContainerAzureClusterControlPlaneProxyConfig(obj["proxy_config"]), + ReplicaPlacements: expandContainerAzureClusterControlPlaneReplicaPlacementsArray(obj["replica_placements"]), + RootVolume: expandContainerAzureClusterControlPlaneRootVolume(obj["root_volume"]), + Tags: tpgresource.CheckStringMap(obj["tags"]), + VmSize: dcl.StringOrNil(obj["vm_size"].(string)), + } +} + +func flattenContainerAzureClusterControlPlane(obj *containerazure.ClusterControlPlane) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "ssh_config": flattenContainerAzureClusterControlPlaneSshConfig(obj.SshConfig), + "subnet_id": obj.SubnetId, + "version": obj.Version, + "database_encryption": flattenContainerAzureClusterControlPlaneDatabaseEncryption(obj.DatabaseEncryption), + "main_volume": flattenContainerAzureClusterControlPlaneMainVolume(obj.MainVolume), + "proxy_config": flattenContainerAzureClusterControlPlaneProxyConfig(obj.ProxyConfig), + "replica_placements": flattenContainerAzureClusterControlPlaneReplicaPlacementsArray(obj.ReplicaPlacements), + "root_volume": flattenContainerAzureClusterControlPlaneRootVolume(obj.RootVolume), + "tags": obj.Tags, + "vm_size": obj.VmSize, + } + + return []interface{}{transformed} + +} + +func expandContainerAzureClusterControlPlaneSshConfig(o interface{}) 
*containerazure.ClusterControlPlaneSshConfig { + if o == nil { + return containerazure.EmptyClusterControlPlaneSshConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return containerazure.EmptyClusterControlPlaneSshConfig + } + obj := objArr[0].(map[string]interface{}) + return &containerazure.ClusterControlPlaneSshConfig{ + AuthorizedKey: dcl.String(obj["authorized_key"].(string)), + } +} + +func flattenContainerAzureClusterControlPlaneSshConfig(obj *containerazure.ClusterControlPlaneSshConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "authorized_key": obj.AuthorizedKey, + } + + return []interface{}{transformed} + +} + +func expandContainerAzureClusterControlPlaneDatabaseEncryption(o interface{}) *containerazure.ClusterControlPlaneDatabaseEncryption { + if o == nil { + return containerazure.EmptyClusterControlPlaneDatabaseEncryption + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return containerazure.EmptyClusterControlPlaneDatabaseEncryption + } + obj := objArr[0].(map[string]interface{}) + return &containerazure.ClusterControlPlaneDatabaseEncryption{ + KeyId: dcl.String(obj["key_id"].(string)), + } +} + +func flattenContainerAzureClusterControlPlaneDatabaseEncryption(obj *containerazure.ClusterControlPlaneDatabaseEncryption) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "key_id": obj.KeyId, + } + + return []interface{}{transformed} + +} + +func expandContainerAzureClusterControlPlaneMainVolume(o interface{}) *containerazure.ClusterControlPlaneMainVolume { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &containerazure.ClusterControlPlaneMainVolume{ + SizeGib: dcl.Int64OrNil(int64(obj["size_gib"].(int))), + } +} + +func 
flattenContainerAzureClusterControlPlaneMainVolume(obj *containerazure.ClusterControlPlaneMainVolume) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "size_gib": obj.SizeGib, + } + + return []interface{}{transformed} + +} + +func expandContainerAzureClusterControlPlaneProxyConfig(o interface{}) *containerazure.ClusterControlPlaneProxyConfig { + if o == nil { + return containerazure.EmptyClusterControlPlaneProxyConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return containerazure.EmptyClusterControlPlaneProxyConfig + } + obj := objArr[0].(map[string]interface{}) + return &containerazure.ClusterControlPlaneProxyConfig{ + ResourceGroupId: dcl.String(obj["resource_group_id"].(string)), + SecretId: dcl.String(obj["secret_id"].(string)), + } +} + +func flattenContainerAzureClusterControlPlaneProxyConfig(obj *containerazure.ClusterControlPlaneProxyConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "resource_group_id": obj.ResourceGroupId, + "secret_id": obj.SecretId, + } + + return []interface{}{transformed} + +} +func expandContainerAzureClusterControlPlaneReplicaPlacementsArray(o interface{}) []containerazure.ClusterControlPlaneReplicaPlacements { + if o == nil { + return make([]containerazure.ClusterControlPlaneReplicaPlacements, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return make([]containerazure.ClusterControlPlaneReplicaPlacements, 0) + } + + items := make([]containerazure.ClusterControlPlaneReplicaPlacements, 0, len(objs)) + for _, item := range objs { + i := expandContainerAzureClusterControlPlaneReplicaPlacements(item) + items = append(items, *i) + } + + return items +} + +func expandContainerAzureClusterControlPlaneReplicaPlacements(o interface{}) *containerazure.ClusterControlPlaneReplicaPlacements { + if o == nil { + return 
containerazure.EmptyClusterControlPlaneReplicaPlacements + } + + obj := o.(map[string]interface{}) + return &containerazure.ClusterControlPlaneReplicaPlacements{ + AzureAvailabilityZone: dcl.String(obj["azure_availability_zone"].(string)), + SubnetId: dcl.String(obj["subnet_id"].(string)), + } +} + +func flattenContainerAzureClusterControlPlaneReplicaPlacementsArray(objs []containerazure.ClusterControlPlaneReplicaPlacements) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenContainerAzureClusterControlPlaneReplicaPlacements(&item) + items = append(items, i) + } + + return items +} + +func flattenContainerAzureClusterControlPlaneReplicaPlacements(obj *containerazure.ClusterControlPlaneReplicaPlacements) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "azure_availability_zone": obj.AzureAvailabilityZone, + "subnet_id": obj.SubnetId, + } + + return transformed + +} + +func expandContainerAzureClusterControlPlaneRootVolume(o interface{}) *containerazure.ClusterControlPlaneRootVolume { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &containerazure.ClusterControlPlaneRootVolume{ + SizeGib: dcl.Int64OrNil(int64(obj["size_gib"].(int))), + } +} + +func flattenContainerAzureClusterControlPlaneRootVolume(obj *containerazure.ClusterControlPlaneRootVolume) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "size_gib": obj.SizeGib, + } + + return []interface{}{transformed} + +} + +func expandContainerAzureClusterFleet(o interface{}) *containerazure.ClusterFleet { + if o == nil { + return containerazure.EmptyClusterFleet + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return containerazure.EmptyClusterFleet + } + obj := 
objArr[0].(map[string]interface{}) + return &containerazure.ClusterFleet{ + Project: dcl.StringOrNil(obj["project"].(string)), + } +} + +func flattenContainerAzureClusterFleet(obj *containerazure.ClusterFleet) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "project": obj.Project, + "membership": obj.Membership, + } + + return []interface{}{transformed} + +} + +func expandContainerAzureClusterNetworking(o interface{}) *containerazure.ClusterNetworking { + if o == nil { + return containerazure.EmptyClusterNetworking + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return containerazure.EmptyClusterNetworking + } + obj := objArr[0].(map[string]interface{}) + return &containerazure.ClusterNetworking{ + PodAddressCidrBlocks: tpgdclresource.ExpandStringArray(obj["pod_address_cidr_blocks"]), + ServiceAddressCidrBlocks: tpgdclresource.ExpandStringArray(obj["service_address_cidr_blocks"]), + VirtualNetworkId: dcl.String(obj["virtual_network_id"].(string)), + } +} + +func flattenContainerAzureClusterNetworking(obj *containerazure.ClusterNetworking) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "pod_address_cidr_blocks": obj.PodAddressCidrBlocks, + "service_address_cidr_blocks": obj.ServiceAddressCidrBlocks, + "virtual_network_id": obj.VirtualNetworkId, + } + + return []interface{}{transformed} + +} + +func expandContainerAzureClusterAzureServicesAuthentication(o interface{}) *containerazure.ClusterAzureServicesAuthentication { + if o == nil { + return containerazure.EmptyClusterAzureServicesAuthentication + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return containerazure.EmptyClusterAzureServicesAuthentication + } + obj := objArr[0].(map[string]interface{}) + return &containerazure.ClusterAzureServicesAuthentication{ + ApplicationId: dcl.String(obj["application_id"].(string)), + TenantId: 
dcl.String(obj["tenant_id"].(string)), + } +} + +func flattenContainerAzureClusterAzureServicesAuthentication(obj *containerazure.ClusterAzureServicesAuthentication) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "application_id": obj.ApplicationId, + "tenant_id": obj.TenantId, + } + + return []interface{}{transformed} + +} + +{{- if ne $.TargetVersionName "ga" }} +func expandContainerAzureClusterLoggingConfig(o interface{}) *containerazure.ClusterLoggingConfig { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &containerazure.ClusterLoggingConfig{ + ComponentConfig: expandContainerAzureClusterLoggingConfigComponentConfig(obj["component_config"]), + } +} + +func flattenContainerAzureClusterLoggingConfig(obj *containerazure.ClusterLoggingConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "component_config": flattenContainerAzureClusterLoggingConfigComponentConfig(obj.ComponentConfig), + } + + return []interface{}{transformed} + +} + +func expandContainerAzureClusterLoggingConfigComponentConfig(o interface{}) *containerazure.ClusterLoggingConfigComponentConfig { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &containerazure.ClusterLoggingConfigComponentConfig{ + EnableComponents: expandContainerAzureClusterLoggingConfigComponentConfigEnableComponentsArray(obj["enable_components"]), + } +} + +func flattenContainerAzureClusterLoggingConfigComponentConfig(obj *containerazure.ClusterLoggingConfigComponentConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "enable_components": 
flattenContainerAzureClusterLoggingConfigComponentConfigEnableComponentsArray(obj.EnableComponents), + } + + return []interface{}{transformed} + +} + +{{- end }} +func flattenContainerAzureClusterWorkloadIdentityConfig(obj *containerazure.ClusterWorkloadIdentityConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "identity_provider": obj.IdentityProvider, + "issuer_uri": obj.IssuerUri, + "workload_pool": obj.WorkloadPool, + } + + return []interface{}{transformed} + +} + +func flattenContainerAzureClusterAnnotations(v map[string]string, d *schema.ResourceData) interface{} { + if v == nil { + return nil + } + + transformed := make(map[string]interface{}) + if l, ok := d.Get("annotations").(map[string]interface{}); ok { + for k := range l { + transformed[k] = v[k] + } + } + + return transformed +} +{{- if ne $.TargetVersionName "ga" }} + +func flattenContainerAzureClusterLoggingConfigComponentConfigEnableComponentsArray(obj []containerazure.ClusterLoggingConfigComponentConfigEnableComponentsEnum) interface{} { + if obj == nil { + return nil + } + items := []string{} + for _, item := range obj { + items = append(items, string(item)) + } + return items +} +func expandContainerAzureClusterLoggingConfigComponentConfigEnableComponentsArray(o interface{}) []containerazure.ClusterLoggingConfigComponentConfigEnableComponentsEnum { + objs := o.([]interface{}) + items := make([]containerazure.ClusterLoggingConfigComponentConfigEnableComponentsEnum, 0, len(objs)) + for _, item := range objs { + i := containerazure.ClusterLoggingConfigComponentConfigEnableComponentsEnumRef(item.(string)) + items = append(items, *i) + } + return items +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/containerazure/resource_container_azure_cluster_generated_test.go.tmpl b/mmv1/third_party/terraform/services/containerazure/resource_container_azure_cluster_generated_test.go.tmpl new file mode 100644 index 
000000000000..e78919da273b --- /dev/null +++ b/mmv1/third_party/terraform/services/containerazure/resource_container_azure_cluster_generated_test.go.tmpl @@ -0,0 +1,672 @@ +package containerazure_test + +import ( + "context" + "fmt" + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" +{{- if eq $.TargetVersionName "ga" }} + containerazure "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containerazure" +{{- else }} + containerazure "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containerazure/beta" +{{- end }} + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func TestAccContainerAzureCluster_BasicHandWritten(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "azure_app": "00000000-0000-0000-0000-17aad2f0f61f", + "azure_config_secret": "07d4b1f1a7cb4b1b91f070c30ae761a1", + "azure_sub": "00000000-0000-0000-0000-17aad2f0f61f", + "azure_tenant": "00000000-0000-0000-0000-17aad2f0f61f", + "byo_prefix": "mmv2", + "project_name": envvar.GetTestProjectFromEnv(), + "project_number": envvar.GetTestProjectNumberFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerAzureClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerAzureCluster_BasicHandWritten(context), + }, + { + ResourceName: "google_container_azure_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: 
[]string{"fleet.0.project", "annotations"}, + }, + { + Config: testAccContainerAzureCluster_BasicHandWrittenUpdate0(context), + }, + { + ResourceName: "google_container_azure_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"fleet.0.project", "annotations"}, + }, + }, + }) +} +{{- if ne $.TargetVersionName "ga" }} +func TestAccContainerAzureCluster_BetaBasicHandWritten(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "azure_app": "00000000-0000-0000-0000-17aad2f0f61f", + "azure_config_secret": "07d4b1f1a7cb4b1b91f070c30ae761a1", + "azure_sub": "00000000-0000-0000-0000-17aad2f0f61f", + "azure_tenant": "00000000-0000-0000-0000-17aad2f0f61f", + "byo_prefix": "mmv2", + "project_name": envvar.GetTestProjectFromEnv(), + "project_number": envvar.GetTestProjectNumberFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckContainerAzureClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerAzureCluster_BetaBasicHandWritten(context), + }, + { + ResourceName: "google_container_azure_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"fleet.0.project", "annotations"}, + }, + { + Config: testAccContainerAzureCluster_BetaBasicHandWrittenUpdate0(context), + }, + { + ResourceName: "google_container_azure_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"fleet.0.project", "annotations"}, + }, + }, + }) +} +func TestAccContainerAzureCluster_BetaBasicEnumHandWritten(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "azure_app": "00000000-0000-0000-0000-17aad2f0f61f", + "azure_config_secret": "07d4b1f1a7cb4b1b91f070c30ae761a1", + "azure_sub": 
"00000000-0000-0000-0000-17aad2f0f61f", + "azure_tenant": "00000000-0000-0000-0000-17aad2f0f61f", + "byo_prefix": "mmv2", + "project_name": envvar.GetTestProjectFromEnv(), + "project_number": envvar.GetTestProjectNumberFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckContainerAzureClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerAzureCluster_BetaBasicEnumHandWritten(context), + }, + { + ResourceName: "google_container_azure_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"fleet.0.project", "annotations"}, + }, + { + Config: testAccContainerAzureCluster_BetaBasicEnumHandWrittenUpdate0(context), + }, + { + ResourceName: "google_container_azure_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"fleet.0.project", "annotations"}, + }, + }, + }) +} +{{- end }} + +func testAccContainerAzureCluster_BasicHandWritten(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_container_azure_versions" "versions" { + project = "%{project_name}" + location = "us-west1" +} + +resource "google_container_azure_cluster" "primary" { + authorization { + admin_users { + username = "mmv2@google.com" + } + admin_groups { + group = "group@domain.com" + } + } + + azure_region = "westus2" + client = "projects/%{project_number}/locations/us-west1/azureClients/${google_container_azure_client.basic.name}" + + control_plane { + ssh_config { + authorized_key = "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQC8yaayO6lnb2v+SedxUMa2c8vtIEzCzBjM3EJJsv8Vm9zUDWR7dXWKoNGARUb2mNGXASvI6mFIDXTIlkQ0poDEPpMaXR0g2cb5xT8jAAJq7fqXL3+0rcJhY/uigQ+MrT6s+ub0BFVbsmGHNrMQttXX9gtmwkeAEvj3mra9e5pkNf90qlKnZz6U0SVArxVsLx07vHPHDIYrl0OPG4zUREF52igbBPiNrHJFDQJT/4YlDMJmo/QT/A1D6n9ocemvZSzhRx15/Arjowhr+VVKSbaxzPtEfY0oIg2SrqJnnr/l3Du5qIefwh5VmCZe4xopPUaDDoOIEFriZ88sB+3zz8ib8sk8zJJQCgeP78tQvXCgS+4e5W3TUg9mxjB6KjXTyHIVhDZqhqde0OI3Fy1UuVzRUwnBaLjBnAwP5EoFQGRmDYk/rEYe7HTmovLeEBUDQocBQKT4Ripm/xJkkWY7B07K/tfo56dGUCkvyIVXKBInCh+dLK7gZapnd4UWkY0xBYcwo1geMLRq58iFTLA2j/JmpmHXp7m0l7jJii7d44uD3tTIFYThn7NlOnvhLim/YcBK07GMGIN7XwrrKZKmxXaspw6KBWVhzuw1UPxctxshYEaMLfFg/bwOw8HvMPr9VtrElpSB7oiOh91PDIPdPBgHCi7N2QgQ5l/ZDBHieSpNrQ== thomasrodgers" + } + + subnet_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet/subnets/default" + version = "${data.google_container_azure_versions.versions.valid_versions[0]}" + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.200.0.0/16"] + service_address_cidr_blocks = ["10.32.0.0/24"] + virtual_network_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet" + } + + resource_group_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-cluster" + project = "%{project_name}" +} + +resource "google_container_azure_client" "basic" { + application_id = "%{azure_app}" + location = "us-west1" + name = "tf-test-client-name%{random_suffix}" + tenant_id = "%{azure_tenant}" + project = "%{project_name}" +} + + +`, context) +} + +func testAccContainerAzureCluster_BasicHandWrittenUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_container_azure_versions" "versions" { + project = "%{project_name}" + location = "us-west1" +} + +resource 
"google_container_azure_cluster" "primary" { + authorization { + admin_users { + username = "mmv2@google.com" + } + admin_groups { + group = "group@domain.com" + } + } + + azure_region = "westus2" + client = "projects/%{project_number}/locations/us-west1/azureClients/${google_container_azure_client.basic.name}" + + control_plane { + ssh_config { + authorized_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8yaayO6lnb2v+SedxUMa2c8vtIEzCzBjM3EJJsv8Vm9zUDWR7dXWKoNGARUb2mNGXASvI6mFIDXTIlkQ0poDEPpMaXR0g2cb5xT8jAAJq7fqXL3+0rcJhY/uigQ+MrT6s+ub0BFVbsmGHNrMQttXX9gtmwkeAEvj3mra9e5pkNf90qlKnZz6U0SVArxVsLx07vHPHDIYrl0OPG4zUREF52igbBPiNrHJFDQJT/4YlDMJmo/QT/A1D6n9ocemvZSzhRx15/Arjowhr+VVKSbaxzPtEfY0oIg2SrqJnnr/l3Du5qIefwh5VmCZe4xopPUaDDoOIEFriZ88sB+3zz8ib8sk8zJJQCgeP78tQvXCgS+4e5W3TUg9mxjB6KjXTyHIVhDZqhqde0OI3Fy1UuVzRUwnBaLjBnAwP5EoFQGRmDYk/rEYe7HTmovLeEBUDQocBQKT4Ripm/xJkkWY7B07K/tfo56dGUCkvyIVXKBInCh+dLK7gZapnd4UWkY0xBYcwo1geMLRq58iFTLA2j/JmpmHXp7m0l7jJii7d44uD3tTIFYThn7NlOnvhLim/YcBK07GMGIN7XwrrKZKmxXaspw6KBWVhzuw1UPxctxshYEaMLfFg/bwOw8HvMPr9VtrElpSB7oiOh91PDIPdPBgHCi7N2QgQ5l/ZDBHieSpNrQ== thomasrodgers" + } + + subnet_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet/subnets/default" + version = "${data.google_container_azure_versions.versions.valid_versions[0]}" + + database_encryption { + key_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-cluster/providers/Microsoft.KeyVault/vaults/%{byo_prefix}-dev-keyvault/keys/%{byo_prefix}-dev-key" + } + + main_volume { + size_gib = 8 + } + + proxy_config { + resource_group_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-cluster" + secret_id = "https://%{byo_prefix}-dev-keyvault.vault.azure.net/secrets/%{byo_prefix}-dev-secret/%{azure_config_secret}" + } + + replica_placements { + azure_availability_zone = "1" + subnet_id = 
"/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet/subnets/default" + } + + root_volume { + size_gib = 32 + } + + tags = { + owner = "mmv2" + } + + vm_size = "Standard_DS2_v2" + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.200.0.0/16"] + service_address_cidr_blocks = ["10.32.0.0/24"] + virtual_network_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet" + } + + resource_group_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-cluster" + + annotations = { + annotation-one = "value-one" + } + + description = "An updated sample azure cluster" + project = "%{project_name}" +} + +resource "google_container_azure_client" "basic" { +{{- if ne $.TargetVersionName "ga" }} + application_id = "%{azure_app}" + location = "us-west1" + name = "tf-test-client-name%{random_suffix}" + tenant_id = "%{azure_tenant}" + project = "%{project_name}" +} + + + +`, context) +} + +func testAccContainerAzureCluster_BetaBasicHandWritten(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_container_azure_versions" "versions" { + project = "%{project_name}" + location = "us-west1" + provider = google-beta +} + +resource "google_container_azure_cluster" "primary" { + provider = google-beta + authorization { + admin_users { + username = "mmv2@google.com" + } + } + + azure_region = "westus2" + client = "projects/%{project_number}/locations/us-west1/azureClients/${google_container_azure_client.basic.name}" + + control_plane { + ssh_config { + authorized_key = "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQC8yaayO6lnb2v+SedxUMa2c8vtIEzCzBjM3EJJsv8Vm9zUDWR7dXWKoNGARUb2mNGXASvI6mFIDXTIlkQ0poDEPpMaXR0g2cb5xT8jAAJq7fqXL3+0rcJhY/uigQ+MrT6s+ub0BFVbsmGHNrMQttXX9gtmwkeAEvj3mra9e5pkNf90qlKnZz6U0SVArxVsLx07vHPHDIYrl0OPG4zUREF52igbBPiNrHJFDQJT/4YlDMJmo/QT/A1D6n9ocemvZSzhRx15/Arjowhr+VVKSbaxzPtEfY0oIg2SrqJnnr/l3Du5qIefwh5VmCZe4xopPUaDDoOIEFriZ88sB+3zz8ib8sk8zJJQCgeP78tQvXCgS+4e5W3TUg9mxjB6KjXTyHIVhDZqhqde0OI3Fy1UuVzRUwnBaLjBnAwP5EoFQGRmDYk/rEYe7HTmovLeEBUDQocBQKT4Ripm/xJkkWY7B07K/tfo56dGUCkvyIVXKBInCh+dLK7gZapnd4UWkY0xBYcwo1geMLRq58iFTLA2j/JmpmHXp7m0l7jJii7d44uD3tTIFYThn7NlOnvhLim/YcBK07GMGIN7XwrrKZKmxXaspw6KBWVhzuw1UPxctxshYEaMLfFg/bwOw8HvMPr9VtrElpSB7oiOh91PDIPdPBgHCi7N2QgQ5l/ZDBHieSpNrQ== thomasrodgers" + } + + subnet_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet/subnets/default" + version = "${data.google_container_azure_versions.versions.valid_versions[0]}" + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.200.0.0/16"] + service_address_cidr_blocks = ["10.32.0.0/24"] + virtual_network_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet" + } + + resource_group_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-cluster" + project = "%{project_name}" + + logging_config { + component_config { + enable_components = ["SYSTEM_COMPONENTS", "WORKLOADS"] + } + } + +} + +resource "google_container_azure_client" "basic" { + provider = google-beta + application_id = "%{azure_app}" + location = "us-west1" + name = "tf-test-client-name%{random_suffix}" + tenant_id = "%{azure_tenant}" + project = "%{project_name}" +} + + +`, context) +} + +func testAccContainerAzureCluster_BetaBasicHandWrittenUpdate0(context map[string]interface{}) string { + return 
acctest.Nprintf(` +data "google_container_azure_versions" "versions" { + project = "%{project_name}" + location = "us-west1" + provider = google-beta +} + +resource "google_container_azure_cluster" "primary" { + provider = google-beta + authorization { + admin_users { + username = "mmv2@google.com" + } + } + + azure_region = "westus2" + client = "projects/%{project_number}/locations/us-west1/azureClients/${google_container_azure_client.basic.name}" + + control_plane { + ssh_config { + authorized_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8yaayO6lnb2v+SedxUMa2c8vtIEzCzBjM3EJJsv8Vm9zUDWR7dXWKoNGARUb2mNGXASvI6mFIDXTIlkQ0poDEPpMaXR0g2cb5xT8jAAJq7fqXL3+0rcJhY/uigQ+MrT6s+ub0BFVbsmGHNrMQttXX9gtmwkeAEvj3mra9e5pkNf90qlKnZz6U0SVArxVsLx07vHPHDIYrl0OPG4zUREF52igbBPiNrHJFDQJT/4YlDMJmo/QT/A1D6n9ocemvZSzhRx15/Arjowhr+VVKSbaxzPtEfY0oIg2SrqJnnr/l3Du5qIefwh5VmCZe4xopPUaDDoOIEFriZ88sB+3zz8ib8sk8zJJQCgeP78tQvXCgS+4e5W3TUg9mxjB6KjXTyHIVhDZqhqde0OI3Fy1UuVzRUwnBaLjBnAwP5EoFQGRmDYk/rEYe7HTmovLeEBUDQocBQKT4Ripm/xJkkWY7B07K/tfo56dGUCkvyIVXKBInCh+dLK7gZapnd4UWkY0xBYcwo1geMLRq58iFTLA2j/JmpmHXp7m0l7jJii7d44uD3tTIFYThn7NlOnvhLim/YcBK07GMGIN7XwrrKZKmxXaspw6KBWVhzuw1UPxctxshYEaMLfFg/bwOw8HvMPr9VtrElpSB7oiOh91PDIPdPBgHCi7N2QgQ5l/ZDBHieSpNrQ== thomasrodgers" + } + + subnet_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet/subnets/default" + version = "${data.google_container_azure_versions.versions.valid_versions[0]}" + + database_encryption { + key_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-cluster/providers/Microsoft.KeyVault/vaults/%{byo_prefix}-dev-keyvault/keys/%{byo_prefix}-dev-key" + } + + main_volume { + size_gib = 8 + } + + proxy_config { + resource_group_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-cluster" + secret_id = "https://%{byo_prefix}-dev-keyvault.vault.azure.net/secrets/%{byo_prefix}-dev-secret/%{azure_config_secret}" + } + + replica_placements 
{ + azure_availability_zone = "1" + subnet_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet/subnets/default" + } + + root_volume { + size_gib = 32 + } + + tags = { + owner = "mmv2" + } + + vm_size = "Standard_DS2_v2" + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.200.0.0/16"] + service_address_cidr_blocks = ["10.32.0.0/24"] + virtual_network_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet" + } + + resource_group_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-cluster" + + annotations = { + annotation-one = "value-one" + } + + description = "An updated sample azure cluster" + project = "%{project_name}" + + logging_config { + component_config { + enable_components = ["SYSTEM_COMPONENTS", "WORKLOADS"] + } + } + +} + +resource "google_container_azure_client" "basic" { + provider = google-beta + application_id = "%{azure_app}" + location = "us-west1" + name = "tf-test-client-name%{random_suffix}" + tenant_id = "%{azure_tenant}" + project = "%{project_name}" +} + + + +`, context) +} + +func testAccContainerAzureCluster_BetaBasicEnumHandWritten(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_container_azure_versions" "versions" { + project = "%{project_name}" + location = "us-west1" + provider = google-beta +} + +resource "google_container_azure_cluster" "primary" { + provider = google-beta + authorization { + admin_users { + username = "mmv2@google.com" + } + } + + azure_region = "westus2" + client = "projects/%{project_number}/locations/us-west1/azureClients/${google_container_azure_client.basic.name}" + + control_plane { + ssh_config { + authorized_key = "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQC8yaayO6lnb2v+SedxUMa2c8vtIEzCzBjM3EJJsv8Vm9zUDWR7dXWKoNGARUb2mNGXASvI6mFIDXTIlkQ0poDEPpMaXR0g2cb5xT8jAAJq7fqXL3+0rcJhY/uigQ+MrT6s+ub0BFVbsmGHNrMQttXX9gtmwkeAEvj3mra9e5pkNf90qlKnZz6U0SVArxVsLx07vHPHDIYrl0OPG4zUREF52igbBPiNrHJFDQJT/4YlDMJmo/QT/A1D6n9ocemvZSzhRx15/Arjowhr+VVKSbaxzPtEfY0oIg2SrqJnnr/l3Du5qIefwh5VmCZe4xopPUaDDoOIEFriZ88sB+3zz8ib8sk8zJJQCgeP78tQvXCgS+4e5W3TUg9mxjB6KjXTyHIVhDZqhqde0OI3Fy1UuVzRUwnBaLjBnAwP5EoFQGRmDYk/rEYe7HTmovLeEBUDQocBQKT4Ripm/xJkkWY7B07K/tfo56dGUCkvyIVXKBInCh+dLK7gZapnd4UWkY0xBYcwo1geMLRq58iFTLA2j/JmpmHXp7m0l7jJii7d44uD3tTIFYThn7NlOnvhLim/YcBK07GMGIN7XwrrKZKmxXaspw6KBWVhzuw1UPxctxshYEaMLfFg/bwOw8HvMPr9VtrElpSB7oiOh91PDIPdPBgHCi7N2QgQ5l/ZDBHieSpNrQ== thomasrodgers" + } + + subnet_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet/subnets/default" + version = "${data.google_container_azure_versions.versions.valid_versions[0]}" + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.200.0.0/16"] + service_address_cidr_blocks = ["10.32.0.0/24"] + virtual_network_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet" + } + + resource_group_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-cluster" + project = "%{project_name}" + + logging_config { + component_config { + enable_components = ["system_components", "workloads"] + } + } + +} + +resource "google_container_azure_client" "basic" { + provider = google-beta + application_id = "%{azure_app}" + location = "us-west1" + name = "tf-test-client-name%{random_suffix}" + tenant_id = "%{azure_tenant}" + project = "%{project_name}" +} + + +`, context) +} + +func testAccContainerAzureCluster_BetaBasicEnumHandWrittenUpdate0(context map[string]interface{}) string { + return 
acctest.Nprintf(` +data "google_container_azure_versions" "versions" { + project = "%{project_name}" + location = "us-west1" + provider = google-beta +} + +resource "google_container_azure_cluster" "primary" { + provider = google-beta + authorization { + admin_users { + username = "mmv2@google.com" + } + } + + azure_region = "westus2" + client = "projects/%{project_number}/locations/us-west1/azureClients/${google_container_azure_client.basic.name}" + + control_plane { + ssh_config { + authorized_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8yaayO6lnb2v+SedxUMa2c8vtIEzCzBjM3EJJsv8Vm9zUDWR7dXWKoNGARUb2mNGXASvI6mFIDXTIlkQ0poDEPpMaXR0g2cb5xT8jAAJq7fqXL3+0rcJhY/uigQ+MrT6s+ub0BFVbsmGHNrMQttXX9gtmwkeAEvj3mra9e5pkNf90qlKnZz6U0SVArxVsLx07vHPHDIYrl0OPG4zUREF52igbBPiNrHJFDQJT/4YlDMJmo/QT/A1D6n9ocemvZSzhRx15/Arjowhr+VVKSbaxzPtEfY0oIg2SrqJnnr/l3Du5qIefwh5VmCZe4xopPUaDDoOIEFriZ88sB+3zz8ib8sk8zJJQCgeP78tQvXCgS+4e5W3TUg9mxjB6KjXTyHIVhDZqhqde0OI3Fy1UuVzRUwnBaLjBnAwP5EoFQGRmDYk/rEYe7HTmovLeEBUDQocBQKT4Ripm/xJkkWY7B07K/tfo56dGUCkvyIVXKBInCh+dLK7gZapnd4UWkY0xBYcwo1geMLRq58iFTLA2j/JmpmHXp7m0l7jJii7d44uD3tTIFYThn7NlOnvhLim/YcBK07GMGIN7XwrrKZKmxXaspw6KBWVhzuw1UPxctxshYEaMLfFg/bwOw8HvMPr9VtrElpSB7oiOh91PDIPdPBgHCi7N2QgQ5l/ZDBHieSpNrQ== thomasrodgers" + } + + subnet_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet/subnets/default" + version = "${data.google_container_azure_versions.versions.valid_versions[0]}" + + database_encryption { + key_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-cluster/providers/Microsoft.KeyVault/vaults/%{byo_prefix}-dev-keyvault/keys/%{byo_prefix}-dev-key" + } + + main_volume { + size_gib = 8 + } + + proxy_config { + resource_group_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-cluster" + secret_id = "https://%{byo_prefix}-dev-keyvault.vault.azure.net/secrets/%{byo_prefix}-dev-secret/%{azure_config_secret}" + } + + replica_placements 
{
+      azure_availability_zone = "1"
+      subnet_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet/subnets/default"
+    }
+
+    root_volume {
+      size_gib = 32
+    }
+
+    tags = {
+      owner = "mmv2"
+    }
+
+    vm_size = "Standard_DS2_v2"
+  }
+
+  fleet {
+    project = "%{project_number}"
+  }
+
+  location = "us-west1"
+  name = "tf-test-name%{random_suffix}"
+
+  networking {
+    pod_address_cidr_blocks = ["10.200.0.0/16"]
+    service_address_cidr_blocks = ["10.32.0.0/24"]
+    virtual_network_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet"
+  }
+
+  resource_group_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-cluster"
+
+  annotations = {
+    annotation-one = "value-one"
+  }
+
+  description = "An updated sample azure cluster"
+  project = "%{project_name}"
+
+  logging_config {
+    component_config {
+      enable_components = ["system_components", "workloads"]
+    }
+  }
+
+}
+
+resource "google_container_azure_client" "basic" {
+  provider = google-beta
+  application_id = "%{azure_app}"
+  location = "us-west1"
+  name = "tf-test-client-name%{random_suffix}"
+  tenant_id = "%{azure_tenant}"
+  project = "%{project_name}"
+}
+
+
+
+`, context)
+}
+
+func testAccCheckContainerAzureClusterDestroyProducer(t *testing.T) func(s *terraform.State) error {
+	return func(s *terraform.State) error {
+		for name, rs := range s.RootModule().Resources {
+			if rs.Type != "google_container_azure_cluster" {
+				continue
+			}
+			if strings.HasPrefix(name, "data.") {
+				continue
+			}
+
+			config := acctest.GoogleProviderConfig(t)
+
+			billingProject := ""
+			if config.BillingProject != "" {
+				billingProject = config.BillingProject
+			}
+
+			obj := &containerazure.Cluster{
+				AzureRegion:     dcl.String(rs.Primary.Attributes["azure_region"]),
+				Location:        dcl.String(rs.Primary.Attributes["location"]),
+				Name:            
dcl.String(rs.Primary.Attributes["name"]), + ResourceGroupId: dcl.String(rs.Primary.Attributes["resource_group_id"]), + Client: dcl.String(rs.Primary.Attributes["client"]), + Description: dcl.String(rs.Primary.Attributes["description"]), + Project: dcl.StringOrNil(rs.Primary.Attributes["project"]), + CreateTime: dcl.StringOrNil(rs.Primary.Attributes["create_time"]), + Endpoint: dcl.StringOrNil(rs.Primary.Attributes["endpoint"]), + Etag: dcl.StringOrNil(rs.Primary.Attributes["etag"]), + Reconciling: dcl.Bool(rs.Primary.Attributes["reconciling"] == "true"), + State: containerazure.ClusterStateEnumRef(rs.Primary.Attributes["state"]), + Uid: dcl.StringOrNil(rs.Primary.Attributes["uid"]), + UpdateTime: dcl.StringOrNil(rs.Primary.Attributes["update_time"]), + } + + client := transport_tpg.NewDCLContainerAzureClient(config, config.UserAgent, billingProject, 0) + _, err := client.GetCluster(context.Background(), obj) + if err == nil { + return fmt.Errorf("google_container_azure_cluster still exists %v", obj) + } + } + return nil + } +} diff --git a/mmv1/third_party/terraform/services/containerazure/resource_container_azure_node_pool.go.tmpl b/mmv1/third_party/terraform/services/containerazure/resource_container_azure_node_pool.go.tmpl new file mode 100644 index 000000000000..632f0e7d3875 --- /dev/null +++ b/mmv1/third_party/terraform/services/containerazure/resource_container_azure_node_pool.go.tmpl @@ -0,0 +1,841 @@ +package containerazure + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" +{{- if eq $.TargetVersionName "ga" }} + containerazure "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containerazure" +{{- else }} + containerazure 
"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containerazure/beta" +{{- end }} + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceContainerAzureNodePool() *schema.Resource { + return &schema.Resource{ + Create: resourceContainerAzureNodePoolCreate, + Read: resourceContainerAzureNodePoolRead, + Update: resourceContainerAzureNodePoolUpdate, + Delete: resourceContainerAzureNodePoolDelete, + + Importer: &schema.ResourceImporter{ + State: resourceContainerAzureNodePoolImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + tpgresource.SetAnnotationsDiff, + ), + + Schema: map[string]*schema.Schema{ + "autoscaling": { + Type: schema.TypeList, + Required: true, + Description: "Autoscaler configuration for this node pool.", + MaxItems: 1, + Elem: ContainerAzureNodePoolAutoscalingSchema(), + }, + + "cluster": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The azureCluster for the resource", + }, + + "config": { + Type: schema.TypeList, + Required: true, + Description: "The node configuration of the node pool.", + MaxItems: 1, + Elem: ContainerAzureNodePoolConfigSchema(), + }, + + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The location for the resource", + }, + + "max_pods_constraint": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "The constraint on the maximum number of pods that can be run simultaneously on a node in the node pool.", + MaxItems: 
1, + Elem: ContainerAzureNodePoolMaxPodsConstraintSchema(), + }, + + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The name of this resource.", + }, + + "subnet_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The ARM ID of the subnet where the node pool VMs run. Make sure it's a subnet under the virtual network in the cluster configuration.", + }, + + "version": { + Type: schema.TypeString, + Required: true, + Description: "The Kubernetes version (e.g. `1.19.10-gke.1000`) running on this node pool.", + }, + + "azure_availability_zone": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The Azure availability zone of the nodes in this nodepool. When unspecified, it defaults to `1`.", + }, + + "effective_annotations": { + Type: schema.TypeMap, + Computed: true, + Description: "All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services.", + }, + + "management": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: "The Management configuration for this node pool.", + MaxItems: 1, + Elem: ContainerAzureNodePoolManagementSchema(), + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The project for the resource", + }, + + "annotations": { + Type: schema.TypeMap, + Optional: true, + Description: "Optional. Annotations on the node pool. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. Keys can have 2 segments: prefix (optional) and name (required), separated by a slash (/). Prefix must be a DNS subdomain. 
Name must be 63 characters or less, begin and end with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics between.\n\n**Note**: This field is non-authoritative, and will only manage the annotations present in your configuration.\nPlease refer to the field `effective_annotations` for all of the annotations present on the resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time at which this node pool was created.", + }, + + "etag": { + Type: schema.TypeString, + Computed: true, + Description: "Allows clients to perform consistent read-modify-writes through optimistic concurrency control. May be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.", + }, + + "reconciling": { + Type: schema.TypeBool, + Computed: true, + Description: "Output only. If set, there are currently pending changes to the node pool.", + }, + + "state": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The current state of the node pool. Possible values: STATE_UNSPECIFIED, PROVISIONING, RUNNING, RECONCILING, STOPPING, ERROR, DEGRADED", + }, + + "uid": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. A globally unique identifier for the node pool.", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time at which this node pool was last updated.", + }, + }, + } +} + +func ContainerAzureNodePoolAutoscalingSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_node_count": { + Type: schema.TypeInt, + Required: true, + Description: "Maximum number of nodes in the node pool. Must be >= min_node_count.", + }, + + "min_node_count": { + Type: schema.TypeInt, + Required: true, + Description: "Minimum number of nodes in the node pool. 
Must be >= 1 and <= max_node_count.", + }, + }, + } +} + +func ContainerAzureNodePoolConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ssh_config": { + Type: schema.TypeList, + Required: true, + Description: "SSH configuration for how to access the node pool machines.", + MaxItems: 1, + Elem: ContainerAzureNodePoolConfigSshConfigSchema(), +{{- if ne $.TargetVersionName "ga" }} + }, + + "image_type": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: "The OS image type to use on node pool instances.", +{{- end }} + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. The initial labels assigned to nodes of this node pool. An object containing a list of \"key\": value pairs. Example: { \"name\": \"wrench\", \"mass\": \"1.3kg\", \"count\": \"3\" }.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "proxy_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Proxy configuration for outbound HTTP(S) traffic.", + MaxItems: 1, + Elem: ContainerAzureNodePoolConfigProxyConfigSchema(), + }, + + "root_volume": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. Configuration related to the root volume provisioned for each node pool machine. When unspecified, it defaults to a 32-GiB Azure Disk.", + MaxItems: 1, + Elem: ContainerAzureNodePoolConfigRootVolumeSchema(), + }, + + "tags": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. A set of tags to apply to all underlying Azure resources for this node pool. This currently only includes Virtual Machine Scale Sets. Specify at most 50 pairs containing alphanumerics, spaces, and symbols (.+-=_:@/). Keys can be up to 127 Unicode characters. 
Values can be up to 255 Unicode characters.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "vm_size": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The Azure VM size name. Example: `Standard_DS2_v2`. See (/anthos/clusters/docs/azure/reference/supported-vms) for options. When unspecified, it defaults to `Standard_DS2_v2`.", + }, + }, + } +} + +func ContainerAzureNodePoolConfigSshConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "authorized_key": { + Type: schema.TypeString, + Required: true, + Description: "The SSH public key data for VMs managed by Anthos. This accepts the authorized_keys file format used in OpenSSH according to the sshd(8) manual page.", + }, + }, + } +} + +func ContainerAzureNodePoolConfigProxyConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "resource_group_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The ARM ID the of the resource group containing proxy keyvault. Resource group ids are formatted as `/subscriptions//resourceGroups/`", + }, + + "secret_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The URL the of the proxy setting secret with its version. Secret ids are formatted as `https:.vault.azure.net/secrets//`.", + }, + }, + } +} + +func ContainerAzureNodePoolConfigRootVolumeSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "size_gib": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The size of the disk, in GiBs. When unspecified, a default value is provided. 
See the specific reference in the parent resource.", + }, + }, + } +} + +func ContainerAzureNodePoolMaxPodsConstraintSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_pods_per_node": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: "The maximum number of pods to schedule on a single node.", + }, + }, + } +} + +func ContainerAzureNodePoolManagementSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "auto_repair": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + Description: "Optional. Whether or not the nodes will be automatically repaired.", + }, + }, + } +} + +func resourceContainerAzureNodePoolCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &containerazure.NodePool{ + Autoscaling: expandContainerAzureNodePoolAutoscaling(d.Get("autoscaling")), + Cluster: dcl.String(d.Get("cluster").(string)), + Config: expandContainerAzureNodePoolConfig(d.Get("config")), + Location: dcl.String(d.Get("location").(string)), + MaxPodsConstraint: expandContainerAzureNodePoolMaxPodsConstraint(d.Get("max_pods_constraint")), + Name: dcl.String(d.Get("name").(string)), + SubnetId: dcl.String(d.Get("subnet_id").(string)), + Version: dcl.String(d.Get("version").(string)), + AzureAvailabilityZone: dcl.StringOrNil(d.Get("azure_availability_zone").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), + Management: expandContainerAzureNodePoolManagement(d.Get("management")), + Project: dcl.String(project), + } + + id, err := obj.ID() + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + 
billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := transport_tpg.NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyNodePool(context.Background(), obj, directive...) + + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating NodePool: %s", err) + } + + log.Printf("[DEBUG] Finished creating NodePool %q: %#v", d.Id(), res) + + return resourceContainerAzureNodePoolRead(d, meta) +} + +func resourceContainerAzureNodePoolRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &containerazure.NodePool{ + Autoscaling: expandContainerAzureNodePoolAutoscaling(d.Get("autoscaling")), + Cluster: dcl.String(d.Get("cluster").(string)), + Config: expandContainerAzureNodePoolConfig(d.Get("config")), + Location: dcl.String(d.Get("location").(string)), + MaxPodsConstraint: expandContainerAzureNodePoolMaxPodsConstraint(d.Get("max_pods_constraint")), + Name: dcl.String(d.Get("name").(string)), + SubnetId: dcl.String(d.Get("subnet_id").(string)), + Version: dcl.String(d.Get("version").(string)), + AzureAvailabilityZone: dcl.StringOrNil(d.Get("azure_availability_zone").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), + Management: expandContainerAzureNodePoolManagement(d.Get("management")), + Project: 
dcl.String(project), + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := transport_tpg.NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetNodePool(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("ContainerAzureNodePool %q", d.Id()) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("autoscaling", flattenContainerAzureNodePoolAutoscaling(res.Autoscaling)); err != nil { + return fmt.Errorf("error setting autoscaling in state: %s", err) + } + if err = d.Set("cluster", res.Cluster); err != nil { + return fmt.Errorf("error setting cluster in state: %s", err) + } + if err = d.Set("config", flattenContainerAzureNodePoolConfig(res.Config)); err != nil { + return fmt.Errorf("error setting config in state: %s", err) + } + if err = d.Set("location", res.Location); err != nil { + return fmt.Errorf("error setting location in state: %s", err) + } + if err = d.Set("max_pods_constraint", flattenContainerAzureNodePoolMaxPodsConstraint(res.MaxPodsConstraint)); err != nil { + return fmt.Errorf("error setting max_pods_constraint in state: %s", err) + } + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + if err = d.Set("subnet_id", res.SubnetId); err != nil { + return fmt.Errorf("error setting subnet_id in state: %s", err) + } + if err = d.Set("version", res.Version); err != nil { + return fmt.Errorf("error 
setting version in state: %s", err)
+	}
+	if err = d.Set("azure_availability_zone", res.AzureAvailabilityZone); err != nil {
+		return fmt.Errorf("error setting azure_availability_zone in state: %s", err)
+	}
+	if err = d.Set("effective_annotations", res.Annotations); err != nil {
+		return fmt.Errorf("error setting effective_annotations in state: %s", err)
+	}
+	if err = d.Set("management", flattenContainerAzureNodePoolManagement(res.Management)); err != nil {
+		return fmt.Errorf("error setting management in state: %s", err)
+	}
+	if err = d.Set("project", res.Project); err != nil {
+		return fmt.Errorf("error setting project in state: %s", err)
+	}
+	if err = d.Set("annotations", flattenContainerAzureNodePoolAnnotations(res.Annotations, d)); err != nil {
+		return fmt.Errorf("error setting annotations in state: %s", err)
+	}
+	if err = d.Set("create_time", res.CreateTime); err != nil {
+		return fmt.Errorf("error setting create_time in state: %s", err)
+	}
+	if err = d.Set("etag", res.Etag); err != nil {
+		return fmt.Errorf("error setting etag in state: %s", err)
+	}
+	if err = d.Set("reconciling", res.Reconciling); err != nil {
+		return fmt.Errorf("error setting reconciling in state: %s", err)
+	}
+	if err = d.Set("state", res.State); err != nil {
+		return fmt.Errorf("error setting state in state: %s", err)
+	}
+	if err = d.Set("uid", res.Uid); err != nil {
+		return fmt.Errorf("error setting uid in state: %s", err)
+	}
+	if err = d.Set("update_time", res.UpdateTime); err != nil {
+		return fmt.Errorf("error setting update_time in state: %s", err)
+	}
+
+	return nil
+}
+func resourceContainerAzureNodePoolUpdate(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*transport_tpg.Config)
+	project, err := tpgresource.GetProject(d, config)
+	if err != nil {
+		return err
+	}
+
+	obj := &containerazure.NodePool{
+		Autoscaling:           expandContainerAzureNodePoolAutoscaling(d.Get("autoscaling")),
+		Cluster:               
dcl.String(d.Get("cluster").(string)), + Config: expandContainerAzureNodePoolConfig(d.Get("config")), + Location: dcl.String(d.Get("location").(string)), + MaxPodsConstraint: expandContainerAzureNodePoolMaxPodsConstraint(d.Get("max_pods_constraint")), + Name: dcl.String(d.Get("name").(string)), + SubnetId: dcl.String(d.Get("subnet_id").(string)), + Version: dcl.String(d.Get("version").(string)), + AzureAvailabilityZone: dcl.StringOrNil(d.Get("azure_availability_zone").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), + Management: expandContainerAzureNodePoolManagement(d.Get("management")), + Project: dcl.String(project), + } + directive := tpgdclresource.UpdateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := transport_tpg.NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyNodePool(context.Background(), obj, directive...) 
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error updating NodePool: %s", err) + } + + log.Printf("[DEBUG] Finished creating NodePool %q: %#v", d.Id(), res) + + return resourceContainerAzureNodePoolRead(d, meta) +} + +func resourceContainerAzureNodePoolDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &containerazure.NodePool{ + Autoscaling: expandContainerAzureNodePoolAutoscaling(d.Get("autoscaling")), + Cluster: dcl.String(d.Get("cluster").(string)), + Config: expandContainerAzureNodePoolConfig(d.Get("config")), + Location: dcl.String(d.Get("location").(string)), + MaxPodsConstraint: expandContainerAzureNodePoolMaxPodsConstraint(d.Get("max_pods_constraint")), + Name: dcl.String(d.Get("name").(string)), + SubnetId: dcl.String(d.Get("subnet_id").(string)), + Version: dcl.String(d.Get("version").(string)), + AzureAvailabilityZone: dcl.StringOrNil(d.Get("azure_availability_zone").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), + Management: expandContainerAzureNodePoolManagement(d.Get("management")), + Project: dcl.String(project), + } + + log.Printf("[DEBUG] Deleting NodePool %q", d.Id()) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := transport_tpg.NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + 
return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err)
+	} else {
+		client.Config.BasePath = bp
+	}
+	if err := client.DeleteNodePool(context.Background(), obj); err != nil {
+		return fmt.Errorf("Error deleting NodePool: %s", err)
+	}
+
+	log.Printf("[DEBUG] Finished deleting NodePool %q", d.Id())
+	return nil
+}
+
+func resourceContainerAzureNodePoolImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
+	config := meta.(*transport_tpg.Config)
+
+	if err := tpgresource.ParseImportId([]string{
+		"projects/(?P<project>[^/]+)/locations/(?P<location>[^/]+)/azureClusters/(?P<cluster>[^/]+)/azureNodePools/(?P<name>[^/]+)",
+		"(?P<project>[^/]+)/(?P<location>[^/]+)/(?P<cluster>[^/]+)/(?P<name>[^/]+)",
+		"(?P<location>[^/]+)/(?P<cluster>[^/]+)/(?P<name>[^/]+)",
+	}, d, config); err != nil {
+		return nil, err
+	}
+
+	// Replace import id for the resource id
+	id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/azureClusters/{{ "{{" }}cluster{{ "}}" }}/azureNodePools/{{ "{{" }}name{{ "}}" }}")
+	if err != nil {
+		return nil, fmt.Errorf("Error constructing id: %s", err)
+	}
+	d.SetId(id)
+
+	return []*schema.ResourceData{d}, nil
+}
+
+func expandContainerAzureNodePoolAutoscaling(o interface{}) *containerazure.NodePoolAutoscaling {
+	if o == nil {
+		return containerazure.EmptyNodePoolAutoscaling
+	}
+	objArr := o.([]interface{})
+	if len(objArr) == 0 || objArr[0] == nil {
+		return containerazure.EmptyNodePoolAutoscaling
+	}
+	obj := objArr[0].(map[string]interface{})
+	return &containerazure.NodePoolAutoscaling{
+		MaxNodeCount: dcl.Int64(int64(obj["max_node_count"].(int))),
+		MinNodeCount: dcl.Int64(int64(obj["min_node_count"].(int))),
+	}
+}
+
+func flattenContainerAzureNodePoolAutoscaling(obj *containerazure.NodePoolAutoscaling) interface{} {
+	if obj == nil || obj.Empty() {
+		return nil
+	}
+	transformed := map[string]interface{}{
+		"max_node_count": obj.MaxNodeCount,
+		"min_node_count": obj.MinNodeCount,
+	}
+
+	return 
[]interface{}{transformed} + +} + +func expandContainerAzureNodePoolConfig(o interface{}) *containerazure.NodePoolConfig { + if o == nil { + return containerazure.EmptyNodePoolConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return containerazure.EmptyNodePoolConfig + } + obj := objArr[0].(map[string]interface{}) + return &containerazure.NodePoolConfig{ + SshConfig: expandContainerAzureNodePoolConfigSshConfig(obj["ssh_config"]), +{{- if ne $.TargetVersionName "ga" }} + ImageType: dcl.StringOrNil(obj["image_type"].(string)), +{{- end }} + Labels: tpgresource.CheckStringMap(obj["labels"]), + ProxyConfig: expandContainerAzureNodePoolConfigProxyConfig(obj["proxy_config"]), + RootVolume: expandContainerAzureNodePoolConfigRootVolume(obj["root_volume"]), + Tags: tpgresource.CheckStringMap(obj["tags"]), + VmSize: dcl.StringOrNil(obj["vm_size"].(string)), + } +} + +func flattenContainerAzureNodePoolConfig(obj *containerazure.NodePoolConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "ssh_config": flattenContainerAzureNodePoolConfigSshConfig(obj.SshConfig), +{{- if ne $.TargetVersionName "ga" }} + "image_type": obj.ImageType, +{{- end }} + "labels": obj.Labels, + "proxy_config": flattenContainerAzureNodePoolConfigProxyConfig(obj.ProxyConfig), + "root_volume": flattenContainerAzureNodePoolConfigRootVolume(obj.RootVolume), + "tags": obj.Tags, + "vm_size": obj.VmSize, + } + + return []interface{}{transformed} + +} + +func expandContainerAzureNodePoolConfigSshConfig(o interface{}) *containerazure.NodePoolConfigSshConfig { + if o == nil { + return containerazure.EmptyNodePoolConfigSshConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return containerazure.EmptyNodePoolConfigSshConfig + } + obj := objArr[0].(map[string]interface{}) + return &containerazure.NodePoolConfigSshConfig{ + AuthorizedKey: dcl.String(obj["authorized_key"].(string)), + } 
+} + +func flattenContainerAzureNodePoolConfigSshConfig(obj *containerazure.NodePoolConfigSshConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "authorized_key": obj.AuthorizedKey, + } + + return []interface{}{transformed} + +} + +func expandContainerAzureNodePoolConfigProxyConfig(o interface{}) *containerazure.NodePoolConfigProxyConfig { + if o == nil { + return containerazure.EmptyNodePoolConfigProxyConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return containerazure.EmptyNodePoolConfigProxyConfig + } + obj := objArr[0].(map[string]interface{}) + return &containerazure.NodePoolConfigProxyConfig{ + ResourceGroupId: dcl.String(obj["resource_group_id"].(string)), + SecretId: dcl.String(obj["secret_id"].(string)), + } +} + +func flattenContainerAzureNodePoolConfigProxyConfig(obj *containerazure.NodePoolConfigProxyConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "resource_group_id": obj.ResourceGroupId, + "secret_id": obj.SecretId, + } + + return []interface{}{transformed} + +} + +func expandContainerAzureNodePoolConfigRootVolume(o interface{}) *containerazure.NodePoolConfigRootVolume { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &containerazure.NodePoolConfigRootVolume{ + SizeGib: dcl.Int64OrNil(int64(obj["size_gib"].(int))), + } +} + +func flattenContainerAzureNodePoolConfigRootVolume(obj *containerazure.NodePoolConfigRootVolume) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "size_gib": obj.SizeGib, + } + + return []interface{}{transformed} + +} + +func expandContainerAzureNodePoolMaxPodsConstraint(o interface{}) *containerazure.NodePoolMaxPodsConstraint { + if o == nil { + return 
containerazure.EmptyNodePoolMaxPodsConstraint + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return containerazure.EmptyNodePoolMaxPodsConstraint + } + obj := objArr[0].(map[string]interface{}) + return &containerazure.NodePoolMaxPodsConstraint{ + MaxPodsPerNode: dcl.Int64(int64(obj["max_pods_per_node"].(int))), + } +} + +func flattenContainerAzureNodePoolMaxPodsConstraint(obj *containerazure.NodePoolMaxPodsConstraint) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "max_pods_per_node": obj.MaxPodsPerNode, + } + + return []interface{}{transformed} + +} + +func expandContainerAzureNodePoolManagement(o interface{}) *containerazure.NodePoolManagement { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &containerazure.NodePoolManagement{ + AutoRepair: dcl.Bool(obj["auto_repair"].(bool)), + } +} + +func flattenContainerAzureNodePoolManagement(obj *containerazure.NodePoolManagement) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "auto_repair": obj.AutoRepair, + } + + return []interface{}{transformed} + +} + +func flattenContainerAzureNodePoolAnnotations(v map[string]string, d *schema.ResourceData) interface{} { + if v == nil { + return nil + } + + transformed := make(map[string]interface{}) + if l, ok := d.Get("annotations").(map[string]interface{}); ok { + for k := range l { + transformed[k] = v[k] + } + } + + return transformed +} diff --git a/mmv1/third_party/terraform/services/containerazure/resource_container_azure_node_pool_generated_test.go.tmpl b/mmv1/third_party/terraform/services/containerazure/resource_container_azure_node_pool_generated_test.go.tmpl new file mode 100644 index 000000000000..3a608d2b3d29 --- /dev/null +++ 
b/mmv1/third_party/terraform/services/containerazure/resource_container_azure_node_pool_generated_test.go.tmpl @@ -0,0 +1,599 @@ +package containerazure_test + +import ( + "context" + "fmt" + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" +{{- if eq $.TargetVersionName "ga" }} + containerazure "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containerazure" +{{- else }} + containerazure "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containerazure/beta" +{{- end }} + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func TestAccContainerAzureNodePool_BasicHandWritten(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "azure_app": "00000000-0000-0000-0000-17aad2f0f61f", + "azure_config_secret": "07d4b1f1a7cb4b1b91f070c30ae761a1", + "azure_sub": "00000000-0000-0000-0000-17aad2f0f61f", + "azure_tenant": "00000000-0000-0000-0000-17aad2f0f61f", + "byo_prefix": "mmv2", + "project_name": envvar.GetTestProjectFromEnv(), + "project_number": envvar.GetTestProjectNumberFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerAzureNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerAzureNodePool_BasicHandWritten(context), + }, + { + ResourceName: "google_container_azure_node_pool.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"management.#", "management.0.%", 
"management.0.auto_repair", "annotations"}, + }, + { + Config: testAccContainerAzureNodePool_BasicHandWrittenUpdate0(context), + }, + { + ResourceName: "google_container_azure_node_pool.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"management.#", "management.0.%", "management.0.auto_repair", "annotations"}, + }, + }, + }) +} +{{- if ne $.TargetVersionName "ga" }} +func TestAccContainerAzureNodePool_BetaBasicHandWritten(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "azure_app": "00000000-0000-0000-0000-17aad2f0f61f", + "azure_config_secret": "07d4b1f1a7cb4b1b91f070c30ae761a1", + "azure_sub": "00000000-0000-0000-0000-17aad2f0f61f", + "azure_tenant": "00000000-0000-0000-0000-17aad2f0f61f", + "byo_prefix": "mmv2", + "project_name": envvar.GetTestProjectFromEnv(), + "project_number": envvar.GetTestProjectNumberFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckContainerAzureNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerAzureNodePool_BetaBasicHandWritten(context), + }, + { + ResourceName: "google_container_azure_node_pool.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"management.#", "management.0.%", "management.0.auto_repair", "annotations"}, + }, + { + Config: testAccContainerAzureNodePool_BetaBasicHandWrittenUpdate0(context), + }, + { + ResourceName: "google_container_azure_node_pool.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"management.#", "management.0.%", "management.0.auto_repair", "annotations"}, + }, + }, + }) +} +{{- end }} + +func testAccContainerAzureNodePool_BasicHandWritten(context map[string]interface{}) string { + return acctest.Nprintf(` +data 
"google_container_azure_versions" "versions" { + project = "%{project_name}" + location = "us-west1" +} + +resource "google_container_azure_cluster" "primary" { + authorization { + admin_users { + username = "mmv2@google.com" + } + } + + azure_region = "westus2" + client = "projects/%{project_number}/locations/us-west1/azureClients/${google_container_azure_client.basic.name}" + + control_plane { + ssh_config { + authorized_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8yaayO6lnb2v+SedxUMa2c8vtIEzCzBjM3EJJsv8Vm9zUDWR7dXWKoNGARUb2mNGXASvI6mFIDXTIlkQ0poDEPpMaXR0g2cb5xT8jAAJq7fqXL3+0rcJhY/uigQ+MrT6s+ub0BFVbsmGHNrMQttXX9gtmwkeAEvj3mra9e5pkNf90qlKnZz6U0SVArxVsLx07vHPHDIYrl0OPG4zUREF52igbBPiNrHJFDQJT/4YlDMJmo/QT/A1D6n9ocemvZSzhRx15/Arjowhr+VVKSbaxzPtEfY0oIg2SrqJnnr/l3Du5qIefwh5VmCZe4xopPUaDDoOIEFriZ88sB+3zz8ib8sk8zJJQCgeP78tQvXCgS+4e5W3TUg9mxjB6KjXTyHIVhDZqhqde0OI3Fy1UuVzRUwnBaLjBnAwP5EoFQGRmDYk/rEYe7HTmovLeEBUDQocBQKT4Ripm/xJkkWY7B07K/tfo56dGUCkvyIVXKBInCh+dLK7gZapnd4UWkY0xBYcwo1geMLRq58iFTLA2j/JmpmHXp7m0l7jJii7d44uD3tTIFYThn7NlOnvhLim/YcBK07GMGIN7XwrrKZKmxXaspw6KBWVhzuw1UPxctxshYEaMLfFg/bwOw8HvMPr9VtrElpSB7oiOh91PDIPdPBgHCi7N2QgQ5l/ZDBHieSpNrQ== thomasrodgers" + } + + subnet_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet/subnets/default" + version = "${data.google_container_azure_versions.versions.valid_versions[0]}" + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.200.0.0/16"] + service_address_cidr_blocks = ["10.32.0.0/24"] + virtual_network_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet" + } + + resource_group_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-cluster" + project = "%{project_name}" +} + +resource "google_container_azure_client" "basic" { + 
application_id = "%{azure_app}" + location = "us-west1" + name = "tf-test-client-name%{random_suffix}" + tenant_id = "%{azure_tenant}" + project = "%{project_name}" +} + +resource "google_container_azure_node_pool" "primary" { + autoscaling { + max_node_count = 3 + min_node_count = 2 + } + + cluster = google_container_azure_cluster.primary.name + + config { + ssh_config { + authorized_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8yaayO6lnb2v+SedxUMa2c8vtIEzCzBjM3EJJsv8Vm9zUDWR7dXWKoNGARUb2mNGXASvI6mFIDXTIlkQ0poDEPpMaXR0g2cb5xT8jAAJq7fqXL3+0rcJhY/uigQ+MrT6s+ub0BFVbsmGHNrMQttXX9gtmwkeAEvj3mra9e5pkNf90qlKnZz6U0SVArxVsLx07vHPHDIYrl0OPG4zUREF52igbBPiNrHJFDQJT/4YlDMJmo/QT/A1D6n9ocemvZSzhRx15/Arjowhr+VVKSbaxzPtEfY0oIg2SrqJnnr/l3Du5qIefwh5VmCZe4xopPUaDDoOIEFriZ88sB+3zz8ib8sk8zJJQCgeP78tQvXCgS+4e5W3TUg9mxjB6KjXTyHIVhDZqhqde0OI3Fy1UuVzRUwnBaLjBnAwP5EoFQGRmDYk/rEYe7HTmovLeEBUDQocBQKT4Ripm/xJkkWY7B07K/tfo56dGUCkvyIVXKBInCh+dLK7gZapnd4UWkY0xBYcwo1geMLRq58iFTLA2j/JmpmHXp7m0l7jJii7d44uD3tTIFYThn7NlOnvhLim/YcBK07GMGIN7XwrrKZKmxXaspw6KBWVhzuw1UPxctxshYEaMLfFg/bwOw8HvMPr9VtrElpSB7oiOh91PDIPdPBgHCi7N2QgQ5l/ZDBHieSpNrQ== thomasrodgers" + } + + proxy_config { + resource_group_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-cluster" + secret_id = "https://%{byo_prefix}-dev-keyvault.vault.azure.net/secrets/%{byo_prefix}-dev-secret/%{azure_config_secret}" + } + + root_volume { + size_gib = 32 + } + + tags = { + owner = "mmv2" + } + + labels = { + key_one = "label_one" + } + + vm_size = "Standard_DS2_v2" + } + + location = "us-west1" + + max_pods_constraint { + max_pods_per_node = 110 + } + + name = "tf-test-node-pool-name%{random_suffix}" + subnet_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet/subnets/default" + version = "${data.google_container_azure_versions.versions.valid_versions[0]}" + + annotations = { + annotation-one = "value-one" + } + + management { + auto_repair = 
true + } + + project = "%{project_name}" +} + + +`, context) +} + +func testAccContainerAzureNodePool_BasicHandWrittenUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_container_azure_versions" "versions" { + project = "%{project_name}" + location = "us-west1" +} + + +resource "google_container_azure_cluster" "primary" { + authorization { + admin_users { + username = "mmv2@google.com" + } + } + + azure_region = "westus2" + client = "projects/%{project_number}/locations/us-west1/azureClients/${google_container_azure_client.basic.name}" + + control_plane { + ssh_config { + authorized_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8yaayO6lnb2v+SedxUMa2c8vtIEzCzBjM3EJJsv8Vm9zUDWR7dXWKoNGARUb2mNGXASvI6mFIDXTIlkQ0poDEPpMaXR0g2cb5xT8jAAJq7fqXL3+0rcJhY/uigQ+MrT6s+ub0BFVbsmGHNrMQttXX9gtmwkeAEvj3mra9e5pkNf90qlKnZz6U0SVArxVsLx07vHPHDIYrl0OPG4zUREF52igbBPiNrHJFDQJT/4YlDMJmo/QT/A1D6n9ocemvZSzhRx15/Arjowhr+VVKSbaxzPtEfY0oIg2SrqJnnr/l3Du5qIefwh5VmCZe4xopPUaDDoOIEFriZ88sB+3zz8ib8sk8zJJQCgeP78tQvXCgS+4e5W3TUg9mxjB6KjXTyHIVhDZqhqde0OI3Fy1UuVzRUwnBaLjBnAwP5EoFQGRmDYk/rEYe7HTmovLeEBUDQocBQKT4Ripm/xJkkWY7B07K/tfo56dGUCkvyIVXKBInCh+dLK7gZapnd4UWkY0xBYcwo1geMLRq58iFTLA2j/JmpmHXp7m0l7jJii7d44uD3tTIFYThn7NlOnvhLim/YcBK07GMGIN7XwrrKZKmxXaspw6KBWVhzuw1UPxctxshYEaMLfFg/bwOw8HvMPr9VtrElpSB7oiOh91PDIPdPBgHCi7N2QgQ5l/ZDBHieSpNrQ== thomasrodgers" + } + + subnet_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet/subnets/default" + version = "${data.google_container_azure_versions.versions.valid_versions[0]}" + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.200.0.0/16"] + service_address_cidr_blocks = ["10.32.0.0/24"] + virtual_network_id = 
"/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet" + } + + resource_group_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-cluster" + project = "%{project_name}" +} + +resource "google_container_azure_client" "basic" { + application_id = "%{azure_app}" + location = "us-west1" + name = "tf-test-client-name%{random_suffix}" + tenant_id = "%{azure_tenant}" + project = "%{project_name}" +} + +resource "google_container_azure_node_pool" "primary" { + autoscaling { + max_node_count = 3 + min_node_count = 2 + } + + cluster = google_container_azure_cluster.primary.name + + config { + ssh_config { + authorized_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8yaayO6lnb2v+SedxUMa2c8vtIEzCzBjM3EJJsv8Vm9zUDWR7dXWKoNGARUb2mNGXASvI6mFIDXTIlkQ0poDEPpMaXR0g2cb5xT8jAAJq7fqXL3+0rcJhY/uigQ+MrT6s+ub0BFVbsmGHNrMQttXX9gtmwkeAEvj3mra9e5pkNf90qlKnZz6U0SVArxVsLx07vHPHDIYrl0OPG4zUREF52igbBPiNrHJFDQJT/4YlDMJmo/QT/A1D6n9ocemvZSzhRx15/Arjowhr+VVKSbaxzPtEfY0oIg2SrqJnnr/l3Du5qIefwh5VmCZe4xopPUaDDoOIEFriZ88sB+3zz8ib8sk8zJJQCgeP78tQvXCgS+4e5W3TUg9mxjB6KjXTyHIVhDZqhqde0OI3Fy1UuVzRUwnBaLjBnAwP5EoFQGRmDYk/rEYe7HTmovLeEBUDQocBQKT4Ripm/xJkkWY7B07K/tfo56dGUCkvyIVXKBInCh+dLK7gZapnd4UWkY0xBYcwo1geMLRq58iFTLA2j/JmpmHXp7m0l7jJii7d44uD3tTIFYThn7NlOnvhLim/YcBK07GMGIN7XwrrKZKmxXaspw6KBWVhzuw1UPxctxshYEaMLfFg/bwOw8HvMPr9VtrElpSB7oiOh91PDIPdPBgHCi7N2QgQ5l/ZDBHieSpNrQ== thomasrodgers" + } + + proxy_config { + resource_group_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-cluster" + secret_id = "https://%{byo_prefix}-dev-keyvault.vault.azure.net/secrets/%{byo_prefix}-dev-secret/%{azure_config_secret}" + } + + root_volume { + size_gib = 32 + } + + tags = { + owner = "mmv2" + } + + labels = { + key_two = "label_two" + } + + vm_size = "Standard_DS2_v2" + } + + location = "us-west1" + + max_pods_constraint { + max_pods_per_node = 110 + } + + name = "tf-test-node-pool-name%{random_suffix}" + subnet_id = 
"/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet/subnets/default" + version = "${data.google_container_azure_versions.versions.valid_versions[0]}" + + annotations = { + annotation-two = "value-two" + } + + management { + auto_repair = false +{{- if ne $.TargetVersionName "ga" }} + } + + project = "%{project_name}" +} + + +`, context) +} + +func testAccContainerAzureNodePool_BetaBasicHandWritten(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_container_azure_versions" "versions" { + project = "%{project_name}" + location = "us-west1" + provider = google-beta +} + +resource "google_container_azure_cluster" "primary" { + provider = google-beta + authorization { + admin_users { + username = "mmv2@google.com" + } + } + + azure_region = "westus2" + client = "projects/%{project_number}/locations/us-west1/azureClients/${google_container_azure_client.basic.name}" + + control_plane { + ssh_config { + authorized_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8yaayO6lnb2v+SedxUMa2c8vtIEzCzBjM3EJJsv8Vm9zUDWR7dXWKoNGARUb2mNGXASvI6mFIDXTIlkQ0poDEPpMaXR0g2cb5xT8jAAJq7fqXL3+0rcJhY/uigQ+MrT6s+ub0BFVbsmGHNrMQttXX9gtmwkeAEvj3mra9e5pkNf90qlKnZz6U0SVArxVsLx07vHPHDIYrl0OPG4zUREF52igbBPiNrHJFDQJT/4YlDMJmo/QT/A1D6n9ocemvZSzhRx15/Arjowhr+VVKSbaxzPtEfY0oIg2SrqJnnr/l3Du5qIefwh5VmCZe4xopPUaDDoOIEFriZ88sB+3zz8ib8sk8zJJQCgeP78tQvXCgS+4e5W3TUg9mxjB6KjXTyHIVhDZqhqde0OI3Fy1UuVzRUwnBaLjBnAwP5EoFQGRmDYk/rEYe7HTmovLeEBUDQocBQKT4Ripm/xJkkWY7B07K/tfo56dGUCkvyIVXKBInCh+dLK7gZapnd4UWkY0xBYcwo1geMLRq58iFTLA2j/JmpmHXp7m0l7jJii7d44uD3tTIFYThn7NlOnvhLim/YcBK07GMGIN7XwrrKZKmxXaspw6KBWVhzuw1UPxctxshYEaMLfFg/bwOw8HvMPr9VtrElpSB7oiOh91PDIPdPBgHCi7N2QgQ5l/ZDBHieSpNrQ== thomasrodgers" + } + + subnet_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet/subnets/default" + version = 
"${data.google_container_azure_versions.versions.valid_versions[0]}" + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.200.0.0/16"] + service_address_cidr_blocks = ["10.32.0.0/24"] + virtual_network_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet" + } + + resource_group_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-cluster" + project = "%{project_name}" +} + +resource "google_container_azure_client" "basic" { + provider = google-beta + application_id = "%{azure_app}" + location = "us-west1" + name = "tf-test-client-name%{random_suffix}" + tenant_id = "%{azure_tenant}" + project = "%{project_name}" +} + +resource "google_container_azure_node_pool" "primary" { + provider = google-beta + autoscaling { + max_node_count = 3 + min_node_count = 2 + } + + cluster = google_container_azure_cluster.primary.name + + config { + ssh_config { + authorized_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8yaayO6lnb2v+SedxUMa2c8vtIEzCzBjM3EJJsv8Vm9zUDWR7dXWKoNGARUb2mNGXASvI6mFIDXTIlkQ0poDEPpMaXR0g2cb5xT8jAAJq7fqXL3+0rcJhY/uigQ+MrT6s+ub0BFVbsmGHNrMQttXX9gtmwkeAEvj3mra9e5pkNf90qlKnZz6U0SVArxVsLx07vHPHDIYrl0OPG4zUREF52igbBPiNrHJFDQJT/4YlDMJmo/QT/A1D6n9ocemvZSzhRx15/Arjowhr+VVKSbaxzPtEfY0oIg2SrqJnnr/l3Du5qIefwh5VmCZe4xopPUaDDoOIEFriZ88sB+3zz8ib8sk8zJJQCgeP78tQvXCgS+4e5W3TUg9mxjB6KjXTyHIVhDZqhqde0OI3Fy1UuVzRUwnBaLjBnAwP5EoFQGRmDYk/rEYe7HTmovLeEBUDQocBQKT4Ripm/xJkkWY7B07K/tfo56dGUCkvyIVXKBInCh+dLK7gZapnd4UWkY0xBYcwo1geMLRq58iFTLA2j/JmpmHXp7m0l7jJii7d44uD3tTIFYThn7NlOnvhLim/YcBK07GMGIN7XwrrKZKmxXaspw6KBWVhzuw1UPxctxshYEaMLfFg/bwOw8HvMPr9VtrElpSB7oiOh91PDIPdPBgHCi7N2QgQ5l/ZDBHieSpNrQ== thomasrodgers" + } + + proxy_config { + resource_group_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-cluster" + secret_id = 
"https://%{byo_prefix}-dev-keyvault.vault.azure.net/secrets/%{byo_prefix}-dev-secret/%{azure_config_secret}" + } + + root_volume { + size_gib = 32 + } + + tags = { + owner = "mmv2" + } + + labels = { + key_one = "label_one" + } + + vm_size = "Standard_DS2_v2" + + image_type = "ubuntu" + } + + location = "us-west1" + + max_pods_constraint { + max_pods_per_node = 110 + } + + name = "tf-test-node-pool-name%{random_suffix}" + subnet_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet/subnets/default" + version = "${data.google_container_azure_versions.versions.valid_versions[0]}" + + management { + auto_repair = true + } + + annotations = { + annotation-one = "value-one" + } + + project = "%{project_name}" +} + + +`, context) +} + +func testAccContainerAzureNodePool_BetaBasicHandWrittenUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_container_azure_versions" "versions" { + provider = google-beta + project = "%{project_name}" + location = "us-west1" +} + + +resource "google_container_azure_cluster" "primary" { + provider = google-beta + authorization { + admin_users { + username = "mmv2@google.com" + } + } + + azure_region = "westus2" + client = "projects/%{project_number}/locations/us-west1/azureClients/${google_container_azure_client.basic.name}" + + control_plane { + ssh_config { + authorized_key = "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQC8yaayO6lnb2v+SedxUMa2c8vtIEzCzBjM3EJJsv8Vm9zUDWR7dXWKoNGARUb2mNGXASvI6mFIDXTIlkQ0poDEPpMaXR0g2cb5xT8jAAJq7fqXL3+0rcJhY/uigQ+MrT6s+ub0BFVbsmGHNrMQttXX9gtmwkeAEvj3mra9e5pkNf90qlKnZz6U0SVArxVsLx07vHPHDIYrl0OPG4zUREF52igbBPiNrHJFDQJT/4YlDMJmo/QT/A1D6n9ocemvZSzhRx15/Arjowhr+VVKSbaxzPtEfY0oIg2SrqJnnr/l3Du5qIefwh5VmCZe4xopPUaDDoOIEFriZ88sB+3zz8ib8sk8zJJQCgeP78tQvXCgS+4e5W3TUg9mxjB6KjXTyHIVhDZqhqde0OI3Fy1UuVzRUwnBaLjBnAwP5EoFQGRmDYk/rEYe7HTmovLeEBUDQocBQKT4Ripm/xJkkWY7B07K/tfo56dGUCkvyIVXKBInCh+dLK7gZapnd4UWkY0xBYcwo1geMLRq58iFTLA2j/JmpmHXp7m0l7jJii7d44uD3tTIFYThn7NlOnvhLim/YcBK07GMGIN7XwrrKZKmxXaspw6KBWVhzuw1UPxctxshYEaMLfFg/bwOw8HvMPr9VtrElpSB7oiOh91PDIPdPBgHCi7N2QgQ5l/ZDBHieSpNrQ== thomasrodgers" + } + + subnet_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet/subnets/default" + version = "${data.google_container_azure_versions.versions.valid_versions[0]}" + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.200.0.0/16"] + service_address_cidr_blocks = ["10.32.0.0/24"] + virtual_network_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet" + } + + resource_group_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-cluster" + project = "%{project_name}" +} + +resource "google_container_azure_client" "basic" { + provider = google-beta + application_id = "%{azure_app}" + location = "us-west1" + name = "tf-test-client-name%{random_suffix}" + tenant_id = "%{azure_tenant}" + project = "%{project_name}" +} + +resource "google_container_azure_node_pool" "primary" { + provider = google-beta + autoscaling { + max_node_count = 3 + min_node_count = 2 + } + + cluster = google_container_azure_cluster.primary.name + + config { + ssh_config { + authorized_key 
= "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8yaayO6lnb2v+SedxUMa2c8vtIEzCzBjM3EJJsv8Vm9zUDWR7dXWKoNGARUb2mNGXASvI6mFIDXTIlkQ0poDEPpMaXR0g2cb5xT8jAAJq7fqXL3+0rcJhY/uigQ+MrT6s+ub0BFVbsmGHNrMQttXX9gtmwkeAEvj3mra9e5pkNf90qlKnZz6U0SVArxVsLx07vHPHDIYrl0OPG4zUREF52igbBPiNrHJFDQJT/4YlDMJmo/QT/A1D6n9ocemvZSzhRx15/Arjowhr+VVKSbaxzPtEfY0oIg2SrqJnnr/l3Du5qIefwh5VmCZe4xopPUaDDoOIEFriZ88sB+3zz8ib8sk8zJJQCgeP78tQvXCgS+4e5W3TUg9mxjB6KjXTyHIVhDZqhqde0OI3Fy1UuVzRUwnBaLjBnAwP5EoFQGRmDYk/rEYe7HTmovLeEBUDQocBQKT4Ripm/xJkkWY7B07K/tfo56dGUCkvyIVXKBInCh+dLK7gZapnd4UWkY0xBYcwo1geMLRq58iFTLA2j/JmpmHXp7m0l7jJii7d44uD3tTIFYThn7NlOnvhLim/YcBK07GMGIN7XwrrKZKmxXaspw6KBWVhzuw1UPxctxshYEaMLfFg/bwOw8HvMPr9VtrElpSB7oiOh91PDIPdPBgHCi7N2QgQ5l/ZDBHieSpNrQ== thomasrodgers" + } + + proxy_config { + resource_group_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-cluster" + secret_id = "https://%{byo_prefix}-dev-keyvault.vault.azure.net/secrets/%{byo_prefix}-dev-secret/%{azure_config_secret}" + } + + root_volume { + size_gib = 32 + } + + tags = { + owner = "mmv2" + } + + labels = { + key_two = "label_two" + } + + vm_size = "Standard_DS2_v2" + + image_type = "ubuntu" + } + + location = "us-west1" + + max_pods_constraint { + max_pods_per_node = 110 + } + + name = "tf-test-node-pool-name%{random_suffix}" + subnet_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet/subnets/default" + version = "${data.google_container_azure_versions.versions.valid_versions[0]}" + + management { + auto_repair = false + } + + annotations = { + annotation-two = "value-two" +{{- end }} + } + + project = "%{project_name}" +} + + +`, context) +} + +func testAccCheckContainerAzureNodePoolDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "rs.google_container_azure_node_pool" { + continue + } + if 
strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + billingProject := "" + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + obj := &containerazure.NodePool{ + Cluster: dcl.String(rs.Primary.Attributes["cluster"]), + Location: dcl.String(rs.Primary.Attributes["location"]), + Name: dcl.String(rs.Primary.Attributes["name"]), + SubnetId: dcl.String(rs.Primary.Attributes["subnet_id"]), + Version: dcl.String(rs.Primary.Attributes["version"]), + AzureAvailabilityZone: dcl.StringOrNil(rs.Primary.Attributes["azure_availability_zone"]), + Project: dcl.StringOrNil(rs.Primary.Attributes["project"]), + CreateTime: dcl.StringOrNil(rs.Primary.Attributes["create_time"]), + Etag: dcl.StringOrNil(rs.Primary.Attributes["etag"]), + Reconciling: dcl.Bool(rs.Primary.Attributes["reconciling"] == "true"), + State: containerazure.NodePoolStateEnumRef(rs.Primary.Attributes["state"]), + Uid: dcl.StringOrNil(rs.Primary.Attributes["uid"]), + UpdateTime: dcl.StringOrNil(rs.Primary.Attributes["update_time"]), + } + + client := transport_tpg.NewDCLContainerAzureClient(config, config.UserAgent, billingProject, 0) + _, err := client.GetNodePool(context.Background(), obj) + if err == nil { + return fmt.Errorf("google_container_azure_node_pool still exists %v", obj) + } + } + return nil + } +} diff --git a/mmv1/third_party/terraform/services/dataplex/asset.go.tmpl b/mmv1/third_party/terraform/services/dataplex/asset.go.tmpl new file mode 100644 index 000000000000..abedea89dad7 --- /dev/null +++ b/mmv1/third_party/terraform/services/dataplex/asset.go.tmpl @@ -0,0 +1,1005 @@ +package dataplex + +import ( + "bytes" + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "time" + +{{- if ne $.TargetVersionName "ga" }} + "google.golang.org/api/googleapi" +{{- end }} + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +{{- if eq $.TargetVersionName "ga" }} + "google.golang.org/api/googleapi" +{{- end 
}} +) + +type Asset struct { + Name *string `json:"name"` + DisplayName *string `json:"displayName"` + Uid *string `json:"uid"` + CreateTime *string `json:"createTime"` + UpdateTime *string `json:"updateTime"` + Labels map[string]string `json:"labels"` + Description *string `json:"description"` + State *AssetStateEnum `json:"state"` + ResourceSpec *AssetResourceSpec `json:"resourceSpec"` + ResourceStatus *AssetResourceStatus `json:"resourceStatus"` + SecurityStatus *AssetSecurityStatus `json:"securityStatus"` + DiscoverySpec *AssetDiscoverySpec `json:"discoverySpec"` + DiscoveryStatus *AssetDiscoveryStatus `json:"discoveryStatus"` + Project *string `json:"project"` + Location *string `json:"location"` + Lake *string `json:"lake"` + DataplexZone *string `json:"dataplexZone"` +} + +func (r *Asset) String() string { + return dcl.SprintResource(r) +} + +// The enum AssetStateEnum. +type AssetStateEnum string + +// AssetStateEnumRef returns a *AssetStateEnum with the value of string s +// If the empty string is provided, nil is returned. +func AssetStateEnumRef(s string) *AssetStateEnum { + v := AssetStateEnum(s) + return &v +} + +func (v AssetStateEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"STATE_UNSPECIFIED", "ACTIVE", "CREATING", "DELETING", "ACTION_REQUIRED"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "AssetStateEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum AssetResourceSpecTypeEnum. +type AssetResourceSpecTypeEnum string + +// AssetResourceSpecTypeEnumRef returns a *AssetResourceSpecTypeEnum with the value of string s +// If the empty string is provided, nil is returned. +func AssetResourceSpecTypeEnumRef(s string) *AssetResourceSpecTypeEnum { + v := AssetResourceSpecTypeEnum(s) + return &v +} + +func (v AssetResourceSpecTypeEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. 
+ return nil + } + for _, s := range []string{"STORAGE_BUCKET", "BIGQUERY_DATASET"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "AssetResourceSpecTypeEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum AssetResourceSpecReadAccessModeEnum. +type AssetResourceSpecReadAccessModeEnum string + +// AssetResourceSpecReadAccessModeEnumRef returns a *AssetResourceSpecReadAccessModeEnum with the value of string s +// If the empty string is provided, nil is returned. +func AssetResourceSpecReadAccessModeEnumRef(s string) *AssetResourceSpecReadAccessModeEnum { + v := AssetResourceSpecReadAccessModeEnum(s) + return &v +} + +func (v AssetResourceSpecReadAccessModeEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"DIRECT", "MANAGED"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "AssetResourceSpecReadAccessModeEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum AssetResourceStatusStateEnum. +type AssetResourceStatusStateEnum string + +// AssetResourceStatusStateEnumRef returns a *AssetResourceStatusStateEnum with the value of string s +// If the empty string is provided, nil is returned. +func AssetResourceStatusStateEnumRef(s string) *AssetResourceStatusStateEnum { + v := AssetResourceStatusStateEnum(s) + return &v +} + +func (v AssetResourceStatusStateEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"STATE_UNSPECIFIED", "READY", "ERROR"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "AssetResourceStatusStateEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum AssetSecurityStatusStateEnum. 
+type AssetSecurityStatusStateEnum string + +// AssetSecurityStatusStateEnumRef returns a *AssetSecurityStatusStateEnum with the value of string s +// If the empty string is provided, nil is returned. +func AssetSecurityStatusStateEnumRef(s string) *AssetSecurityStatusStateEnum { + v := AssetSecurityStatusStateEnum(s) + return &v +} + +func (v AssetSecurityStatusStateEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"STATE_UNSPECIFIED", "READY", "APPLYING", "ERROR"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "AssetSecurityStatusStateEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum AssetDiscoveryStatusStateEnum. +type AssetDiscoveryStatusStateEnum string + +// AssetDiscoveryStatusStateEnumRef returns a *AssetDiscoveryStatusStateEnum with the value of string s +// If the empty string is provided, nil is returned. +func AssetDiscoveryStatusStateEnumRef(s string) *AssetDiscoveryStatusStateEnum { + v := AssetDiscoveryStatusStateEnum(s) + return &v +} + +func (v AssetDiscoveryStatusStateEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. 
+ return nil + } + for _, s := range []string{"STATE_UNSPECIFIED", "SCHEDULED", "IN_PROGRESS", "PAUSED", "DISABLED"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "AssetDiscoveryStatusStateEnum", + Value: string(v), + Valid: []string{}, + } +} + +type AssetResourceSpec struct { + empty bool `json:"-"` + Name *string `json:"name"` + Type *AssetResourceSpecTypeEnum `json:"type"` + ReadAccessMode *AssetResourceSpecReadAccessModeEnum `json:"readAccessMode"` +} + +type jsonAssetResourceSpec AssetResourceSpec + +func (r *AssetResourceSpec) UnmarshalJSON(data []byte) error { + var res jsonAssetResourceSpec + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyAssetResourceSpec + } else { + + r.Name = res.Name + + r.Type = res.Type + + r.ReadAccessMode = res.ReadAccessMode + + } + return nil +} + +// This object is used to assert a desired state where this AssetResourceSpec is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyAssetResourceSpec *AssetResourceSpec = &AssetResourceSpec{empty: true} + +func (r *AssetResourceSpec) Empty() bool { + return r.empty +} + +func (r *AssetResourceSpec) String() string { + return dcl.SprintResource(r) +} + +func (r *AssetResourceSpec) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type AssetResourceStatus struct { + empty bool `json:"-"` + State *AssetResourceStatusStateEnum `json:"state"` + Message *string `json:"message"` + UpdateTime *string `json:"updateTime"` +} + +type jsonAssetResourceStatus AssetResourceStatus + +func (r *AssetResourceStatus) UnmarshalJSON(data []byte) error { + var res jsonAssetResourceStatus + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyAssetResourceStatus + } else { + + r.State = res.State + + r.Message = res.Message + + r.UpdateTime = res.UpdateTime + + } + return nil +} + +// This object is used to assert a desired state where this AssetResourceStatus is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyAssetResourceStatus *AssetResourceStatus = &AssetResourceStatus{empty: true} + +func (r *AssetResourceStatus) Empty() bool { + return r.empty +} + +func (r *AssetResourceStatus) String() string { + return dcl.SprintResource(r) +} + +func (r *AssetResourceStatus) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type AssetSecurityStatus struct { + empty bool `json:"-"` + State *AssetSecurityStatusStateEnum `json:"state"` + Message *string `json:"message"` + UpdateTime *string `json:"updateTime"` +} + +type jsonAssetSecurityStatus AssetSecurityStatus + +func (r *AssetSecurityStatus) UnmarshalJSON(data []byte) error { + var res jsonAssetSecurityStatus + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyAssetSecurityStatus + } else { + + r.State = res.State + + r.Message = res.Message + + r.UpdateTime = res.UpdateTime + + } + return nil +} + +// This object is used to assert a desired state where this AssetSecurityStatus is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyAssetSecurityStatus *AssetSecurityStatus = &AssetSecurityStatus{empty: true} + +func (r *AssetSecurityStatus) Empty() bool { + return r.empty +} + +func (r *AssetSecurityStatus) String() string { + return dcl.SprintResource(r) +} + +func (r *AssetSecurityStatus) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type AssetDiscoverySpec struct { + empty bool `json:"-"` + Enabled *bool `json:"enabled"` + IncludePatterns []string `json:"includePatterns"` + ExcludePatterns []string `json:"excludePatterns"` + CsvOptions *AssetDiscoverySpecCsvOptions `json:"csvOptions"` + JsonOptions *AssetDiscoverySpecJsonOptions `json:"jsonOptions"` + Schedule *string `json:"schedule"` +} + +type jsonAssetDiscoverySpec AssetDiscoverySpec + +func (r *AssetDiscoverySpec) UnmarshalJSON(data []byte) error { + var res jsonAssetDiscoverySpec + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyAssetDiscoverySpec + } else { + + r.Enabled = res.Enabled + + r.IncludePatterns = res.IncludePatterns + + r.ExcludePatterns = res.ExcludePatterns + + r.CsvOptions = res.CsvOptions + + r.JsonOptions = res.JsonOptions + + r.Schedule = res.Schedule + + } + return nil +} + +// This object is used to assert a desired state where this AssetDiscoverySpec is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyAssetDiscoverySpec *AssetDiscoverySpec = &AssetDiscoverySpec{empty: true} + +func (r *AssetDiscoverySpec) Empty() bool { + return r.empty +} + +func (r *AssetDiscoverySpec) String() string { + return dcl.SprintResource(r) +} + +func (r *AssetDiscoverySpec) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type AssetDiscoverySpecCsvOptions struct { + empty bool `json:"-"` + HeaderRows *int64 `json:"headerRows"` + Delimiter *string `json:"delimiter"` + Encoding *string `json:"encoding"` + DisableTypeInference *bool `json:"disableTypeInference"` +} + +type jsonAssetDiscoverySpecCsvOptions AssetDiscoverySpecCsvOptions + +func (r *AssetDiscoverySpecCsvOptions) UnmarshalJSON(data []byte) error { + var res jsonAssetDiscoverySpecCsvOptions + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyAssetDiscoverySpecCsvOptions + } else { + + r.HeaderRows = res.HeaderRows + + r.Delimiter = res.Delimiter + + r.Encoding = res.Encoding + + r.DisableTypeInference = res.DisableTypeInference + + } + return nil +} + +// This object is used to assert a desired state where this AssetDiscoverySpecCsvOptions is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyAssetDiscoverySpecCsvOptions *AssetDiscoverySpecCsvOptions = &AssetDiscoverySpecCsvOptions{empty: true} + +func (r *AssetDiscoverySpecCsvOptions) Empty() bool { + return r.empty +} + +func (r *AssetDiscoverySpecCsvOptions) String() string { + return dcl.SprintResource(r) +} + +func (r *AssetDiscoverySpecCsvOptions) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type AssetDiscoverySpecJsonOptions struct { + empty bool `json:"-"` + Encoding *string `json:"encoding"` + DisableTypeInference *bool `json:"disableTypeInference"` +} + +type jsonAssetDiscoverySpecJsonOptions AssetDiscoverySpecJsonOptions + +func (r *AssetDiscoverySpecJsonOptions) UnmarshalJSON(data []byte) error { + var res jsonAssetDiscoverySpecJsonOptions + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyAssetDiscoverySpecJsonOptions + } else { + + r.Encoding = res.Encoding + + r.DisableTypeInference = res.DisableTypeInference + + } + return nil +} + +// This object is used to assert a desired state where this AssetDiscoverySpecJsonOptions is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyAssetDiscoverySpecJsonOptions *AssetDiscoverySpecJsonOptions = &AssetDiscoverySpecJsonOptions{empty: true} + +func (r *AssetDiscoverySpecJsonOptions) Empty() bool { + return r.empty +} + +func (r *AssetDiscoverySpecJsonOptions) String() string { + return dcl.SprintResource(r) +} + +func (r *AssetDiscoverySpecJsonOptions) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type AssetDiscoveryStatus struct { + empty bool `json:"-"` + State *AssetDiscoveryStatusStateEnum `json:"state"` + Message *string `json:"message"` + UpdateTime *string `json:"updateTime"` + LastRunTime *string `json:"lastRunTime"` + Stats *AssetDiscoveryStatusStats `json:"stats"` + LastRunDuration *string `json:"lastRunDuration"` +} + +type jsonAssetDiscoveryStatus AssetDiscoveryStatus + +func (r *AssetDiscoveryStatus) UnmarshalJSON(data []byte) error { + var res jsonAssetDiscoveryStatus + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyAssetDiscoveryStatus + } else { + + r.State = res.State + + r.Message = res.Message + + r.UpdateTime = res.UpdateTime + + r.LastRunTime = res.LastRunTime + + r.Stats = res.Stats + + r.LastRunDuration = res.LastRunDuration + + } + return nil +} + +// This object is used to assert a desired state where this AssetDiscoveryStatus is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyAssetDiscoveryStatus *AssetDiscoveryStatus = &AssetDiscoveryStatus{empty: true} + +func (r *AssetDiscoveryStatus) Empty() bool { + return r.empty +} + +func (r *AssetDiscoveryStatus) String() string { + return dcl.SprintResource(r) +} + +func (r *AssetDiscoveryStatus) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type AssetDiscoveryStatusStats struct { + empty bool `json:"-"` + DataItems *int64 `json:"dataItems"` + DataSize *int64 `json:"dataSize"` + Tables *int64 `json:"tables"` + Filesets *int64 `json:"filesets"` +} + +type jsonAssetDiscoveryStatusStats AssetDiscoveryStatusStats + +func (r *AssetDiscoveryStatusStats) UnmarshalJSON(data []byte) error { + var res jsonAssetDiscoveryStatusStats + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyAssetDiscoveryStatusStats + } else { + + r.DataItems = res.DataItems + + r.DataSize = res.DataSize + + r.Tables = res.Tables + + r.Filesets = res.Filesets + + } + return nil +} + +// This object is used to assert a desired state where this AssetDiscoveryStatusStats is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyAssetDiscoveryStatusStats *AssetDiscoveryStatusStats = &AssetDiscoveryStatusStats{empty: true} + +func (r *AssetDiscoveryStatusStats) Empty() bool { + return r.empty +} + +func (r *AssetDiscoveryStatusStats) String() string { + return dcl.SprintResource(r) +} + +func (r *AssetDiscoveryStatusStats) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +// Describe returns a simple description of this resource to ensure that automated tools +// can identify it. +func (r *Asset) Describe() dcl.ServiceTypeVersion { + return dcl.ServiceTypeVersion{ + Service: "dataplex", + Type: "Asset", +{{- if ne $.TargetVersionName "ga" }} + Version: "beta", +{{- else }} + Version: "dataplex", +{{- end }} + } +} + +func (r *Asset) ID() (string, error) { + if err := extractAssetFields(r); err != nil { + return "", err + } + nr := r.urlNormalized() + params := map[string]interface{}{ + "name": dcl.ValueOrEmptyString(nr.Name), + "display_name": dcl.ValueOrEmptyString(nr.DisplayName), + "uid": dcl.ValueOrEmptyString(nr.Uid), + "create_time": dcl.ValueOrEmptyString(nr.CreateTime), + "update_time": dcl.ValueOrEmptyString(nr.UpdateTime), + "labels": dcl.ValueOrEmptyString(nr.Labels), + "description": dcl.ValueOrEmptyString(nr.Description), + "state": dcl.ValueOrEmptyString(nr.State), + "resource_spec": dcl.ValueOrEmptyString(nr.ResourceSpec), + "resource_status": dcl.ValueOrEmptyString(nr.ResourceStatus), + "security_status": dcl.ValueOrEmptyString(nr.SecurityStatus), + "discovery_spec": dcl.ValueOrEmptyString(nr.DiscoverySpec), + "discovery_status": dcl.ValueOrEmptyString(nr.DiscoveryStatus), + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "lake": dcl.ValueOrEmptyString(nr.Lake), + "dataplex_zone": dcl.ValueOrEmptyString(nr.DataplexZone), + } + return 
dcl.Nprintf("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/lakes/{{ "{{" }}lake{{ "}}" }}/zones/{{ "{{" }}dataplex_zone{{ "}}" }}/assets/{{ "{{" }}name{{ "}}" }}", params), nil +} + +const AssetMaxPage = -1 + +type AssetList struct { + Items []*Asset + + nextToken string + + pageSize int32 + + resource *Asset +} + +func (l *AssetList) HasNext() bool { + return l.nextToken != "" +} + +func (l *AssetList) Next(ctx context.Context, c *Client) error { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if !l.HasNext() { + return fmt.Errorf("no next page") + } + items, token, err := c.listAsset(ctx, l.resource, l.nextToken, l.pageSize) + if err != nil { + return err + } + l.Items = items + l.nextToken = token + return err +} + +func (c *Client) ListAsset(ctx context.Context, project, location, dataplexZone, lake string) (*AssetList, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + return c.ListAssetWithMaxResults(ctx, project, location, dataplexZone, lake, AssetMaxPage) + +} + +func (c *Client) ListAssetWithMaxResults(ctx context.Context, project, location, dataplexZone, lake string, pageSize int32) (*AssetList, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // Create a resource object so that we can use proper url normalization methods. 
+ r := &Asset{ + Project: &project, + Location: &location, + DataplexZone: &dataplexZone, + Lake: &lake, + } + items, token, err := c.listAsset(ctx, r, "", pageSize) + if err != nil { + return nil, err + } + return &AssetList{ + Items: items, + nextToken: token, + pageSize: pageSize, + resource: r, + }, nil +} + +func (c *Client) GetAsset(ctx context.Context, r *Asset) (*Asset, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // This is *purposefully* supressing errors. + // This function is used with url-normalized values + not URL normalized values. + // URL Normalized values will throw unintentional errors, since those values are not of the proper parent form. + extractAssetFields(r) + + b, err := c.getAssetRaw(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + return nil, &googleapi.Error{ + Code: 404, + Message: err.Error(), + } + } + return nil, err + } + result, err := unmarshalAsset(b, c, r) + if err != nil { + return nil, err + } + result.Project = r.Project + result.Location = r.Location + result.DataplexZone = r.DataplexZone + result.Lake = r.Lake + result.Name = r.Name + + c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) + result, err = canonicalizeAssetNewState(c, result, r) + if err != nil { + return nil, err + } + if err := postReadExtractAssetFields(result); err != nil { + return result, err + } + c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result) + + return result, nil +} + +func (c *Client) DeleteAsset(ctx context.Context, r *Asset) error { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if r == nil { + return fmt.Errorf("Asset resource is nil") + } + c.Config.Logger.InfoWithContext(ctx, "Deleting Asset...") + deleteOp := 
deleteAssetOperation{} + return deleteOp.do(ctx, r, c) +} + +// DeleteAllAsset deletes all resources that the filter functions returns true on. +func (c *Client) DeleteAllAsset(ctx context.Context, project, location, dataplexZone, lake string, filter func(*Asset) bool) error { + listObj, err := c.ListAsset(ctx, project, location, dataplexZone, lake) + if err != nil { + return err + } + + err = c.deleteAllAsset(ctx, filter, listObj.Items) + if err != nil { + return err + } + for listObj.HasNext() { + err = listObj.Next(ctx, c) + if err != nil { + return nil + } + err = c.deleteAllAsset(ctx, filter, listObj.Items) + if err != nil { + return err + } + } + return nil +} + +func (c *Client) ApplyAsset(ctx context.Context, rawDesired *Asset, opts ...dcl.ApplyOption) (*Asset, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + ctx = dcl.ContextWithRequestID(ctx) + var resultNewState *Asset + err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + newState, err := applyAssetHelper(c, ctx, rawDesired, opts...) + resultNewState = newState + if err != nil { + // If the error is 409, there is conflict in resource update. + // Here we want to apply changes based on latest state. + if dcl.IsConflictError(err) { + return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} + } + return nil, err + } + return nil, nil + }, c.Config.RetryProvider) + return resultNewState, err +} + +func applyAssetHelper(c *Client, ctx context.Context, rawDesired *Asset, opts ...dcl.ApplyOption) (*Asset, error) { + c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyAsset...") + c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) + + // 1.1: Validation of user-specified fields in desired state. 
+ if err := rawDesired.validate(); err != nil { + return nil, err + } + + if err := extractAssetFields(rawDesired); err != nil { + return nil, err + } + + initial, desired, fieldDiffs, err := c.assetDiffsForRawDesired(ctx, rawDesired, opts...) + if err != nil { + return nil, fmt.Errorf("failed to create a diff: %w", err) + } + + diffs, err := convertFieldDiffsToAssetDiffs(c.Config, fieldDiffs, opts) + if err != nil { + return nil, err + } + + // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). + + // 2.3: Lifecycle Directive Check + var create bool + lp := dcl.FetchLifecycleParams(opts) + if initial == nil { + if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} + } + create = true + } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), + } + } else { + for _, d := range diffs { + if d.RequiresRecreate { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), + } + } + if dcl.HasLifecycleParam(lp, dcl.BlockModification) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} + } + } + } + + // 2.4 Imperative Request Planning + var ops []assetApiOperation + if create { + ops = append(ops, &createAssetOperation{}) + } else { + for _, d := range diffs { + ops = append(ops, d.UpdateOp) + } + } + c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) + + // 2.5 Request Actuation + for _, op := range ops { + c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) + if err := op.do(ctx, desired, c); err != nil { + c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", op, op, err) + return nil, err + } + 
c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) + } + return applyAssetDiff(c, ctx, desired, rawDesired, ops, opts...) +} + +func applyAssetDiff(c *Client, ctx context.Context, desired *Asset, rawDesired *Asset, ops []assetApiOperation, opts ...dcl.ApplyOption) (*Asset, error) { + // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") + rawNew, err := c.GetAsset(ctx, desired) + if err != nil { + return nil, err + } + // Get additional values from the first response. + // These values should be merged into the newState above. + if len(ops) > 0 { + lastOp := ops[len(ops)-1] + if o, ok := lastOp.(*createAssetOperation); ok { + if r, hasR := o.FirstResponse(); hasR { + + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") + + fullResp, err := unmarshalMapAsset(r, c, rawDesired) + if err != nil { + return nil, err + } + + rawNew, err = canonicalizeAssetNewState(c, rawNew, fullResp) + if err != nil { + return nil, err + } + } + } + } + + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) + // 3.2b Canonicalization of raw new state using raw desired state + newState, err := canonicalizeAssetNewState(c, rawNew, rawDesired) + if err != nil { + return rawNew, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) + // 3.3 Comparison of the new state and raw desired state. + // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE + newDesired, err := canonicalizeAssetDesiredState(rawDesired, newState) + if err != nil { + return newState, err + } + + if err := postReadExtractAssetFields(newState); err != nil { + return newState, err + } + + // Need to ensure any transformations made here match acceptably in differ. 
+ if err := postReadExtractAssetFields(newDesired); err != nil { + return newState, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) + newDiffs, err := diffAsset(c, newDesired, newState) + if err != nil { + return newState, err + } + + if len(newDiffs) == 0 { + c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") + } else { + c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) + diffMessages := make([]string, len(newDiffs)) + for i, d := range newDiffs { + diffMessages[i] = fmt.Sprintf("%v", d) + } + return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} + } + c.Config.Logger.InfoWithContext(ctx, "Done Apply.") + return newState, nil +} + +func (r *Asset) GetPolicy(basePath string) (string, string, *bytes.Buffer, error) { + u := r.getPolicyURL(basePath) + body := &bytes.Buffer{} + u, err := dcl.AddQueryParams(u, map[string]string{"optionsRequestedPolicyVersion": fmt.Sprintf("%d", r.IAMPolicyVersion())}) + if err != nil { + return "", "", nil, err + } + return u, "", body, nil +} diff --git a/mmv1/third_party/terraform/services/dataplex/asset_internal.go.tmpl b/mmv1/third_party/terraform/services/dataplex/asset_internal.go.tmpl new file mode 100644 index 000000000000..3176de95c68c --- /dev/null +++ b/mmv1/third_party/terraform/services/dataplex/asset_internal.go.tmpl @@ -0,0 +1,4139 @@ +package dataplex + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "strings" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource/operations" +) + +func (r *Asset) validate() error { + + if err := dcl.Required(r, "name"); err != nil { + return err + } + if err := dcl.Required(r, "resourceSpec"); err != nil { + return err + } + if err := dcl.Required(r, "discoverySpec"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Project, "Project"); 
err != nil { + return err + } + if err := dcl.RequiredParameter(r.Location, "Location"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Lake, "Lake"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.DataplexZone, "DataplexZone"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.ResourceSpec) { + if err := r.ResourceSpec.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.ResourceStatus) { + if err := r.ResourceStatus.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.SecurityStatus) { + if err := r.SecurityStatus.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.DiscoverySpec) { + if err := r.DiscoverySpec.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.DiscoveryStatus) { + if err := r.DiscoveryStatus.validate(); err != nil { + return err + } + } + return nil +} +func (r *AssetResourceSpec) validate() error { + if err := dcl.Required(r, "type"); err != nil { + return err + } + return nil +} +func (r *AssetResourceStatus) validate() error { + return nil +} +func (r *AssetSecurityStatus) validate() error { + return nil +} +func (r *AssetDiscoverySpec) validate() error { + if err := dcl.Required(r, "enabled"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.CsvOptions) { + if err := r.CsvOptions.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.JsonOptions) { + if err := r.JsonOptions.validate(); err != nil { + return err + } + } + return nil +} +func (r *AssetDiscoverySpecCsvOptions) validate() error { + return nil +} +func (r *AssetDiscoverySpecJsonOptions) validate() error { + return nil +} +func (r *AssetDiscoveryStatus) validate() error { + if !dcl.IsEmptyValueIndirect(r.Stats) { + if err := r.Stats.validate(); err != nil { + return err + } + } + return nil +} +func (r *AssetDiscoveryStatusStats) validate() error { + return nil +} +func (r *Asset) 
basePath() string { + params := map[string]interface{}{} + return dcl.Nprintf("https://dataplex.googleapis.com/v1/", params) +} + +func (r *Asset) getURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "dataplexZone": dcl.ValueOrEmptyString(nr.DataplexZone), + "lake": dcl.ValueOrEmptyString(nr.Lake), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/lakes/{{ "{{" }}lake{{ "}}" }}/zones/{{ "{{" }}dataplexZone{{ "}}" }}/assets/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +func (r *Asset) listURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "dataplexZone": dcl.ValueOrEmptyString(nr.DataplexZone), + "lake": dcl.ValueOrEmptyString(nr.Lake), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/lakes/{{ "{{" }}lake{{ "}}" }}/zones/{{ "{{" }}dataplexZone{{ "}}" }}/assets", nr.basePath(), userBasePath, params), nil + +} + +func (r *Asset) createURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "dataplexZone": dcl.ValueOrEmptyString(nr.DataplexZone), + "lake": dcl.ValueOrEmptyString(nr.Lake), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/lakes/{{ "{{" }}lake{{ "}}" }}/zones/{{ "{{" }}dataplexZone{{ "}}" }}/assets?assetId={{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil + +} + +func (r *Asset) deleteURL(userBasePath string) (string, error) 
{ + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "dataplexZone": dcl.ValueOrEmptyString(nr.DataplexZone), + "lake": dcl.ValueOrEmptyString(nr.Lake), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/lakes/{{ "{{" }}lake{{ "}}" }}/zones/{{ "{{" }}dataplexZone{{ "}}" }}/assets/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +func (r *Asset) SetPolicyURL(userBasePath string) string { + nr := r.urlNormalized() + fields := map[string]interface{}{} + return dcl.URL("", nr.basePath(), userBasePath, fields) +} + +func (r *Asset) SetPolicyVerb() string { + return "" +} + +func (r *Asset) getPolicyURL(userBasePath string) string { + nr := r.urlNormalized() + fields := map[string]interface{}{} + return dcl.URL("", nr.basePath(), userBasePath, fields) +} + +func (r *Asset) IAMPolicyVersion() int { + return 3 +} + +// assetApiOperation represents a mutable operation in the underlying REST +// API such as Create, Update, or Delete. +type assetApiOperation interface { + do(context.Context, *Asset, *Client) error +} + +// newUpdateAssetUpdateAssetRequest creates a request for an +// Asset resource's UpdateAsset update type by filling in the update +// fields based on the intended state of the resource. 
+func newUpdateAssetUpdateAssetRequest(ctx context.Context, f *Asset, c *Client) (map[string]interface{}, error) { + req := map[string]interface{}{} + res := f + _ = res + + if v, err := dcl.DeriveField("projects/%s/locations/%s/lakes/%s/zones/%s/assets/%s", f.Name, dcl.SelfLinkToName(f.Project), dcl.SelfLinkToName(f.Location), dcl.SelfLinkToName(f.Lake), dcl.SelfLinkToName(f.DataplexZone), dcl.SelfLinkToName(f.Name)); err != nil { + return nil, fmt.Errorf("error expanding Name into name: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["name"] = v + } + if v := f.DisplayName; !dcl.IsEmptyValueIndirect(v) { + req["displayName"] = v + } + if v := f.Labels; !dcl.IsEmptyValueIndirect(v) { + req["labels"] = v + } + if v := f.Description; !dcl.IsEmptyValueIndirect(v) { + req["description"] = v + } + if v, err := expandAssetResourceSpec(c, f.ResourceSpec, res); err != nil { + return nil, fmt.Errorf("error expanding ResourceSpec into resourceSpec: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["resourceSpec"] = v + } + if v, err := expandAssetResourceStatus(c, f.ResourceStatus, res); err != nil { + return nil, fmt.Errorf("error expanding ResourceStatus into resourceStatus: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["resourceStatus"] = v + } + if v, err := expandAssetSecurityStatus(c, f.SecurityStatus, res); err != nil { + return nil, fmt.Errorf("error expanding SecurityStatus into securityStatus: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["securityStatus"] = v + } + if v, err := expandAssetDiscoverySpec(c, f.DiscoverySpec, res); err != nil { + return nil, fmt.Errorf("error expanding DiscoverySpec into discoverySpec: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["discoverySpec"] = v + } + if v, err := expandAssetDiscoveryStatus(c, f.DiscoveryStatus, res); err != nil { + return nil, fmt.Errorf("error expanding DiscoveryStatus into discoveryStatus: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + 
req["discoveryStatus"] = v + } + req["name"] = fmt.Sprintf("projects/%s/locations/%s/lakes/%s/zones/%s/assets/%s", *f.Project, *f.Location, *f.Lake, *f.DataplexZone, *f.Name) + + return req, nil +} + +// marshalUpdateAssetUpdateAssetRequest converts the update into +// the final JSON request body. +func marshalUpdateAssetUpdateAssetRequest(c *Client, m map[string]interface{}) ([]byte, error) { + + return json.Marshal(m) +} + +type updateAssetUpdateAssetOperation struct { + // If the update operation has the REQUIRES_APPLY_OPTIONS trait, this will be populated. + // Usually it will be nil - this is to prevent us from accidentally depending on apply + // options, which should usually be unnecessary. + ApplyOptions []dcl.ApplyOption + FieldDiffs []*dcl.FieldDiff +} + +// do creates a request and sends it to the appropriate URL. In most operations, +// do will transcribe a subset of the resource into a request object and send a +// PUT request to a single URL. + +func (op *updateAssetUpdateAssetOperation) do(ctx context.Context, r *Asset, c *Client) error { + _, err := c.GetAsset(ctx, r) + if err != nil { + return err + } + + u, err := r.updateURL(c.Config.BasePath, "UpdateAsset") + if err != nil { + return err + } + mask := dcl.UpdateMask(op.FieldDiffs) + u, err = dcl.AddQueryParams(u, map[string]string{"updateMask": mask}) + if err != nil { + return err + } + + req, err := newUpdateAssetUpdateAssetRequest(ctx, r, c) + if err != nil { + return err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created update: %#v", req) + body, err := marshalUpdateAssetUpdateAssetRequest(c, req) + if err != nil { + return err + } + resp, err := dcl.SendRequest(ctx, c.Config, "PATCH", u, bytes.NewBuffer(body), c.Config.RetryProvider) + if err != nil { + return err + } + + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + err = o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), 
"GET") + + if err != nil { + return err + } + + return nil +} + +func (c *Client) listAssetRaw(ctx context.Context, r *Asset, pageToken string, pageSize int32) ([]byte, error) { + u, err := r.urlNormalized().listURL(c.Config.BasePath) + if err != nil { + return nil, err + } + + m := make(map[string]string) + if pageToken != "" { + m["pageToken"] = pageToken + } + + if pageSize != AssetMaxPage { + m["pageSize"] = fmt.Sprintf("%v", pageSize) + } + + u, err = dcl.AddQueryParams(u, m) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + return ioutil.ReadAll(resp.Response.Body) +} + +type listAssetOperation struct { + Assets []map[string]interface{} `json:"assets"` + Token string `json:"nextPageToken"` +} + +func (c *Client) listAsset(ctx context.Context, r *Asset, pageToken string, pageSize int32) ([]*Asset, string, error) { + b, err := c.listAssetRaw(ctx, r, pageToken, pageSize) + if err != nil { + return nil, "", err + } + + var m listAssetOperation + if err := json.Unmarshal(b, &m); err != nil { + return nil, "", err + } + + var l []*Asset + for _, v := range m.Assets { + res, err := unmarshalMapAsset(v, c, r) + if err != nil { + return nil, m.Token, err + } + res.Project = r.Project + res.Location = r.Location + res.DataplexZone = r.DataplexZone + res.Lake = r.Lake + l = append(l, res) + } + + return l, m.Token, nil +} + +func (c *Client) deleteAllAsset(ctx context.Context, f func(*Asset) bool, resources []*Asset) error { + var errors []string + for _, res := range resources { + if f(res) { + // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. 
+ err := c.DeleteAsset(ctx, res) + if err != nil { + errors = append(errors, err.Error()) + } + } + } + if len(errors) > 0 { + return fmt.Errorf("%v", strings.Join(errors, "\n")) + } else { + return nil + } +} + +type deleteAssetOperation struct{} + +func (op *deleteAssetOperation) do(ctx context.Context, r *Asset, c *Client) error { + r, err := c.GetAsset(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + c.Config.Logger.InfoWithContextf(ctx, "Asset not found, returning. Original error: %v", err) + return nil + } + c.Config.Logger.WarningWithContextf(ctx, "GetAsset checking for existence. error: %v", err) + return err + } + + u, err := r.deleteURL(c.Config.BasePath) + if err != nil { + return err + } + + // Delete should never have a body + body := &bytes.Buffer{} + resp, err := dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider) + if err != nil { + return err + } + + // wait for object to be deleted. + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { + return err + } + + // We saw a race condition where for some successful delete operation, the Get calls returned resources for a short duration. + // This is the reason we are adding retry to handle that case. + retriesRemaining := 10 + dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + _, err := c.GetAsset(ctx, r) + if dcl.IsNotFound(err) { + return nil, nil + } + if retriesRemaining > 0 { + retriesRemaining-- + return &dcl.RetryDetails{}, dcl.OperationNotDone{} + } + return nil, dcl.NotDeletedError{ExistingResource: r} + }, c.Config.RetryProvider) + return nil +} + +// Create operations are similar to Update operations, although they do not have +// specific request objects. 
The Create request object is the json encoding of +// the resource, which is modified by res.marshal to form the base request body. +type createAssetOperation struct { + response map[string]interface{} +} + +func (op *createAssetOperation) FirstResponse() (map[string]interface{}, bool) { + return op.response, len(op.response) > 0 +} + +func (op *createAssetOperation) do(ctx context.Context, r *Asset, c *Client) error { + c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r) + u, err := r.createURL(c.Config.BasePath) + if err != nil { + return err + } + + req, err := r.marshal(c) + if err != nil { + return err + } + resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider) + if err != nil { + return err + } + // wait for object to be created. + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { + c.Config.Logger.Warningf("Creation failed after waiting for operation: %v", err) + return err + } + c.Config.Logger.InfoWithContextf(ctx, "Successfully waited for operation") + op.response, _ = o.FirstResponse() + + if _, err := c.GetAsset(ctx, r); err != nil { + c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) + return err + } + + return nil +} + +func (c *Client) getAssetRaw(ctx context.Context, r *Asset) ([]byte, error) { + + u, err := r.getURL(c.Config.BasePath) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + b, err := ioutil.ReadAll(resp.Response.Body) + if err != nil { + return nil, err + } + + return b, nil +} + +func (c *Client) assetDiffsForRawDesired(ctx context.Context, rawDesired *Asset, opts ...dcl.ApplyOption) (initial, desired *Asset, 
diffs []*dcl.FieldDiff, err error) { + c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") + // First, let us see if the user provided a state hint. If they did, we will start fetching based on that. + var fetchState *Asset + if sh := dcl.FetchStateHint(opts); sh != nil { + if r, ok := sh.(*Asset); !ok { + c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected Asset, got %T", sh) + } else { + fetchState = r + } + } + if fetchState == nil { + fetchState = rawDesired + } + + // 1.2: Retrieval of raw initial state from API + rawInitial, err := c.GetAsset(ctx, fetchState) + if rawInitial == nil { + if !dcl.IsNotFound(err) { + c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a Asset resource already exists: %s", err) + return nil, nil, nil, fmt.Errorf("failed to retrieve Asset resource: %v", err) + } + c.Config.Logger.InfoWithContext(ctx, "Found that Asset resource did not exist.") + // Perform canonicalization to pick up defaults. + desired, err = canonicalizeAssetDesiredState(rawDesired, rawInitial) + return nil, desired, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Found initial state for Asset: %v", rawInitial) + c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for Asset: %v", rawDesired) + + // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. + if err := extractAssetFields(rawInitial); err != nil { + return nil, nil, nil, err + } + + // 1.3: Canonicalize raw initial state into initial state. + initial, err = canonicalizeAssetInitialState(rawInitial, rawDesired) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for Asset: %v", initial) + + // 1.4: Canonicalize raw desired state into desired state. + desired, err = canonicalizeAssetDesiredState(rawDesired, rawInitial, opts...) 
+ if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for Asset: %v", desired) + + // 2.1: Comparison of initial and desired state. + diffs, err = diffAsset(c, desired, initial, opts...) + return initial, desired, diffs, err +} + +func canonicalizeAssetInitialState(rawInitial, rawDesired *Asset) (*Asset, error) { + // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. + return rawInitial, nil +} + +/* +* Canonicalizers +* +* These are responsible for converting either a user-specified config or a +* GCP API response to a standard format that can be used for difference checking. +* */ + +func canonicalizeAssetDesiredState(rawDesired, rawInitial *Asset, opts ...dcl.ApplyOption) (*Asset, error) { + + if rawInitial == nil { + // Since the initial state is empty, the desired state is all we have. + // We canonicalize the remaining nested objects with nil to pick up defaults. + rawDesired.ResourceSpec = canonicalizeAssetResourceSpec(rawDesired.ResourceSpec, nil, opts...) + rawDesired.ResourceStatus = canonicalizeAssetResourceStatus(rawDesired.ResourceStatus, nil, opts...) + rawDesired.SecurityStatus = canonicalizeAssetSecurityStatus(rawDesired.SecurityStatus, nil, opts...) + rawDesired.DiscoverySpec = canonicalizeAssetDiscoverySpec(rawDesired.DiscoverySpec, nil, opts...) + rawDesired.DiscoveryStatus = canonicalizeAssetDiscoveryStatus(rawDesired.DiscoveryStatus, nil, opts...) 
+ + return rawDesired, nil + } + canonicalDesired := &Asset{} + if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawInitial.Name) { + canonicalDesired.Name = rawInitial.Name + } else { + canonicalDesired.Name = rawDesired.Name + } + if dcl.StringCanonicalize(rawDesired.DisplayName, rawInitial.DisplayName) { + canonicalDesired.DisplayName = rawInitial.DisplayName + } else { + canonicalDesired.DisplayName = rawDesired.DisplayName + } + if dcl.IsZeroValue(rawDesired.Labels) || (dcl.IsEmptyValueIndirect(rawDesired.Labels) && dcl.IsEmptyValueIndirect(rawInitial.Labels)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + canonicalDesired.Labels = rawInitial.Labels + } else { + canonicalDesired.Labels = rawDesired.Labels + } + if dcl.StringCanonicalize(rawDesired.Description, rawInitial.Description) { + canonicalDesired.Description = rawInitial.Description + } else { + canonicalDesired.Description = rawDesired.Description + } + canonicalDesired.ResourceSpec = canonicalizeAssetResourceSpec(rawDesired.ResourceSpec, rawInitial.ResourceSpec, opts...) + canonicalDesired.DiscoverySpec = canonicalizeAssetDiscoverySpec(rawDesired.DiscoverySpec, rawInitial.DiscoverySpec, opts...) 
+ if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) { + canonicalDesired.Project = rawInitial.Project + } else { + canonicalDesired.Project = rawDesired.Project + } + if dcl.NameToSelfLink(rawDesired.Location, rawInitial.Location) { + canonicalDesired.Location = rawInitial.Location + } else { + canonicalDesired.Location = rawDesired.Location + } + if dcl.NameToSelfLink(rawDesired.Lake, rawInitial.Lake) { + canonicalDesired.Lake = rawInitial.Lake + } else { + canonicalDesired.Lake = rawDesired.Lake + } + if dcl.NameToSelfLink(rawDesired.DataplexZone, rawInitial.DataplexZone) { + canonicalDesired.DataplexZone = rawInitial.DataplexZone + } else { + canonicalDesired.DataplexZone = rawDesired.DataplexZone + } + return canonicalDesired, nil +} + +func canonicalizeAssetNewState(c *Client, rawNew, rawDesired *Asset) (*Asset, error) { + + if dcl.IsEmptyValueIndirect(rawNew.Name) && dcl.IsEmptyValueIndirect(rawDesired.Name) { + rawNew.Name = rawDesired.Name + } else { + if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawNew.Name) { + rawNew.Name = rawDesired.Name + } + } + + if dcl.IsEmptyValueIndirect(rawNew.DisplayName) && dcl.IsEmptyValueIndirect(rawDesired.DisplayName) { + rawNew.DisplayName = rawDesired.DisplayName + } else { + if dcl.StringCanonicalize(rawDesired.DisplayName, rawNew.DisplayName) { + rawNew.DisplayName = rawDesired.DisplayName + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Uid) && dcl.IsEmptyValueIndirect(rawDesired.Uid) { + rawNew.Uid = rawDesired.Uid + } else { + if dcl.StringCanonicalize(rawDesired.Uid, rawNew.Uid) { + rawNew.Uid = rawDesired.Uid + } + } + + if dcl.IsEmptyValueIndirect(rawNew.CreateTime) && dcl.IsEmptyValueIndirect(rawDesired.CreateTime) { + rawNew.CreateTime = rawDesired.CreateTime + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.UpdateTime) && dcl.IsEmptyValueIndirect(rawDesired.UpdateTime) { + rawNew.UpdateTime = rawDesired.UpdateTime + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.Labels) && 
dcl.IsEmptyValueIndirect(rawDesired.Labels) { + rawNew.Labels = rawDesired.Labels + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.Description) && dcl.IsEmptyValueIndirect(rawDesired.Description) { + rawNew.Description = rawDesired.Description + } else { + if dcl.StringCanonicalize(rawDesired.Description, rawNew.Description) { + rawNew.Description = rawDesired.Description + } + } + + if dcl.IsEmptyValueIndirect(rawNew.State) && dcl.IsEmptyValueIndirect(rawDesired.State) { + rawNew.State = rawDesired.State + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.ResourceSpec) && dcl.IsEmptyValueIndirect(rawDesired.ResourceSpec) { + rawNew.ResourceSpec = rawDesired.ResourceSpec + } else { + rawNew.ResourceSpec = canonicalizeNewAssetResourceSpec(c, rawDesired.ResourceSpec, rawNew.ResourceSpec) + } + + if dcl.IsEmptyValueIndirect(rawNew.ResourceStatus) && dcl.IsEmptyValueIndirect(rawDesired.ResourceStatus) { + rawNew.ResourceStatus = rawDesired.ResourceStatus + } else { + rawNew.ResourceStatus = canonicalizeNewAssetResourceStatus(c, rawDesired.ResourceStatus, rawNew.ResourceStatus) + } + + if dcl.IsEmptyValueIndirect(rawNew.SecurityStatus) && dcl.IsEmptyValueIndirect(rawDesired.SecurityStatus) { + rawNew.SecurityStatus = rawDesired.SecurityStatus + } else { + rawNew.SecurityStatus = canonicalizeNewAssetSecurityStatus(c, rawDesired.SecurityStatus, rawNew.SecurityStatus) + } + + if dcl.IsEmptyValueIndirect(rawNew.DiscoverySpec) && dcl.IsEmptyValueIndirect(rawDesired.DiscoverySpec) { + rawNew.DiscoverySpec = rawDesired.DiscoverySpec + } else { + rawNew.DiscoverySpec = canonicalizeNewAssetDiscoverySpec(c, rawDesired.DiscoverySpec, rawNew.DiscoverySpec) + } + + if dcl.IsEmptyValueIndirect(rawNew.DiscoveryStatus) && dcl.IsEmptyValueIndirect(rawDesired.DiscoveryStatus) { + rawNew.DiscoveryStatus = rawDesired.DiscoveryStatus + } else { + rawNew.DiscoveryStatus = canonicalizeNewAssetDiscoveryStatus(c, rawDesired.DiscoveryStatus, rawNew.DiscoveryStatus) + } + + rawNew.Project 
= rawDesired.Project + + rawNew.Location = rawDesired.Location + + rawNew.Lake = rawDesired.Lake + + rawNew.DataplexZone = rawDesired.DataplexZone + + return rawNew, nil +} + +func canonicalizeAssetResourceSpec(des, initial *AssetResourceSpec, opts ...dcl.ApplyOption) *AssetResourceSpec { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &AssetResourceSpec{} + + if dcl.StringCanonicalize(des.Name, initial.Name) || dcl.IsZeroValue(des.Name) { + cDes.Name = initial.Name + } else { + cDes.Name = des.Name + } + if dcl.IsZeroValue(des.Type) || (dcl.IsEmptyValueIndirect(des.Type) && dcl.IsEmptyValueIndirect(initial.Type)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Type = initial.Type + } else { + cDes.Type = des.Type + } + if dcl.IsZeroValue(des.ReadAccessMode) || (dcl.IsEmptyValueIndirect(des.ReadAccessMode) && dcl.IsEmptyValueIndirect(initial.ReadAccessMode)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.ReadAccessMode = initial.ReadAccessMode + } else { + cDes.ReadAccessMode = des.ReadAccessMode + } + + return cDes +} + +func canonicalizeAssetResourceSpecSlice(des, initial []AssetResourceSpec, opts ...dcl.ApplyOption) []AssetResourceSpec { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]AssetResourceSpec, 0, len(des)) + for _, d := range des { + cd := canonicalizeAssetResourceSpec(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]AssetResourceSpec, 0, len(des)) + for i, d := range des { + cd := canonicalizeAssetResourceSpec(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewAssetResourceSpec(c *Client, des, nw *AssetResourceSpec) *AssetResourceSpec { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for AssetResourceSpec while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Name, nw.Name) { + nw.Name = des.Name + } + + return nw +} + +func canonicalizeNewAssetResourceSpecSet(c *Client, des, nw []AssetResourceSpec) []AssetResourceSpec { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []AssetResourceSpec + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareAssetResourceSpecNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewAssetResourceSpec(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewAssetResourceSpecSlice(c *Client, des, nw []AssetResourceSpec) []AssetResourceSpec { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []AssetResourceSpec + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewAssetResourceSpec(c, &d, &n)) + } + + return items +} + +func canonicalizeAssetResourceStatus(des, initial *AssetResourceStatus, opts ...dcl.ApplyOption) *AssetResourceStatus { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &AssetResourceStatus{} + + if dcl.IsZeroValue(des.State) || (dcl.IsEmptyValueIndirect(des.State) && dcl.IsEmptyValueIndirect(initial.State)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.State = initial.State + } else { + cDes.State = des.State + } + if dcl.StringCanonicalize(des.Message, initial.Message) || dcl.IsZeroValue(des.Message) { + cDes.Message = initial.Message + } else { + cDes.Message = des.Message + } + if dcl.IsZeroValue(des.UpdateTime) || (dcl.IsEmptyValueIndirect(des.UpdateTime) && dcl.IsEmptyValueIndirect(initial.UpdateTime)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.UpdateTime = initial.UpdateTime + } else { + cDes.UpdateTime = des.UpdateTime + } + + return cDes +} + +func canonicalizeAssetResourceStatusSlice(des, initial []AssetResourceStatus, opts ...dcl.ApplyOption) []AssetResourceStatus { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]AssetResourceStatus, 0, len(des)) + for _, d := range des { + cd := canonicalizeAssetResourceStatus(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]AssetResourceStatus, 0, len(des)) + for i, d := range des { + cd := canonicalizeAssetResourceStatus(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewAssetResourceStatus(c *Client, des, nw *AssetResourceStatus) *AssetResourceStatus { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for AssetResourceStatus while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Message, nw.Message) { + nw.Message = des.Message + } + + return nw +} + +func canonicalizeNewAssetResourceStatusSet(c *Client, des, nw []AssetResourceStatus) []AssetResourceStatus { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []AssetResourceStatus + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareAssetResourceStatusNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewAssetResourceStatus(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewAssetResourceStatusSlice(c *Client, des, nw []AssetResourceStatus) []AssetResourceStatus { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []AssetResourceStatus + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewAssetResourceStatus(c, &d, &n)) + } + + return items +} + +func canonicalizeAssetSecurityStatus(des, initial *AssetSecurityStatus, opts ...dcl.ApplyOption) *AssetSecurityStatus { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &AssetSecurityStatus{} + + if dcl.IsZeroValue(des.State) || (dcl.IsEmptyValueIndirect(des.State) && dcl.IsEmptyValueIndirect(initial.State)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.State = initial.State + } else { + cDes.State = des.State + } + if dcl.StringCanonicalize(des.Message, initial.Message) || dcl.IsZeroValue(des.Message) { + cDes.Message = initial.Message + } else { + cDes.Message = des.Message + } + if dcl.IsZeroValue(des.UpdateTime) || (dcl.IsEmptyValueIndirect(des.UpdateTime) && dcl.IsEmptyValueIndirect(initial.UpdateTime)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.UpdateTime = initial.UpdateTime + } else { + cDes.UpdateTime = des.UpdateTime + } + + return cDes +} + +func canonicalizeAssetSecurityStatusSlice(des, initial []AssetSecurityStatus, opts ...dcl.ApplyOption) []AssetSecurityStatus { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]AssetSecurityStatus, 0, len(des)) + for _, d := range des { + cd := canonicalizeAssetSecurityStatus(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]AssetSecurityStatus, 0, len(des)) + for i, d := range des { + cd := canonicalizeAssetSecurityStatus(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewAssetSecurityStatus(c *Client, des, nw *AssetSecurityStatus) *AssetSecurityStatus { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for AssetSecurityStatus while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Message, nw.Message) { + nw.Message = des.Message + } + + return nw +} + +func canonicalizeNewAssetSecurityStatusSet(c *Client, des, nw []AssetSecurityStatus) []AssetSecurityStatus { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []AssetSecurityStatus + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareAssetSecurityStatusNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewAssetSecurityStatus(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewAssetSecurityStatusSlice(c *Client, des, nw []AssetSecurityStatus) []AssetSecurityStatus { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []AssetSecurityStatus + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewAssetSecurityStatus(c, &d, &n)) + } + + return items +} + +func canonicalizeAssetDiscoverySpec(des, initial *AssetDiscoverySpec, opts ...dcl.ApplyOption) *AssetDiscoverySpec { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &AssetDiscoverySpec{} + + if dcl.BoolCanonicalize(des.Enabled, initial.Enabled) || dcl.IsZeroValue(des.Enabled) { + cDes.Enabled = initial.Enabled + } else { + cDes.Enabled = des.Enabled + } + if dcl.StringArrayCanonicalize(des.IncludePatterns, initial.IncludePatterns) { + cDes.IncludePatterns = initial.IncludePatterns + } else { + cDes.IncludePatterns = des.IncludePatterns + } + if dcl.StringArrayCanonicalize(des.ExcludePatterns, initial.ExcludePatterns) { + cDes.ExcludePatterns = initial.ExcludePatterns + } else { + cDes.ExcludePatterns = des.ExcludePatterns + } + cDes.CsvOptions = canonicalizeAssetDiscoverySpecCsvOptions(des.CsvOptions, initial.CsvOptions, opts...) + cDes.JsonOptions = canonicalizeAssetDiscoverySpecJsonOptions(des.JsonOptions, initial.JsonOptions, opts...) + if dcl.StringCanonicalize(des.Schedule, initial.Schedule) || dcl.IsZeroValue(des.Schedule) { + cDes.Schedule = initial.Schedule + } else { + cDes.Schedule = des.Schedule + } + + return cDes +} + +func canonicalizeAssetDiscoverySpecSlice(des, initial []AssetDiscoverySpec, opts ...dcl.ApplyOption) []AssetDiscoverySpec { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]AssetDiscoverySpec, 0, len(des)) + for _, d := range des { + cd := canonicalizeAssetDiscoverySpec(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]AssetDiscoverySpec, 0, len(des)) + for i, d := range des { + cd := canonicalizeAssetDiscoverySpec(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewAssetDiscoverySpec(c *Client, des, nw *AssetDiscoverySpec) *AssetDiscoverySpec { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for AssetDiscoverySpec while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.BoolCanonicalize(des.Enabled, nw.Enabled) { + nw.Enabled = des.Enabled + } + if dcl.StringArrayCanonicalize(des.IncludePatterns, nw.IncludePatterns) { + nw.IncludePatterns = des.IncludePatterns + } + if dcl.StringArrayCanonicalize(des.ExcludePatterns, nw.ExcludePatterns) { + nw.ExcludePatterns = des.ExcludePatterns + } + nw.CsvOptions = canonicalizeNewAssetDiscoverySpecCsvOptions(c, des.CsvOptions, nw.CsvOptions) + nw.JsonOptions = canonicalizeNewAssetDiscoverySpecJsonOptions(c, des.JsonOptions, nw.JsonOptions) + if dcl.StringCanonicalize(des.Schedule, nw.Schedule) { + nw.Schedule = des.Schedule + } + + return nw +} + +func canonicalizeNewAssetDiscoverySpecSet(c *Client, des, nw []AssetDiscoverySpec) []AssetDiscoverySpec { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []AssetDiscoverySpec + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareAssetDiscoverySpecNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewAssetDiscoverySpec(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) 
+ } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewAssetDiscoverySpecSlice(c *Client, des, nw []AssetDiscoverySpec) []AssetDiscoverySpec { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []AssetDiscoverySpec + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewAssetDiscoverySpec(c, &d, &n)) + } + + return items +} + +func canonicalizeAssetDiscoverySpecCsvOptions(des, initial *AssetDiscoverySpecCsvOptions, opts ...dcl.ApplyOption) *AssetDiscoverySpecCsvOptions { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &AssetDiscoverySpecCsvOptions{} + + if dcl.IsZeroValue(des.HeaderRows) || (dcl.IsEmptyValueIndirect(des.HeaderRows) && dcl.IsEmptyValueIndirect(initial.HeaderRows)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.HeaderRows = initial.HeaderRows + } else { + cDes.HeaderRows = des.HeaderRows + } + if dcl.StringCanonicalize(des.Delimiter, initial.Delimiter) || dcl.IsZeroValue(des.Delimiter) { + cDes.Delimiter = initial.Delimiter + } else { + cDes.Delimiter = des.Delimiter + } + if dcl.StringCanonicalize(des.Encoding, initial.Encoding) || dcl.IsZeroValue(des.Encoding) { + cDes.Encoding = initial.Encoding + } else { + cDes.Encoding = des.Encoding + } + if dcl.BoolCanonicalize(des.DisableTypeInference, initial.DisableTypeInference) || dcl.IsZeroValue(des.DisableTypeInference) { + cDes.DisableTypeInference = initial.DisableTypeInference + } else { + cDes.DisableTypeInference = des.DisableTypeInference + } + + return cDes +} + +func canonicalizeAssetDiscoverySpecCsvOptionsSlice(des, initial []AssetDiscoverySpecCsvOptions, opts ...dcl.ApplyOption) []AssetDiscoverySpecCsvOptions { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]AssetDiscoverySpecCsvOptions, 0, len(des)) + for _, d := range des { + cd := canonicalizeAssetDiscoverySpecCsvOptions(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]AssetDiscoverySpecCsvOptions, 0, len(des)) + for i, d := range des { + cd := canonicalizeAssetDiscoverySpecCsvOptions(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewAssetDiscoverySpecCsvOptions(c *Client, des, nw *AssetDiscoverySpecCsvOptions) *AssetDiscoverySpecCsvOptions { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for AssetDiscoverySpecCsvOptions while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Delimiter, nw.Delimiter) { + nw.Delimiter = des.Delimiter + } + if dcl.StringCanonicalize(des.Encoding, nw.Encoding) { + nw.Encoding = des.Encoding + } + if dcl.BoolCanonicalize(des.DisableTypeInference, nw.DisableTypeInference) { + nw.DisableTypeInference = des.DisableTypeInference + } + + return nw +} + +func canonicalizeNewAssetDiscoverySpecCsvOptionsSet(c *Client, des, nw []AssetDiscoverySpecCsvOptions) []AssetDiscoverySpecCsvOptions { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []AssetDiscoverySpecCsvOptions + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareAssetDiscoverySpecCsvOptionsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewAssetDiscoverySpecCsvOptions(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewAssetDiscoverySpecCsvOptionsSlice(c *Client, des, nw []AssetDiscoverySpecCsvOptions) []AssetDiscoverySpecCsvOptions { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []AssetDiscoverySpecCsvOptions + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewAssetDiscoverySpecCsvOptions(c, &d, &n)) + } + + return items +} + +func canonicalizeAssetDiscoverySpecJsonOptions(des, initial *AssetDiscoverySpecJsonOptions, opts ...dcl.ApplyOption) *AssetDiscoverySpecJsonOptions { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &AssetDiscoverySpecJsonOptions{} + + if dcl.StringCanonicalize(des.Encoding, initial.Encoding) || dcl.IsZeroValue(des.Encoding) { + cDes.Encoding = initial.Encoding + } else { + cDes.Encoding = des.Encoding + } + if dcl.BoolCanonicalize(des.DisableTypeInference, initial.DisableTypeInference) || dcl.IsZeroValue(des.DisableTypeInference) { + cDes.DisableTypeInference = initial.DisableTypeInference + } else { + cDes.DisableTypeInference = des.DisableTypeInference + } + + return cDes +} + +func canonicalizeAssetDiscoverySpecJsonOptionsSlice(des, initial []AssetDiscoverySpecJsonOptions, opts ...dcl.ApplyOption) []AssetDiscoverySpecJsonOptions { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]AssetDiscoverySpecJsonOptions, 0, len(des)) + for _, d := range des { + cd := canonicalizeAssetDiscoverySpecJsonOptions(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]AssetDiscoverySpecJsonOptions, 0, len(des)) + for i, d := range des { + cd := canonicalizeAssetDiscoverySpecJsonOptions(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewAssetDiscoverySpecJsonOptions(c *Client, des, nw *AssetDiscoverySpecJsonOptions) *AssetDiscoverySpecJsonOptions { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for AssetDiscoverySpecJsonOptions while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Encoding, nw.Encoding) { + nw.Encoding = des.Encoding + } + if dcl.BoolCanonicalize(des.DisableTypeInference, nw.DisableTypeInference) { + nw.DisableTypeInference = des.DisableTypeInference + } + + return nw +} + +func canonicalizeNewAssetDiscoverySpecJsonOptionsSet(c *Client, des, nw []AssetDiscoverySpecJsonOptions) []AssetDiscoverySpecJsonOptions { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []AssetDiscoverySpecJsonOptions + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareAssetDiscoverySpecJsonOptionsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewAssetDiscoverySpecJsonOptions(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewAssetDiscoverySpecJsonOptionsSlice(c *Client, des, nw []AssetDiscoverySpecJsonOptions) []AssetDiscoverySpecJsonOptions { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []AssetDiscoverySpecJsonOptions + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewAssetDiscoverySpecJsonOptions(c, &d, &n)) + } + + return items +} + +func canonicalizeAssetDiscoveryStatus(des, initial *AssetDiscoveryStatus, opts ...dcl.ApplyOption) *AssetDiscoveryStatus { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &AssetDiscoveryStatus{} + + if dcl.IsZeroValue(des.State) || (dcl.IsEmptyValueIndirect(des.State) && dcl.IsEmptyValueIndirect(initial.State)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.State = initial.State + } else { + cDes.State = des.State + } + if dcl.StringCanonicalize(des.Message, initial.Message) || dcl.IsZeroValue(des.Message) { + cDes.Message = initial.Message + } else { + cDes.Message = des.Message + } + if dcl.IsZeroValue(des.UpdateTime) || (dcl.IsEmptyValueIndirect(des.UpdateTime) && dcl.IsEmptyValueIndirect(initial.UpdateTime)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.UpdateTime = initial.UpdateTime + } else { + cDes.UpdateTime = des.UpdateTime + } + if dcl.IsZeroValue(des.LastRunTime) || (dcl.IsEmptyValueIndirect(des.LastRunTime) && dcl.IsEmptyValueIndirect(initial.LastRunTime)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.LastRunTime = initial.LastRunTime + } else { + cDes.LastRunTime = des.LastRunTime + } + cDes.Stats = canonicalizeAssetDiscoveryStatusStats(des.Stats, initial.Stats, opts...) 
+ if dcl.StringCanonicalize(des.LastRunDuration, initial.LastRunDuration) || dcl.IsZeroValue(des.LastRunDuration) { + cDes.LastRunDuration = initial.LastRunDuration + } else { + cDes.LastRunDuration = des.LastRunDuration + } + + return cDes +} + +func canonicalizeAssetDiscoveryStatusSlice(des, initial []AssetDiscoveryStatus, opts ...dcl.ApplyOption) []AssetDiscoveryStatus { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]AssetDiscoveryStatus, 0, len(des)) + for _, d := range des { + cd := canonicalizeAssetDiscoveryStatus(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]AssetDiscoveryStatus, 0, len(des)) + for i, d := range des { + cd := canonicalizeAssetDiscoveryStatus(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewAssetDiscoveryStatus(c *Client, des, nw *AssetDiscoveryStatus) *AssetDiscoveryStatus { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for AssetDiscoveryStatus while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Message, nw.Message) { + nw.Message = des.Message + } + nw.Stats = canonicalizeNewAssetDiscoveryStatusStats(c, des.Stats, nw.Stats) + if dcl.StringCanonicalize(des.LastRunDuration, nw.LastRunDuration) { + nw.LastRunDuration = des.LastRunDuration + } + + return nw +} + +func canonicalizeNewAssetDiscoveryStatusSet(c *Client, des, nw []AssetDiscoveryStatus) []AssetDiscoveryStatus { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []AssetDiscoveryStatus + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareAssetDiscoveryStatusNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewAssetDiscoveryStatus(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewAssetDiscoveryStatusSlice(c *Client, des, nw []AssetDiscoveryStatus) []AssetDiscoveryStatus { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []AssetDiscoveryStatus + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewAssetDiscoveryStatus(c, &d, &n)) + } + + return items +} + +func canonicalizeAssetDiscoveryStatusStats(des, initial *AssetDiscoveryStatusStats, opts ...dcl.ApplyOption) *AssetDiscoveryStatusStats { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &AssetDiscoveryStatusStats{} + + if dcl.IsZeroValue(des.DataItems) || (dcl.IsEmptyValueIndirect(des.DataItems) && dcl.IsEmptyValueIndirect(initial.DataItems)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.DataItems = initial.DataItems + } else { + cDes.DataItems = des.DataItems + } + if dcl.IsZeroValue(des.DataSize) || (dcl.IsEmptyValueIndirect(des.DataSize) && dcl.IsEmptyValueIndirect(initial.DataSize)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.DataSize = initial.DataSize + } else { + cDes.DataSize = des.DataSize + } + if dcl.IsZeroValue(des.Tables) || (dcl.IsEmptyValueIndirect(des.Tables) && dcl.IsEmptyValueIndirect(initial.Tables)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Tables = initial.Tables + } else { + cDes.Tables = des.Tables + } + if dcl.IsZeroValue(des.Filesets) || (dcl.IsEmptyValueIndirect(des.Filesets) && dcl.IsEmptyValueIndirect(initial.Filesets)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Filesets = initial.Filesets + } else { + cDes.Filesets = des.Filesets + } + + return cDes +} + +func canonicalizeAssetDiscoveryStatusStatsSlice(des, initial []AssetDiscoveryStatusStats, opts ...dcl.ApplyOption) []AssetDiscoveryStatusStats { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]AssetDiscoveryStatusStats, 0, len(des)) + for _, d := range des { + cd := canonicalizeAssetDiscoveryStatusStats(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]AssetDiscoveryStatusStats, 0, len(des)) + for i, d := range des { + cd := canonicalizeAssetDiscoveryStatusStats(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewAssetDiscoveryStatusStats(c *Client, des, nw *AssetDiscoveryStatusStats) *AssetDiscoveryStatusStats { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for AssetDiscoveryStatusStats while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewAssetDiscoveryStatusStatsSet(c *Client, des, nw []AssetDiscoveryStatusStats) []AssetDiscoveryStatusStats { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []AssetDiscoveryStatusStats + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareAssetDiscoveryStatusStatsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewAssetDiscoveryStatusStats(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewAssetDiscoveryStatusStatsSlice(c *Client, des, nw []AssetDiscoveryStatusStats) []AssetDiscoveryStatusStats { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []AssetDiscoveryStatusStats + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewAssetDiscoveryStatusStats(c, &d, &n)) + } + + return items +} + +// The differ returns a list of diffs, along with a list of operations that should be taken +// to remedy them. Right now, it does not attempt to consolidate operations - if several +// fields can be fixed with a patch update, it will perform the patch several times. +// Diffs on some fields will be ignored if the `desired` state has an empty (nil) +// value. This empty value indicates that the user does not care about the state for +// the field. Empty fields on the actual object will cause diffs. +// TODO(magic-modules-eng): for efficiency in some resources, add batching. 
+func diffAsset(c *Client, desired, actual *Asset, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) { + if desired == nil || actual == nil { + return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual) + } + + c.Config.Logger.Infof("Diff function called with desired state: %v", desired) + c.Config.Logger.Infof("Diff function called with actual state: %v", actual) + + var fn dcl.FieldName + var newDiffs []*dcl.FieldDiff + // New style diffs. + if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("Name")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.DisplayName, actual.DisplayName, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("DisplayName")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Uid, actual.Uid, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Uid")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.CreateTime, actual.CreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Labels, actual.Labels, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("Labels")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Description, actual.Description, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("Description")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.State, actual.State, dcl.DiffInfo{OutputOnly: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("State")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.ResourceSpec, actual.ResourceSpec, dcl.DiffInfo{ObjectFunction: compareAssetResourceSpecNewStyle, EmptyObject: EmptyAssetResourceSpec, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ResourceSpec")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.ResourceStatus, actual.ResourceStatus, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareAssetResourceStatusNewStyle, EmptyObject: EmptyAssetResourceStatus, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ResourceStatus")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.SecurityStatus, actual.SecurityStatus, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareAssetSecurityStatusNewStyle, EmptyObject: EmptyAssetSecurityStatus, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SecurityStatus")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.DiscoverySpec, actual.DiscoverySpec, dcl.DiffInfo{ObjectFunction: compareAssetDiscoverySpecNewStyle, EmptyObject: EmptyAssetDiscoverySpec, OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("DiscoverySpec")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.DiscoveryStatus, actual.DiscoveryStatus, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareAssetDiscoveryStatusNewStyle, EmptyObject: EmptyAssetDiscoveryStatus, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DiscoveryStatus")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Location, actual.Location, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Location")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Lake, actual.Lake, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Lake")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.DataplexZone, actual.DataplexZone, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Zone")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if len(newDiffs) > 0 { + c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs) + } + return newDiffs, nil +} +func compareAssetResourceSpecNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*AssetResourceSpec) + if !ok { + desiredNotPointer, ok := d.(AssetResourceSpec) + if !ok { + return nil, fmt.Errorf("obj %v is not a AssetResourceSpec or *AssetResourceSpec", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*AssetResourceSpec) + if !ok { + actualNotPointer, ok := a.(AssetResourceSpec) + if !ok { + return nil, fmt.Errorf("obj %v is not a AssetResourceSpec", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Type, actual.Type, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Type")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ReadAccessMode, actual.ReadAccessMode, dcl.DiffInfo{ServerDefault: true, Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("ReadAccessMode")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareAssetResourceStatusNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*AssetResourceStatus) + if !ok { + desiredNotPointer, ok := d.(AssetResourceStatus) + if !ok { + return nil, fmt.Errorf("obj %v is not a AssetResourceStatus or *AssetResourceStatus", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*AssetResourceStatus) + if !ok { + actualNotPointer, ok := a.(AssetResourceStatus) + if !ok { + return nil, fmt.Errorf("obj %v is not a AssetResourceStatus", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.State, actual.State, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("State")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Message, actual.Message, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("Message")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareAssetSecurityStatusNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*AssetSecurityStatus) + if !ok { + desiredNotPointer, ok := d.(AssetSecurityStatus) + if !ok { + return nil, fmt.Errorf("obj %v is not a AssetSecurityStatus or *AssetSecurityStatus", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*AssetSecurityStatus) + if !ok { + actualNotPointer, ok := a.(AssetSecurityStatus) + if !ok { + return nil, fmt.Errorf("obj %v is not a AssetSecurityStatus", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.State, actual.State, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("State")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Message, actual.Message, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("Message")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareAssetDiscoverySpecNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*AssetDiscoverySpec) + if !ok { + desiredNotPointer, ok := d.(AssetDiscoverySpec) + if !ok { + return nil, fmt.Errorf("obj %v is not a AssetDiscoverySpec or *AssetDiscoverySpec", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*AssetDiscoverySpec) + if !ok { + actualNotPointer, ok := a.(AssetDiscoverySpec) + if !ok { + return nil, fmt.Errorf("obj %v is not a AssetDiscoverySpec", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Enabled, actual.Enabled, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("Enabled")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.IncludePatterns, actual.IncludePatterns, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("IncludePatterns")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ExcludePatterns, actual.ExcludePatterns, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("ExcludePatterns")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.CsvOptions, actual.CsvOptions, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareAssetDiscoverySpecCsvOptionsNewStyle, EmptyObject: EmptyAssetDiscoverySpecCsvOptions, OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("CsvOptions")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.JsonOptions, actual.JsonOptions, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareAssetDiscoverySpecJsonOptionsNewStyle, EmptyObject: EmptyAssetDiscoverySpecJsonOptions, OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("JsonOptions")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Schedule, actual.Schedule, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("Schedule")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareAssetDiscoverySpecCsvOptionsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*AssetDiscoverySpecCsvOptions) + if !ok { + desiredNotPointer, ok := d.(AssetDiscoverySpecCsvOptions) + if !ok { + return nil, fmt.Errorf("obj %v is not a AssetDiscoverySpecCsvOptions or *AssetDiscoverySpecCsvOptions", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*AssetDiscoverySpecCsvOptions) + if !ok { + actualNotPointer, ok := a.(AssetDiscoverySpecCsvOptions) + if !ok { + return nil, fmt.Errorf("obj %v is not a AssetDiscoverySpecCsvOptions", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.HeaderRows, actual.HeaderRows, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("HeaderRows")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Delimiter, actual.Delimiter, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("Delimiter")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Encoding, actual.Encoding, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("Encoding")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.DisableTypeInference, actual.DisableTypeInference, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("DisableTypeInference")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareAssetDiscoverySpecJsonOptionsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*AssetDiscoverySpecJsonOptions) + if !ok { + desiredNotPointer, ok := d.(AssetDiscoverySpecJsonOptions) + if !ok { + return nil, fmt.Errorf("obj %v is not a AssetDiscoverySpecJsonOptions or *AssetDiscoverySpecJsonOptions", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*AssetDiscoverySpecJsonOptions) + if !ok { + actualNotPointer, ok := a.(AssetDiscoverySpecJsonOptions) + if !ok { + return nil, fmt.Errorf("obj %v is not a AssetDiscoverySpecJsonOptions", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Encoding, actual.Encoding, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("Encoding")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.DisableTypeInference, actual.DisableTypeInference, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("DisableTypeInference")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
	}
	return diffs, nil
}

// compareAssetDiscoveryStatusNewStyle diffs a desired against an actual
// AssetDiscoveryStatus field by field via dcl.Diff. Every field routes any
// detected change to the "updateAssetUpdateAssetOperation" selector.
// Accepts each argument as either a value or a pointer.
func compareAssetDiscoveryStatusNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	// Normalize d to *AssetDiscoveryStatus, tolerating a non-pointer value.
	desired, ok := d.(*AssetDiscoveryStatus)
	if !ok {
		desiredNotPointer, ok := d.(AssetDiscoveryStatus)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a AssetDiscoveryStatus or *AssetDiscoveryStatus", d)
		}
		desired = &desiredNotPointer
	}
	// Same normalization for the actual value.
	actual, ok := a.(*AssetDiscoveryStatus)
	if !ok {
		actualNotPointer, ok := a.(AssetDiscoveryStatus)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a AssetDiscoveryStatus", a)
		}
		actual = &actualNotPointer
	}

	// One dcl.Diff call per field; errors abort, non-empty diffs accumulate.
	if ds, err := dcl.Diff(desired.State, actual.State, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("State")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Message, actual.Message, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("Message")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.LastRunTime, actual.LastRunTime, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("LastRunTime")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	// Stats is a nested message; compared via its own NewStyle comparator.
	if ds, err := dcl.Diff(desired.Stats, actual.Stats, dcl.DiffInfo{ObjectFunction: compareAssetDiscoveryStatusStatsNewStyle, EmptyObject: EmptyAssetDiscoveryStatusStats, OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("Stats")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.LastRunDuration, actual.LastRunDuration, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("LastRunDuration")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareAssetDiscoveryStatusStatsNewStyle diffs a desired against an actual
// AssetDiscoveryStatusStats. Same shape as the parent comparator above.
func compareAssetDiscoveryStatusStatsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*AssetDiscoveryStatusStats)
	if !ok {
		desiredNotPointer, ok := d.(AssetDiscoveryStatusStats)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a AssetDiscoveryStatusStats or *AssetDiscoveryStatusStats", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*AssetDiscoveryStatusStats)
	if !ok {
		actualNotPointer, ok := a.(AssetDiscoveryStatusStats)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a AssetDiscoveryStatusStats", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.DataItems, actual.DataItems, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("DataItems")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.DataSize, actual.DataSize, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("DataSize")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Tables, actual.Tables, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("Tables")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Filesets, actual.Filesets, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAssetUpdateAssetOperation")}, fn.AddNest("Filesets")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// urlNormalized returns a copy of the resource struct with values normalized
// for URL substitutions. For instance, it converts long-form self-links to
// short-form so they can be substituted in.
func (r *Asset) urlNormalized() *Asset {
	normalized := dcl.Copy(*r).(Asset)
	normalized.Name = dcl.SelfLinkToName(r.Name)
	normalized.DisplayName = dcl.SelfLinkToName(r.DisplayName)
	normalized.Uid = dcl.SelfLinkToName(r.Uid)
	normalized.Description = dcl.SelfLinkToName(r.Description)
	normalized.Project = dcl.SelfLinkToName(r.Project)
	normalized.Location = dcl.SelfLinkToName(r.Location)
	normalized.Lake = dcl.SelfLinkToName(r.Lake)
	normalized.DataplexZone = dcl.SelfLinkToName(r.DataplexZone)
	return &normalized
}

// updateURL returns the request URL for the named update method. Only
// "UpdateAsset" is supported; any other name is an error.
// NOTE: the {{ "{{" }}...{{ "}}" }} sequences are Go-template escapes (this is
// a .tmpl file) that render to literal {{field}} placeholders for dcl.URL.
func (r *Asset) updateURL(userBasePath, updateName string) (string, error) {
	nr := r.urlNormalized()
	if updateName == "UpdateAsset" {
		fields := map[string]interface{}{
			"project":      dcl.ValueOrEmptyString(nr.Project),
			"location":     dcl.ValueOrEmptyString(nr.Location),
			"dataplexZone": dcl.ValueOrEmptyString(nr.DataplexZone),
			"lake":         dcl.ValueOrEmptyString(nr.Lake),
			"name":         dcl.ValueOrEmptyString(nr.Name),
		}
		return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/lakes/{{ "{{" }}lake{{ "}}" }}/zones/{{ "{{" }}dataplexZone{{ "}}" }}/assets/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, fields), nil

	}

	return "", fmt.Errorf("unknown update name: %s", updateName)
}

// marshal encodes the Asset resource into JSON for a Create request, and
// performs transformations from the resource schema to the API schema if
// necessary.
func (r *Asset) marshal(c *Client) ([]byte, error) {
	m, err := expandAsset(c, r)
	if err != nil {
		return nil, fmt.Errorf("error marshalling Asset: %w", err)
	}

	return json.Marshal(m)
}

// unmarshalAsset decodes JSON responses into the Asset resource schema.
func unmarshalAsset(b []byte, c *Client, res *Asset) (*Asset, error) {
	var m map[string]interface{}
	if err := json.Unmarshal(b, &m); err != nil {
		return nil, err
	}
	return unmarshalMapAsset(m, c, res)
}

// unmarshalMapAsset converts an already-decoded JSON map into an Asset,
// failing if flattening produces nothing (empty/invalid object).
func unmarshalMapAsset(m map[string]interface{}, c *Client, res *Asset) (*Asset, error) {

	flattened := flattenAsset(c, m, res)
	if flattened == nil {
		return nil, fmt.Errorf("attempted to flatten empty json object")
	}
	return flattened, nil
}

// expandAsset expands Asset into a JSON request object.
func expandAsset(c *Client, f *Asset) (map[string]interface{}, error) {
	m := make(map[string]interface{})
	res := f
	_ = res
	// name is derived from the URL components rather than sent verbatim.
	if v, err := dcl.DeriveField("projects/%s/locations/%s/lakes/%s/zones/%s/assets/%s", f.Name, dcl.SelfLinkToName(f.Project), dcl.SelfLinkToName(f.Location), dcl.SelfLinkToName(f.Lake), dcl.SelfLinkToName(f.DataplexZone), dcl.SelfLinkToName(f.Name)); err != nil {
		return nil, fmt.Errorf("error expanding Name into name: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["name"] = v
	}
	if v := f.DisplayName; dcl.ValueShouldBeSent(v) {
		m["displayName"] = v
	}
	if v := f.Labels; dcl.ValueShouldBeSent(v) {
		m["labels"] = v
	}
	if v := f.Description; dcl.ValueShouldBeSent(v) {
		m["description"] = v
	}
	if v, err := expandAssetResourceSpec(c, f.ResourceSpec, res); err != nil {
		return nil, fmt.Errorf("error expanding ResourceSpec into resourceSpec: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["resourceSpec"] = v
	}
	if v, err := expandAssetDiscoverySpec(c, f.DiscoverySpec, res); err != nil {
		return nil, fmt.Errorf("error expanding DiscoverySpec into discoverySpec: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["discoverySpec"] = v
	}
	// Project/Location/Lake/DataplexZone are URL-scoped: dcl.EmptyValue()
	// yields an empty value, so these keys are effectively never set in the
	// request body — presumably the standard DCL pattern for URL parameters
	// (TODO confirm against dcl.EmptyValue's contract).
	if v, err := dcl.EmptyValue(); err != nil {
		return nil, fmt.Errorf("error expanding Project into project: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["project"] = v
	}
	if v, err := dcl.EmptyValue(); err != nil {
		return nil, fmt.Errorf("error expanding Location into location: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["location"] = v
	}
	if v, err := dcl.EmptyValue(); err != nil {
		return nil, fmt.Errorf("error expanding Lake into lake: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["lake"] = v
	}
	if v, err := dcl.EmptyValue(); err != nil {
		return nil, fmt.Errorf("error expanding DataplexZone into zone: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["zone"] = v
	}

	return m, nil
}

// flattenAsset flattens Asset from a JSON response object into the
// Asset type. Returns nil for non-map or empty input.
func flattenAsset(c *Client, i interface{}, res *Asset) *Asset {
	m, ok := i.(map[string]interface{})
	if !ok {
		return nil
	}
	if len(m) == 0 {
		return nil
	}

	resultRes := &Asset{}
	resultRes.Name = dcl.FlattenString(m["name"])
	resultRes.DisplayName = dcl.FlattenString(m["displayName"])
	resultRes.Uid = dcl.FlattenString(m["uid"])
	resultRes.CreateTime = dcl.FlattenString(m["createTime"])
	resultRes.UpdateTime = dcl.FlattenString(m["updateTime"])
	resultRes.Labels = dcl.FlattenKeyValuePairs(m["labels"])
	resultRes.Description = dcl.FlattenString(m["description"])
	resultRes.State = flattenAssetStateEnum(m["state"])
	resultRes.ResourceSpec = flattenAssetResourceSpec(c, m["resourceSpec"], res)
	resultRes.ResourceStatus = flattenAssetResourceStatus(c, m["resourceStatus"], res)
	resultRes.SecurityStatus = flattenAssetSecurityStatus(c, m["securityStatus"], res)
	resultRes.DiscoverySpec = flattenAssetDiscoverySpec(c, m["discoverySpec"], res)
	resultRes.DiscoveryStatus = flattenAssetDiscoveryStatus(c, m["discoveryStatus"], res)
	resultRes.Project = dcl.FlattenString(m["project"])
	resultRes.Location = dcl.FlattenString(m["location"])
	resultRes.Lake = dcl.FlattenString(m["lake"])
	// Note the API field is "zone" while the DCL field is DataplexZone.
	resultRes.DataplexZone = dcl.FlattenString(m["zone"])

	return resultRes
}

// expandAssetResourceSpecMap expands the contents of AssetResourceSpec into a JSON
// request object.
func expandAssetResourceSpecMap(c *Client, f map[string]AssetResourceSpec, res *Asset) (map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := make(map[string]interface{})
	for k, item := range f {
		i, err := expandAssetResourceSpec(c, &item, res)
		if err != nil {
			return nil, err
		}
		if i != nil {
			items[k] = i
		}
	}

	return items, nil
}

// expandAssetResourceSpecSlice expands the contents of AssetResourceSpec into a JSON
// request object.
+func expandAssetResourceSpecSlice(c *Client, f []AssetResourceSpec, res *Asset) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandAssetResourceSpec(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenAssetResourceSpecMap flattens the contents of AssetResourceSpec from a JSON +// response object. +func flattenAssetResourceSpecMap(c *Client, i interface{}, res *Asset) map[string]AssetResourceSpec { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]AssetResourceSpec{} + } + + if len(a) == 0 { + return map[string]AssetResourceSpec{} + } + + items := make(map[string]AssetResourceSpec) + for k, item := range a { + items[k] = *flattenAssetResourceSpec(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenAssetResourceSpecSlice flattens the contents of AssetResourceSpec from a JSON +// response object. +func flattenAssetResourceSpecSlice(c *Client, i interface{}, res *Asset) []AssetResourceSpec { + a, ok := i.([]interface{}) + if !ok { + return []AssetResourceSpec{} + } + + if len(a) == 0 { + return []AssetResourceSpec{} + } + + items := make([]AssetResourceSpec, 0, len(a)) + for _, item := range a { + items = append(items, *flattenAssetResourceSpec(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandAssetResourceSpec expands an instance of AssetResourceSpec into a JSON +// request object. 
+func expandAssetResourceSpec(c *Client, f *AssetResourceSpec, res *Asset) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Name; !dcl.IsEmptyValueIndirect(v) { + m["name"] = v + } + if v := f.Type; !dcl.IsEmptyValueIndirect(v) { + m["type"] = v + } + if v := f.ReadAccessMode; !dcl.IsEmptyValueIndirect(v) { + m["readAccessMode"] = v + } + + return m, nil +} + +// flattenAssetResourceSpec flattens an instance of AssetResourceSpec from a JSON +// response object. +func flattenAssetResourceSpec(c *Client, i interface{}, res *Asset) *AssetResourceSpec { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &AssetResourceSpec{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyAssetResourceSpec + } + r.Name = dcl.FlattenString(m["name"]) + r.Type = flattenAssetResourceSpecTypeEnum(m["type"]) + r.ReadAccessMode = flattenAssetResourceSpecReadAccessModeEnum(m["readAccessMode"]) + + return r +} + +// expandAssetResourceStatusMap expands the contents of AssetResourceStatus into a JSON +// request object. +func expandAssetResourceStatusMap(c *Client, f map[string]AssetResourceStatus, res *Asset) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandAssetResourceStatus(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandAssetResourceStatusSlice expands the contents of AssetResourceStatus into a JSON +// request object. 
+func expandAssetResourceStatusSlice(c *Client, f []AssetResourceStatus, res *Asset) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandAssetResourceStatus(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenAssetResourceStatusMap flattens the contents of AssetResourceStatus from a JSON +// response object. +func flattenAssetResourceStatusMap(c *Client, i interface{}, res *Asset) map[string]AssetResourceStatus { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]AssetResourceStatus{} + } + + if len(a) == 0 { + return map[string]AssetResourceStatus{} + } + + items := make(map[string]AssetResourceStatus) + for k, item := range a { + items[k] = *flattenAssetResourceStatus(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenAssetResourceStatusSlice flattens the contents of AssetResourceStatus from a JSON +// response object. +func flattenAssetResourceStatusSlice(c *Client, i interface{}, res *Asset) []AssetResourceStatus { + a, ok := i.([]interface{}) + if !ok { + return []AssetResourceStatus{} + } + + if len(a) == 0 { + return []AssetResourceStatus{} + } + + items := make([]AssetResourceStatus, 0, len(a)) + for _, item := range a { + items = append(items, *flattenAssetResourceStatus(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandAssetResourceStatus expands an instance of AssetResourceStatus into a JSON +// request object. 
+func expandAssetResourceStatus(c *Client, f *AssetResourceStatus, res *Asset) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.State; !dcl.IsEmptyValueIndirect(v) { + m["state"] = v + } + if v := f.Message; !dcl.IsEmptyValueIndirect(v) { + m["message"] = v + } + if v := f.UpdateTime; !dcl.IsEmptyValueIndirect(v) { + m["updateTime"] = v + } + + return m, nil +} + +// flattenAssetResourceStatus flattens an instance of AssetResourceStatus from a JSON +// response object. +func flattenAssetResourceStatus(c *Client, i interface{}, res *Asset) *AssetResourceStatus { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &AssetResourceStatus{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyAssetResourceStatus + } + r.State = flattenAssetResourceStatusStateEnum(m["state"]) + r.Message = dcl.FlattenString(m["message"]) + r.UpdateTime = dcl.FlattenString(m["updateTime"]) + + return r +} + +// expandAssetSecurityStatusMap expands the contents of AssetSecurityStatus into a JSON +// request object. +func expandAssetSecurityStatusMap(c *Client, f map[string]AssetSecurityStatus, res *Asset) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandAssetSecurityStatus(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandAssetSecurityStatusSlice expands the contents of AssetSecurityStatus into a JSON +// request object. 
+func expandAssetSecurityStatusSlice(c *Client, f []AssetSecurityStatus, res *Asset) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandAssetSecurityStatus(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenAssetSecurityStatusMap flattens the contents of AssetSecurityStatus from a JSON +// response object. +func flattenAssetSecurityStatusMap(c *Client, i interface{}, res *Asset) map[string]AssetSecurityStatus { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]AssetSecurityStatus{} + } + + if len(a) == 0 { + return map[string]AssetSecurityStatus{} + } + + items := make(map[string]AssetSecurityStatus) + for k, item := range a { + items[k] = *flattenAssetSecurityStatus(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenAssetSecurityStatusSlice flattens the contents of AssetSecurityStatus from a JSON +// response object. +func flattenAssetSecurityStatusSlice(c *Client, i interface{}, res *Asset) []AssetSecurityStatus { + a, ok := i.([]interface{}) + if !ok { + return []AssetSecurityStatus{} + } + + if len(a) == 0 { + return []AssetSecurityStatus{} + } + + items := make([]AssetSecurityStatus, 0, len(a)) + for _, item := range a { + items = append(items, *flattenAssetSecurityStatus(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandAssetSecurityStatus expands an instance of AssetSecurityStatus into a JSON +// request object. 
+func expandAssetSecurityStatus(c *Client, f *AssetSecurityStatus, res *Asset) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.State; !dcl.IsEmptyValueIndirect(v) { + m["state"] = v + } + if v := f.Message; !dcl.IsEmptyValueIndirect(v) { + m["message"] = v + } + if v := f.UpdateTime; !dcl.IsEmptyValueIndirect(v) { + m["updateTime"] = v + } + + return m, nil +} + +// flattenAssetSecurityStatus flattens an instance of AssetSecurityStatus from a JSON +// response object. +func flattenAssetSecurityStatus(c *Client, i interface{}, res *Asset) *AssetSecurityStatus { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &AssetSecurityStatus{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyAssetSecurityStatus + } + r.State = flattenAssetSecurityStatusStateEnum(m["state"]) + r.Message = dcl.FlattenString(m["message"]) + r.UpdateTime = dcl.FlattenString(m["updateTime"]) + + return r +} + +// expandAssetDiscoverySpecMap expands the contents of AssetDiscoverySpec into a JSON +// request object. +func expandAssetDiscoverySpecMap(c *Client, f map[string]AssetDiscoverySpec, res *Asset) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandAssetDiscoverySpec(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandAssetDiscoverySpecSlice expands the contents of AssetDiscoverySpec into a JSON +// request object. 
+func expandAssetDiscoverySpecSlice(c *Client, f []AssetDiscoverySpec, res *Asset) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandAssetDiscoverySpec(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenAssetDiscoverySpecMap flattens the contents of AssetDiscoverySpec from a JSON +// response object. +func flattenAssetDiscoverySpecMap(c *Client, i interface{}, res *Asset) map[string]AssetDiscoverySpec { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]AssetDiscoverySpec{} + } + + if len(a) == 0 { + return map[string]AssetDiscoverySpec{} + } + + items := make(map[string]AssetDiscoverySpec) + for k, item := range a { + items[k] = *flattenAssetDiscoverySpec(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenAssetDiscoverySpecSlice flattens the contents of AssetDiscoverySpec from a JSON +// response object. +func flattenAssetDiscoverySpecSlice(c *Client, i interface{}, res *Asset) []AssetDiscoverySpec { + a, ok := i.([]interface{}) + if !ok { + return []AssetDiscoverySpec{} + } + + if len(a) == 0 { + return []AssetDiscoverySpec{} + } + + items := make([]AssetDiscoverySpec, 0, len(a)) + for _, item := range a { + items = append(items, *flattenAssetDiscoverySpec(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandAssetDiscoverySpec expands an instance of AssetDiscoverySpec into a JSON +// request object. 
+func expandAssetDiscoverySpec(c *Client, f *AssetDiscoverySpec, res *Asset) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Enabled; !dcl.IsEmptyValueIndirect(v) { + m["enabled"] = v + } + if v := f.IncludePatterns; v != nil { + m["includePatterns"] = v + } + if v := f.ExcludePatterns; v != nil { + m["excludePatterns"] = v + } + if v, err := expandAssetDiscoverySpecCsvOptions(c, f.CsvOptions, res); err != nil { + return nil, fmt.Errorf("error expanding CsvOptions into csvOptions: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["csvOptions"] = v + } + if v, err := expandAssetDiscoverySpecJsonOptions(c, f.JsonOptions, res); err != nil { + return nil, fmt.Errorf("error expanding JsonOptions into jsonOptions: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["jsonOptions"] = v + } + if v := f.Schedule; !dcl.IsEmptyValueIndirect(v) { + m["schedule"] = v + } + + return m, nil +} + +// flattenAssetDiscoverySpec flattens an instance of AssetDiscoverySpec from a JSON +// response object. +func flattenAssetDiscoverySpec(c *Client, i interface{}, res *Asset) *AssetDiscoverySpec { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &AssetDiscoverySpec{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyAssetDiscoverySpec + } + r.Enabled = dcl.FlattenBool(m["enabled"]) + r.IncludePatterns = dcl.FlattenStringSlice(m["includePatterns"]) + r.ExcludePatterns = dcl.FlattenStringSlice(m["excludePatterns"]) + r.CsvOptions = flattenAssetDiscoverySpecCsvOptions(c, m["csvOptions"], res) + r.JsonOptions = flattenAssetDiscoverySpecJsonOptions(c, m["jsonOptions"], res) + r.Schedule = dcl.FlattenString(m["schedule"]) + + return r +} + +// expandAssetDiscoverySpecCsvOptionsMap expands the contents of AssetDiscoverySpecCsvOptions into a JSON +// request object. 
+func expandAssetDiscoverySpecCsvOptionsMap(c *Client, f map[string]AssetDiscoverySpecCsvOptions, res *Asset) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandAssetDiscoverySpecCsvOptions(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandAssetDiscoverySpecCsvOptionsSlice expands the contents of AssetDiscoverySpecCsvOptions into a JSON +// request object. +func expandAssetDiscoverySpecCsvOptionsSlice(c *Client, f []AssetDiscoverySpecCsvOptions, res *Asset) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandAssetDiscoverySpecCsvOptions(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenAssetDiscoverySpecCsvOptionsMap flattens the contents of AssetDiscoverySpecCsvOptions from a JSON +// response object. +func flattenAssetDiscoverySpecCsvOptionsMap(c *Client, i interface{}, res *Asset) map[string]AssetDiscoverySpecCsvOptions { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]AssetDiscoverySpecCsvOptions{} + } + + if len(a) == 0 { + return map[string]AssetDiscoverySpecCsvOptions{} + } + + items := make(map[string]AssetDiscoverySpecCsvOptions) + for k, item := range a { + items[k] = *flattenAssetDiscoverySpecCsvOptions(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenAssetDiscoverySpecCsvOptionsSlice flattens the contents of AssetDiscoverySpecCsvOptions from a JSON +// response object. 
+func flattenAssetDiscoverySpecCsvOptionsSlice(c *Client, i interface{}, res *Asset) []AssetDiscoverySpecCsvOptions { + a, ok := i.([]interface{}) + if !ok { + return []AssetDiscoverySpecCsvOptions{} + } + + if len(a) == 0 { + return []AssetDiscoverySpecCsvOptions{} + } + + items := make([]AssetDiscoverySpecCsvOptions, 0, len(a)) + for _, item := range a { + items = append(items, *flattenAssetDiscoverySpecCsvOptions(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandAssetDiscoverySpecCsvOptions expands an instance of AssetDiscoverySpecCsvOptions into a JSON +// request object. +func expandAssetDiscoverySpecCsvOptions(c *Client, f *AssetDiscoverySpecCsvOptions, res *Asset) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.HeaderRows; !dcl.IsEmptyValueIndirect(v) { + m["headerRows"] = v + } + if v := f.Delimiter; !dcl.IsEmptyValueIndirect(v) { + m["delimiter"] = v + } + if v := f.Encoding; !dcl.IsEmptyValueIndirect(v) { + m["encoding"] = v + } + if v := f.DisableTypeInference; !dcl.IsEmptyValueIndirect(v) { + m["disableTypeInference"] = v + } + + return m, nil +} + +// flattenAssetDiscoverySpecCsvOptions flattens an instance of AssetDiscoverySpecCsvOptions from a JSON +// response object. +func flattenAssetDiscoverySpecCsvOptions(c *Client, i interface{}, res *Asset) *AssetDiscoverySpecCsvOptions { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &AssetDiscoverySpecCsvOptions{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyAssetDiscoverySpecCsvOptions + } + r.HeaderRows = dcl.FlattenInteger(m["headerRows"]) + r.Delimiter = dcl.FlattenString(m["delimiter"]) + r.Encoding = dcl.FlattenString(m["encoding"]) + r.DisableTypeInference = dcl.FlattenBool(m["disableTypeInference"]) + + return r +} + +// expandAssetDiscoverySpecJsonOptionsMap expands the contents of AssetDiscoverySpecJsonOptions into a JSON +// request object. 
+func expandAssetDiscoverySpecJsonOptionsMap(c *Client, f map[string]AssetDiscoverySpecJsonOptions, res *Asset) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandAssetDiscoverySpecJsonOptions(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandAssetDiscoverySpecJsonOptionsSlice expands the contents of AssetDiscoverySpecJsonOptions into a JSON +// request object. +func expandAssetDiscoverySpecJsonOptionsSlice(c *Client, f []AssetDiscoverySpecJsonOptions, res *Asset) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandAssetDiscoverySpecJsonOptions(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenAssetDiscoverySpecJsonOptionsMap flattens the contents of AssetDiscoverySpecJsonOptions from a JSON +// response object. +func flattenAssetDiscoverySpecJsonOptionsMap(c *Client, i interface{}, res *Asset) map[string]AssetDiscoverySpecJsonOptions { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]AssetDiscoverySpecJsonOptions{} + } + + if len(a) == 0 { + return map[string]AssetDiscoverySpecJsonOptions{} + } + + items := make(map[string]AssetDiscoverySpecJsonOptions) + for k, item := range a { + items[k] = *flattenAssetDiscoverySpecJsonOptions(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenAssetDiscoverySpecJsonOptionsSlice flattens the contents of AssetDiscoverySpecJsonOptions from a JSON +// response object. 
+func flattenAssetDiscoverySpecJsonOptionsSlice(c *Client, i interface{}, res *Asset) []AssetDiscoverySpecJsonOptions { + a, ok := i.([]interface{}) + if !ok { + return []AssetDiscoverySpecJsonOptions{} + } + + if len(a) == 0 { + return []AssetDiscoverySpecJsonOptions{} + } + + items := make([]AssetDiscoverySpecJsonOptions, 0, len(a)) + for _, item := range a { + items = append(items, *flattenAssetDiscoverySpecJsonOptions(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandAssetDiscoverySpecJsonOptions expands an instance of AssetDiscoverySpecJsonOptions into a JSON +// request object. +func expandAssetDiscoverySpecJsonOptions(c *Client, f *AssetDiscoverySpecJsonOptions, res *Asset) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Encoding; !dcl.IsEmptyValueIndirect(v) { + m["encoding"] = v + } + if v := f.DisableTypeInference; !dcl.IsEmptyValueIndirect(v) { + m["disableTypeInference"] = v + } + + return m, nil +} + +// flattenAssetDiscoverySpecJsonOptions flattens an instance of AssetDiscoverySpecJsonOptions from a JSON +// response object. +func flattenAssetDiscoverySpecJsonOptions(c *Client, i interface{}, res *Asset) *AssetDiscoverySpecJsonOptions { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &AssetDiscoverySpecJsonOptions{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyAssetDiscoverySpecJsonOptions + } + r.Encoding = dcl.FlattenString(m["encoding"]) + r.DisableTypeInference = dcl.FlattenBool(m["disableTypeInference"]) + + return r +} + +// expandAssetDiscoveryStatusMap expands the contents of AssetDiscoveryStatus into a JSON +// request object. 
+func expandAssetDiscoveryStatusMap(c *Client, f map[string]AssetDiscoveryStatus, res *Asset) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandAssetDiscoveryStatus(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandAssetDiscoveryStatusSlice expands the contents of AssetDiscoveryStatus into a JSON +// request object. +func expandAssetDiscoveryStatusSlice(c *Client, f []AssetDiscoveryStatus, res *Asset) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandAssetDiscoveryStatus(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenAssetDiscoveryStatusMap flattens the contents of AssetDiscoveryStatus from a JSON +// response object. +func flattenAssetDiscoveryStatusMap(c *Client, i interface{}, res *Asset) map[string]AssetDiscoveryStatus { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]AssetDiscoveryStatus{} + } + + if len(a) == 0 { + return map[string]AssetDiscoveryStatus{} + } + + items := make(map[string]AssetDiscoveryStatus) + for k, item := range a { + items[k] = *flattenAssetDiscoveryStatus(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenAssetDiscoveryStatusSlice flattens the contents of AssetDiscoveryStatus from a JSON +// response object. 
+func flattenAssetDiscoveryStatusSlice(c *Client, i interface{}, res *Asset) []AssetDiscoveryStatus { + a, ok := i.([]interface{}) + if !ok { + return []AssetDiscoveryStatus{} + } + + if len(a) == 0 { + return []AssetDiscoveryStatus{} + } + + items := make([]AssetDiscoveryStatus, 0, len(a)) + for _, item := range a { + items = append(items, *flattenAssetDiscoveryStatus(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandAssetDiscoveryStatus expands an instance of AssetDiscoveryStatus into a JSON +// request object. +func expandAssetDiscoveryStatus(c *Client, f *AssetDiscoveryStatus, res *Asset) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.State; !dcl.IsEmptyValueIndirect(v) { + m["state"] = v + } + if v := f.Message; !dcl.IsEmptyValueIndirect(v) { + m["message"] = v + } + if v := f.UpdateTime; !dcl.IsEmptyValueIndirect(v) { + m["updateTime"] = v + } + if v := f.LastRunTime; !dcl.IsEmptyValueIndirect(v) { + m["lastRunTime"] = v + } + if v, err := expandAssetDiscoveryStatusStats(c, f.Stats, res); err != nil { + return nil, fmt.Errorf("error expanding Stats into stats: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["stats"] = v + } + if v := f.LastRunDuration; !dcl.IsEmptyValueIndirect(v) { + m["lastRunDuration"] = v + } + + return m, nil +} + +// flattenAssetDiscoveryStatus flattens an instance of AssetDiscoveryStatus from a JSON +// response object. 
+func flattenAssetDiscoveryStatus(c *Client, i interface{}, res *Asset) *AssetDiscoveryStatus { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &AssetDiscoveryStatus{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyAssetDiscoveryStatus + } + r.State = flattenAssetDiscoveryStatusStateEnum(m["state"]) + r.Message = dcl.FlattenString(m["message"]) + r.UpdateTime = dcl.FlattenString(m["updateTime"]) + r.LastRunTime = dcl.FlattenString(m["lastRunTime"]) + r.Stats = flattenAssetDiscoveryStatusStats(c, m["stats"], res) + r.LastRunDuration = dcl.FlattenString(m["lastRunDuration"]) + + return r +} + +// expandAssetDiscoveryStatusStatsMap expands the contents of AssetDiscoveryStatusStats into a JSON +// request object. +func expandAssetDiscoveryStatusStatsMap(c *Client, f map[string]AssetDiscoveryStatusStats, res *Asset) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandAssetDiscoveryStatusStats(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandAssetDiscoveryStatusStatsSlice expands the contents of AssetDiscoveryStatusStats into a JSON +// request object. +func expandAssetDiscoveryStatusStatsSlice(c *Client, f []AssetDiscoveryStatusStats, res *Asset) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandAssetDiscoveryStatusStats(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenAssetDiscoveryStatusStatsMap flattens the contents of AssetDiscoveryStatusStats from a JSON +// response object. 
+func flattenAssetDiscoveryStatusStatsMap(c *Client, i interface{}, res *Asset) map[string]AssetDiscoveryStatusStats {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]AssetDiscoveryStatusStats{}
+	}
+
+	if len(a) == 0 {
+		return map[string]AssetDiscoveryStatusStats{}
+	}
+
+	items := make(map[string]AssetDiscoveryStatusStats)
+	for k, item := range a {
+		// NOTE(review): assumes each value is a JSON object and that the flattener
+		// returns non-nil; malformed input would panic here.
+		items[k] = *flattenAssetDiscoveryStatusStats(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenAssetDiscoveryStatusStatsSlice flattens the contents of AssetDiscoveryStatusStats from a JSON
+// response object.
+func flattenAssetDiscoveryStatusStatsSlice(c *Client, i interface{}, res *Asset) []AssetDiscoveryStatusStats {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []AssetDiscoveryStatusStats{}
+	}
+
+	if len(a) == 0 {
+		return []AssetDiscoveryStatusStats{}
+	}
+
+	items := make([]AssetDiscoveryStatusStats, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenAssetDiscoveryStatusStats(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandAssetDiscoveryStatusStats expands an instance of AssetDiscoveryStatusStats into a JSON
+// request object.
+func expandAssetDiscoveryStatusStats(c *Client, f *AssetDiscoveryStatusStats, res *Asset) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	// Fields with empty values are omitted from the request body entirely.
+	m := make(map[string]interface{})
+	if v := f.DataItems; !dcl.IsEmptyValueIndirect(v) {
+		m["dataItems"] = v
+	}
+	if v := f.DataSize; !dcl.IsEmptyValueIndirect(v) {
+		m["dataSize"] = v
+	}
+	if v := f.Tables; !dcl.IsEmptyValueIndirect(v) {
+		m["tables"] = v
+	}
+	if v := f.Filesets; !dcl.IsEmptyValueIndirect(v) {
+		m["filesets"] = v
+	}
+
+	return m, nil
+}
+
+// flattenAssetDiscoveryStatusStats flattens an instance of AssetDiscoveryStatusStats from a JSON
+// response object.
+func flattenAssetDiscoveryStatusStats(c *Client, i interface{}, res *Asset) *AssetDiscoveryStatusStats {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &AssetDiscoveryStatusStats{}
+
+	// An explicitly empty response object maps to the shared empty sentinel
+	// instead of the freshly allocated value above.
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyAssetDiscoveryStatusStats
+	}
+	r.DataItems = dcl.FlattenInteger(m["dataItems"])
+	r.DataSize = dcl.FlattenInteger(m["dataSize"])
+	r.Tables = dcl.FlattenInteger(m["tables"])
+	r.Filesets = dcl.FlattenInteger(m["filesets"])
+
+	return r
+}
+
+// flattenAssetStateEnumMap flattens the contents of AssetStateEnum from a JSON
+// response object.
+func flattenAssetStateEnumMap(c *Client, i interface{}, res *Asset) map[string]AssetStateEnum {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]AssetStateEnum{}
+	}
+
+	if len(a) == 0 {
+		return map[string]AssetStateEnum{}
+	}
+
+	items := make(map[string]AssetStateEnum)
+	for k, item := range a {
+		// NOTE(review): item.(interface{}) is a no-op assertion emitted by the
+		// generator; a non-string value makes the flattener return nil, which
+		// would panic on the dereference.
+		items[k] = *flattenAssetStateEnum(item.(interface{}))
+	}
+
+	return items
+}
+
+// flattenAssetStateEnumSlice flattens the contents of AssetStateEnum from a JSON
+// response object.
+func flattenAssetStateEnumSlice(c *Client, i interface{}, res *Asset) []AssetStateEnum {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []AssetStateEnum{}
+	}
+
+	if len(a) == 0 {
+		return []AssetStateEnum{}
+	}
+
+	items := make([]AssetStateEnum, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenAssetStateEnum(item.(interface{})))
+	}
+
+	return items
+}
+
+// flattenAssetStateEnum asserts that an interface is a string, and returns a
+// pointer to a *AssetStateEnum with the same value as that string.
+func flattenAssetStateEnum(i interface{}) *AssetStateEnum {
+	s, ok := i.(string)
+	if !ok {
+		return nil
+	}
+
+	return AssetStateEnumRef(s)
+}
+
+// flattenAssetResourceSpecTypeEnumMap flattens the contents of AssetResourceSpecTypeEnum from a JSON
+// response object.
+func flattenAssetResourceSpecTypeEnumMap(c *Client, i interface{}, res *Asset) map[string]AssetResourceSpecTypeEnum {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]AssetResourceSpecTypeEnum{}
+	}
+
+	if len(a) == 0 {
+		return map[string]AssetResourceSpecTypeEnum{}
+	}
+
+	items := make(map[string]AssetResourceSpecTypeEnum)
+	for k, item := range a {
+		// NOTE(review): a non-string value makes the flattener return nil, which
+		// would panic on the dereference — assumes well-formed API responses.
+		items[k] = *flattenAssetResourceSpecTypeEnum(item.(interface{}))
+	}
+
+	return items
+}
+
+// flattenAssetResourceSpecTypeEnumSlice flattens the contents of AssetResourceSpecTypeEnum from a JSON
+// response object.
+func flattenAssetResourceSpecTypeEnumSlice(c *Client, i interface{}, res *Asset) []AssetResourceSpecTypeEnum {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []AssetResourceSpecTypeEnum{}
+	}
+
+	if len(a) == 0 {
+		return []AssetResourceSpecTypeEnum{}
+	}
+
+	items := make([]AssetResourceSpecTypeEnum, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenAssetResourceSpecTypeEnum(item.(interface{})))
+	}
+
+	return items
+}
+
+// flattenAssetResourceSpecTypeEnum asserts that an interface is a string, and returns a
+// pointer to a *AssetResourceSpecTypeEnum with the same value as that string.
+func flattenAssetResourceSpecTypeEnum(i interface{}) *AssetResourceSpecTypeEnum {
+	s, ok := i.(string)
+	if !ok {
+		return nil
+	}
+
+	return AssetResourceSpecTypeEnumRef(s)
+}
+
+// flattenAssetResourceSpecReadAccessModeEnumMap flattens the contents of AssetResourceSpecReadAccessModeEnum from a JSON
+// response object.
+func flattenAssetResourceSpecReadAccessModeEnumMap(c *Client, i interface{}, res *Asset) map[string]AssetResourceSpecReadAccessModeEnum {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]AssetResourceSpecReadAccessModeEnum{}
+	}
+
+	if len(a) == 0 {
+		return map[string]AssetResourceSpecReadAccessModeEnum{}
+	}
+
+	items := make(map[string]AssetResourceSpecReadAccessModeEnum)
+	for k, item := range a {
+		// NOTE(review): a non-string value makes the flattener return nil, which
+		// would panic on the dereference — assumes well-formed API responses.
+		items[k] = *flattenAssetResourceSpecReadAccessModeEnum(item.(interface{}))
+	}
+
+	return items
+}
+
+// flattenAssetResourceSpecReadAccessModeEnumSlice flattens the contents of AssetResourceSpecReadAccessModeEnum from a JSON
+// response object.
+func flattenAssetResourceSpecReadAccessModeEnumSlice(c *Client, i interface{}, res *Asset) []AssetResourceSpecReadAccessModeEnum {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []AssetResourceSpecReadAccessModeEnum{}
+	}
+
+	if len(a) == 0 {
+		return []AssetResourceSpecReadAccessModeEnum{}
+	}
+
+	items := make([]AssetResourceSpecReadAccessModeEnum, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenAssetResourceSpecReadAccessModeEnum(item.(interface{})))
+	}
+
+	return items
+}
+
+// flattenAssetResourceSpecReadAccessModeEnum asserts that an interface is a string, and returns a
+// pointer to a *AssetResourceSpecReadAccessModeEnum with the same value as that string.
+func flattenAssetResourceSpecReadAccessModeEnum(i interface{}) *AssetResourceSpecReadAccessModeEnum {
+	s, ok := i.(string)
+	if !ok {
+		return nil
+	}
+
+	return AssetResourceSpecReadAccessModeEnumRef(s)
+}
+
+// flattenAssetResourceStatusStateEnumMap flattens the contents of AssetResourceStatusStateEnum from a JSON
+// response object.
+func flattenAssetResourceStatusStateEnumMap(c *Client, i interface{}, res *Asset) map[string]AssetResourceStatusStateEnum {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]AssetResourceStatusStateEnum{}
+	}
+
+	if len(a) == 0 {
+		return map[string]AssetResourceStatusStateEnum{}
+	}
+
+	items := make(map[string]AssetResourceStatusStateEnum)
+	for k, item := range a {
+		// NOTE(review): a non-string value makes the flattener return nil, which
+		// would panic on the dereference — assumes well-formed API responses.
+		items[k] = *flattenAssetResourceStatusStateEnum(item.(interface{}))
+	}
+
+	return items
+}
+
+// flattenAssetResourceStatusStateEnumSlice flattens the contents of AssetResourceStatusStateEnum from a JSON
+// response object.
+func flattenAssetResourceStatusStateEnumSlice(c *Client, i interface{}, res *Asset) []AssetResourceStatusStateEnum {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []AssetResourceStatusStateEnum{}
+	}
+
+	if len(a) == 0 {
+		return []AssetResourceStatusStateEnum{}
+	}
+
+	items := make([]AssetResourceStatusStateEnum, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenAssetResourceStatusStateEnum(item.(interface{})))
+	}
+
+	return items
+}
+
+// flattenAssetResourceStatusStateEnum asserts that an interface is a string, and returns a
+// pointer to a *AssetResourceStatusStateEnum with the same value as that string.
+func flattenAssetResourceStatusStateEnum(i interface{}) *AssetResourceStatusStateEnum {
+	s, ok := i.(string)
+	if !ok {
+		return nil
+	}
+
+	return AssetResourceStatusStateEnumRef(s)
+}
+
+// flattenAssetSecurityStatusStateEnumMap flattens the contents of AssetSecurityStatusStateEnum from a JSON
+// response object.
+func flattenAssetSecurityStatusStateEnumMap(c *Client, i interface{}, res *Asset) map[string]AssetSecurityStatusStateEnum {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]AssetSecurityStatusStateEnum{}
+	}
+
+	if len(a) == 0 {
+		return map[string]AssetSecurityStatusStateEnum{}
+	}
+
+	items := make(map[string]AssetSecurityStatusStateEnum)
+	for k, item := range a {
+		// NOTE(review): a non-string value makes the flattener return nil, which
+		// would panic on the dereference — assumes well-formed API responses.
+		items[k] = *flattenAssetSecurityStatusStateEnum(item.(interface{}))
+	}
+
+	return items
+}
+
+// flattenAssetSecurityStatusStateEnumSlice flattens the contents of AssetSecurityStatusStateEnum from a JSON
+// response object.
+func flattenAssetSecurityStatusStateEnumSlice(c *Client, i interface{}, res *Asset) []AssetSecurityStatusStateEnum {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []AssetSecurityStatusStateEnum{}
+	}
+
+	if len(a) == 0 {
+		return []AssetSecurityStatusStateEnum{}
+	}
+
+	items := make([]AssetSecurityStatusStateEnum, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenAssetSecurityStatusStateEnum(item.(interface{})))
+	}
+
+	return items
+}
+
+// flattenAssetSecurityStatusStateEnum asserts that an interface is a string, and returns a
+// pointer to a *AssetSecurityStatusStateEnum with the same value as that string.
+func flattenAssetSecurityStatusStateEnum(i interface{}) *AssetSecurityStatusStateEnum {
+	s, ok := i.(string)
+	if !ok {
+		return nil
+	}
+
+	return AssetSecurityStatusStateEnumRef(s)
+}
+
+// flattenAssetDiscoveryStatusStateEnumMap flattens the contents of AssetDiscoveryStatusStateEnum from a JSON
+// response object.
+func flattenAssetDiscoveryStatusStateEnumMap(c *Client, i interface{}, res *Asset) map[string]AssetDiscoveryStatusStateEnum {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]AssetDiscoveryStatusStateEnum{}
+	}
+
+	if len(a) == 0 {
+		return map[string]AssetDiscoveryStatusStateEnum{}
+	}
+
+	items := make(map[string]AssetDiscoveryStatusStateEnum)
+	for k, item := range a {
+		// NOTE(review): a non-string value makes the flattener return nil, which
+		// would panic on the dereference — assumes well-formed API responses.
+		items[k] = *flattenAssetDiscoveryStatusStateEnum(item.(interface{}))
+	}
+
+	return items
+}
+
+// flattenAssetDiscoveryStatusStateEnumSlice flattens the contents of AssetDiscoveryStatusStateEnum from a JSON
+// response object.
+func flattenAssetDiscoveryStatusStateEnumSlice(c *Client, i interface{}, res *Asset) []AssetDiscoveryStatusStateEnum {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []AssetDiscoveryStatusStateEnum{}
+	}
+
+	if len(a) == 0 {
+		return []AssetDiscoveryStatusStateEnum{}
+	}
+
+	items := make([]AssetDiscoveryStatusStateEnum, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenAssetDiscoveryStatusStateEnum(item.(interface{})))
+	}
+
+	return items
+}
+
+// flattenAssetDiscoveryStatusStateEnum asserts that an interface is a string, and returns a
+// pointer to a *AssetDiscoveryStatusStateEnum with the same value as that string.
+func flattenAssetDiscoveryStatusStateEnum(i interface{}) *AssetDiscoveryStatusStateEnum {
+	s, ok := i.(string)
+	if !ok {
+		return nil
+	}
+
+	return AssetDiscoveryStatusStateEnumRef(s)
+}
+
+// This function returns a matcher that checks whether a serialized resource matches this resource
+// in its parameters (as defined by the fields in a Get, which definitionally define resource
+// identity). This is useful in extracting the element from a List call.
+func (r *Asset) matcher(c *Client) func([]byte) bool {
+	return func(b []byte) bool {
+		cr, err := unmarshalAsset(b, c, r)
+		if err != nil {
+			c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.")
+			return false
+		}
+		// Compare only the identity fields (Project, Location, DataplexZone,
+		// Lake, Name) of the URL-normalized resources; a pair of nils counts as
+		// equal, a single nil as unequal.
+		nr := r.urlNormalized()
+		ncr := cr.urlNormalized()
+		c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr)
+
+		if nr.Project == nil && ncr.Project == nil {
+			c.Config.Logger.Info("Both Project fields null - considering equal.")
+		} else if nr.Project == nil || ncr.Project == nil {
+			c.Config.Logger.Info("Only one Project field is null - considering unequal.")
+			return false
+		} else if *nr.Project != *ncr.Project {
+			return false
+		}
+		if nr.Location == nil && ncr.Location == nil {
+			c.Config.Logger.Info("Both Location fields null - considering equal.")
+		} else if nr.Location == nil || ncr.Location == nil {
+			c.Config.Logger.Info("Only one Location field is null - considering unequal.")
+			return false
+		} else if *nr.Location != *ncr.Location {
+			return false
+		}
+		if nr.DataplexZone == nil && ncr.DataplexZone == nil {
+			c.Config.Logger.Info("Both DataplexZone fields null - considering equal.")
+		} else if nr.DataplexZone == nil || ncr.DataplexZone == nil {
+			c.Config.Logger.Info("Only one DataplexZone field is null - considering unequal.")
+			return false
+		} else if *nr.DataplexZone != *ncr.DataplexZone {
+			return false
+		}
+		if nr.Lake == nil && ncr.Lake == nil {
+			c.Config.Logger.Info("Both Lake fields null - considering equal.")
+		} else if nr.Lake == nil || ncr.Lake == nil {
+			c.Config.Logger.Info("Only one Lake field is null - considering unequal.")
+			return false
+		} else if *nr.Lake != *ncr.Lake {
+			return false
+		}
+		if nr.Name == nil && ncr.Name == nil {
+			c.Config.Logger.Info("Both Name fields null - considering equal.")
+		} else if nr.Name == nil || ncr.Name == nil {
+			c.Config.Logger.Info("Only one Name field is null - considering unequal.")
+			return false
+		} else if *nr.Name != *ncr.Name {
+			return false
+		}
+		return true
+	}
+}
+
+type assetDiff struct {
+	// The diff should include one or the other of RequiresRecreate or UpdateOp.
+	RequiresRecreate bool
+	UpdateOp         assetApiOperation
+	FieldName        string // used for error logging
+}
+
+// convertFieldDiffsToAssetDiffs groups field diffs by the API operation each one
+// triggers and returns one assetDiff per distinct operation name; the special
+// operation name "Recreate" marks the diff as requiring resource recreation.
+func convertFieldDiffsToAssetDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]assetDiff, error) {
+	opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff)
+	// Map each operation name to the field diffs associated with it.
+	for _, fd := range fds {
+		for _, ro := range fd.ResultingOperation {
+			if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok {
+				fieldDiffs = append(fieldDiffs, fd)
+				opNamesToFieldDiffs[ro] = fieldDiffs
+			} else {
+				config.Logger.Infof("%s required due to diff: %v", ro, fd)
+				opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd}
+			}
+		}
+	}
+	var diffs []assetDiff
+	// For each operation name, create a assetDiff which contains the operation.
+	for opName, fieldDiffs := range opNamesToFieldDiffs {
+		// Use the first field diff's field name for logging required recreate error.
+		diff := assetDiff{FieldName: fieldDiffs[0].FieldName}
+		if opName == "Recreate" {
+			diff.RequiresRecreate = true
+		} else {
+			apiOp, err := convertOpNameToAssetApiOperation(opName, fieldDiffs, opts...)
+			if err != nil {
+				return diffs, err
+			}
+			diff.UpdateOp = apiOp
+		}
+		diffs = append(diffs, diff)
+	}
+	return diffs, nil
+}
+
+// convertOpNameToAssetApiOperation maps a generated operation name to its
+// concrete assetApiOperation implementation; Asset supports a single update
+// operation.
+func convertOpNameToAssetApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (assetApiOperation, error) {
+	switch opName {
+
+	case "updateAssetUpdateAssetOperation":
+		return &updateAssetUpdateAssetOperation{FieldDiffs: fieldDiffs}, nil
+
+	default:
+		return nil, fmt.Errorf("no such operation with name: %v", opName)
+	}
+}
+
+// extractAssetFields runs the generated per-object extract hooks over each
+// nested object of the resource, materializing a temporary empty object when
+// the field is nil and writing it back only if the hooks left it non-empty.
+func extractAssetFields(r *Asset) error {
+	vResourceSpec := r.ResourceSpec
+	if vResourceSpec == nil {
+		// note: explicitly not the empty object.
+		vResourceSpec = &AssetResourceSpec{}
+	}
+	if err := extractAssetResourceSpecFields(r, vResourceSpec); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vResourceSpec) {
+		r.ResourceSpec = vResourceSpec
+	}
+	vResourceStatus := r.ResourceStatus
+	if vResourceStatus == nil {
+		// note: explicitly not the empty object.
+		vResourceStatus = &AssetResourceStatus{}
+	}
+	if err := extractAssetResourceStatusFields(r, vResourceStatus); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vResourceStatus) {
+		r.ResourceStatus = vResourceStatus
+	}
+	vSecurityStatus := r.SecurityStatus
+	if vSecurityStatus == nil {
+		// note: explicitly not the empty object.
+		vSecurityStatus = &AssetSecurityStatus{}
+	}
+	if err := extractAssetSecurityStatusFields(r, vSecurityStatus); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vSecurityStatus) {
+		r.SecurityStatus = vSecurityStatus
+	}
+	vDiscoverySpec := r.DiscoverySpec
+	if vDiscoverySpec == nil {
+		// note: explicitly not the empty object.
+		vDiscoverySpec = &AssetDiscoverySpec{}
+	}
+	if err := extractAssetDiscoverySpecFields(r, vDiscoverySpec); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vDiscoverySpec) {
+		r.DiscoverySpec = vDiscoverySpec
+	}
+	vDiscoveryStatus := r.DiscoveryStatus
+	if vDiscoveryStatus == nil {
+		// note: explicitly not the empty object.
+		vDiscoveryStatus = &AssetDiscoveryStatus{}
+	}
+	if err := extractAssetDiscoveryStatusFields(r, vDiscoveryStatus); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vDiscoveryStatus) {
+		r.DiscoveryStatus = vDiscoveryStatus
+	}
+	return nil
+}
+func extractAssetResourceSpecFields(r *Asset, o *AssetResourceSpec) error {
+	return nil
+}
+func extractAssetResourceStatusFields(r *Asset, o *AssetResourceStatus) error {
+	return nil
+}
+func extractAssetSecurityStatusFields(r *Asset, o *AssetSecurityStatus) error {
+	return nil
+}
+func extractAssetDiscoverySpecFields(r *Asset, o *AssetDiscoverySpec) error {
+	vCsvOptions := o.CsvOptions
+	if vCsvOptions == nil {
+		// note: explicitly not the empty object.
+		vCsvOptions = &AssetDiscoverySpecCsvOptions{}
+	}
+	if err := extractAssetDiscoverySpecCsvOptionsFields(r, vCsvOptions); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vCsvOptions) {
+		o.CsvOptions = vCsvOptions
+	}
+	vJsonOptions := o.JsonOptions
+	if vJsonOptions == nil {
+		// note: explicitly not the empty object.
+		vJsonOptions = &AssetDiscoverySpecJsonOptions{}
+	}
+	if err := extractAssetDiscoverySpecJsonOptionsFields(r, vJsonOptions); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vJsonOptions) {
+		o.JsonOptions = vJsonOptions
+	}
+	return nil
+}
+func extractAssetDiscoverySpecCsvOptionsFields(r *Asset, o *AssetDiscoverySpecCsvOptions) error {
+	return nil
+}
+func extractAssetDiscoverySpecJsonOptionsFields(r *Asset, o *AssetDiscoverySpecJsonOptions) error {
+	return nil
+}
+func extractAssetDiscoveryStatusFields(r *Asset, o *AssetDiscoveryStatus) error {
+	vStats := o.Stats
+	if vStats == nil {
+		// note: explicitly not the empty object.
+		vStats = &AssetDiscoveryStatusStats{}
+	}
+	if err := extractAssetDiscoveryStatusStatsFields(r, vStats); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vStats) {
+		o.Stats = vStats
+	}
+	return nil
+}
+func extractAssetDiscoveryStatusStatsFields(r *Asset, o *AssetDiscoveryStatusStats) error {
+	return nil
+}
+
+// postReadExtractAssetFields runs the generated post-read extract hooks over
+// each nested object of the resource, mirroring extractAssetFields.
+func postReadExtractAssetFields(r *Asset) error {
+	vResourceSpec := r.ResourceSpec
+	if vResourceSpec == nil {
+		// note: explicitly not the empty object.
+		vResourceSpec = &AssetResourceSpec{}
+	}
+	if err := postReadExtractAssetResourceSpecFields(r, vResourceSpec); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vResourceSpec) {
+		r.ResourceSpec = vResourceSpec
+	}
+	vResourceStatus := r.ResourceStatus
+	if vResourceStatus == nil {
+		// note: explicitly not the empty object.
+		vResourceStatus = &AssetResourceStatus{}
+	}
+	if err := postReadExtractAssetResourceStatusFields(r, vResourceStatus); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vResourceStatus) {
+		r.ResourceStatus = vResourceStatus
+	}
+	vSecurityStatus := r.SecurityStatus
+	if vSecurityStatus == nil {
+		// note: explicitly not the empty object.
+		vSecurityStatus = &AssetSecurityStatus{}
+	}
+	if err := postReadExtractAssetSecurityStatusFields(r, vSecurityStatus); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vSecurityStatus) {
+		r.SecurityStatus = vSecurityStatus
+	}
+	vDiscoverySpec := r.DiscoverySpec
+	if vDiscoverySpec == nil {
+		// note: explicitly not the empty object.
+		vDiscoverySpec = &AssetDiscoverySpec{}
+	}
+	if err := postReadExtractAssetDiscoverySpecFields(r, vDiscoverySpec); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vDiscoverySpec) {
+		r.DiscoverySpec = vDiscoverySpec
+	}
+	vDiscoveryStatus := r.DiscoveryStatus
+	if vDiscoveryStatus == nil {
+		// note: explicitly not the empty object.
+		vDiscoveryStatus = &AssetDiscoveryStatus{}
+	}
+	if err := postReadExtractAssetDiscoveryStatusFields(r, vDiscoveryStatus); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vDiscoveryStatus) {
+		r.DiscoveryStatus = vDiscoveryStatus
+	}
+	return nil
+}
+func postReadExtractAssetResourceSpecFields(r *Asset, o *AssetResourceSpec) error {
+	return nil
+}
+func postReadExtractAssetResourceStatusFields(r *Asset, o *AssetResourceStatus) error {
+	return nil
+}
+func postReadExtractAssetSecurityStatusFields(r *Asset, o *AssetSecurityStatus) error {
+	return nil
+}
+func postReadExtractAssetDiscoverySpecFields(r *Asset, o *AssetDiscoverySpec) error {
+	vCsvOptions := o.CsvOptions
+	if vCsvOptions == nil {
+		// note: explicitly not the empty object.
+		vCsvOptions = &AssetDiscoverySpecCsvOptions{}
+	}
+	// NOTE(review): this calls the extract* (not postReadExtract*) child hook —
+	// this matches the generator's output, presumably because the child hooks
+	// are identical no-ops; confirm against the upstream DCL generator.
+	if err := extractAssetDiscoverySpecCsvOptionsFields(r, vCsvOptions); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vCsvOptions) {
+		o.CsvOptions = vCsvOptions
+	}
+	vJsonOptions := o.JsonOptions
+	if vJsonOptions == nil {
+		// note: explicitly not the empty object.
+		vJsonOptions = &AssetDiscoverySpecJsonOptions{}
+	}
+	if err := extractAssetDiscoverySpecJsonOptionsFields(r, vJsonOptions); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vJsonOptions) {
+		o.JsonOptions = vJsonOptions
+	}
+	return nil
+}
+func postReadExtractAssetDiscoverySpecCsvOptionsFields(r *Asset, o *AssetDiscoverySpecCsvOptions) error {
+	return nil
+}
+func postReadExtractAssetDiscoverySpecJsonOptionsFields(r *Asset, o *AssetDiscoverySpecJsonOptions) error {
+	return nil
+}
+func postReadExtractAssetDiscoveryStatusFields(r *Asset, o *AssetDiscoveryStatus) error {
+	vStats := o.Stats
+	if vStats == nil {
+		// note: explicitly not the empty object.
+		vStats = &AssetDiscoveryStatusStats{}
+	}
+	if err := extractAssetDiscoveryStatusStatsFields(r, vStats); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vStats) {
+		o.Stats = vStats
+	}
+	return nil
+}
+func postReadExtractAssetDiscoveryStatusStatsFields(r *Asset, o *AssetDiscoveryStatusStats) error {
+	return nil
+}
diff --git a/mmv1/third_party/terraform/services/dataplex/asset_schema.go.tmpl b/mmv1/third_party/terraform/services/dataplex/asset_schema.go.tmpl
new file mode 100644
index 000000000000..f2256349117a
--- /dev/null
+++ b/mmv1/third_party/terraform/services/dataplex/asset_schema.go.tmpl
@@ -0,0 +1,504 @@
+package dataplex
+
+import (
+	dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource"
+)
+
+func DCLAssetSchema() *dcl.Schema {
+	return &dcl.Schema{
+		Info: &dcl.Info{
+			Title:       "Dataplex/Asset",
+			Description: "The Dataplex Asset resource",
+			StructName:  "Asset",
+		},
+		Paths: &dcl.Paths{
+			Get: &dcl.Path{
+				Description: "The function used to get information about a Asset",
+				Parameters: []dcl.PathParameters{
+					dcl.PathParameters{
+						Name:        "asset",
+						Required:    true,
+						Description: "A full instance of a Asset",
+					},
+				},
+			},
+			Apply: &dcl.Path{
+				Description: "The function used to apply information about a Asset",
+				Parameters: []dcl.PathParameters{
+					dcl.PathParameters{
+						Name:        "asset",
+						Required:    true,
+						Description: "A full instance of a Asset",
+					},
+				},
+			},
+			Delete: &dcl.Path{
+				Description: "The function used to delete a Asset",
+				Parameters: []dcl.PathParameters{
+					dcl.PathParameters{
+						Name:        "asset",
+						Required:    true,
+						Description: "A full instance of a Asset",
+					},
+				},
+			},
+			DeleteAll: &dcl.Path{
+				Description: "The function used to delete all Asset",
+				Parameters: []dcl.PathParameters{
+					dcl.PathParameters{
+						Name:     "project",
+						Required: true,
+						Schema: &dcl.PathParametersSchema{
+							Type: "string",
+						},
+					},
+					dcl.PathParameters{
+						Name:     "location",
+						Required: true,
+						Schema: &dcl.PathParametersSchema{
+ Type: "string", + }, + }, + dcl.PathParameters{ + Name: "dataplexZone", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + dcl.PathParameters{ + Name: "lake", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + }, + }, + List: &dcl.Path{ + Description: "The function used to list information about many Asset", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "project", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + dcl.PathParameters{ + Name: "location", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + dcl.PathParameters{ + Name: "dataplexZone", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + dcl.PathParameters{ + Name: "lake", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + }, + }, + }, + Components: &dcl.Components{ + Schemas: map[string]*dcl.Component{ + "Asset": &dcl.Component{ + Title: "Asset", + ID: "projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/lakes/{{ "{{" }}lake{{ "}}" }}/zones/{{ "{{" }}dataplex_zone{{ "}}" }}/assets/{{ "{{" }}name{{ "}}" }}", + ParentContainer: "project", + HasCreate: true, + SchemaProperty: dcl.Property{ + Type: "object", + Required: []string{ + "name", + "resourceSpec", + "discoverySpec", + "project", + "location", + "lake", + "dataplexZone", + }, + Properties: map[string]*dcl.Property{ + "createTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "CreateTime", + ReadOnly: true, + Description: "Output only. The time when the asset was created.", + Immutable: true, + }, + "dataplexZone": &dcl.Property{ + Type: "string", + GoName: "DataplexZone", + Description: "The zone for the resource", + Immutable: true, + Parameter: true, + }, + "description": &dcl.Property{ + Type: "string", + GoName: "Description", + Description: "Optional. 
Description of the asset.", + }, + "discoverySpec": &dcl.Property{ + Type: "object", + GoName: "DiscoverySpec", + GoType: "AssetDiscoverySpec", + Description: "Required. Specification of the discovery feature applied to data referenced by this asset. When this spec is left unset, the asset will use the spec set on the parent zone.", + Required: []string{ + "enabled", + }, + Properties: map[string]*dcl.Property{ + "csvOptions": &dcl.Property{ + Type: "object", + GoName: "CsvOptions", + GoType: "AssetDiscoverySpecCsvOptions", + Description: "Optional. Configuration for CSV data.", + ServerDefault: true, + Properties: map[string]*dcl.Property{ + "delimiter": &dcl.Property{ + Type: "string", + GoName: "Delimiter", + Description: "Optional. The delimiter being used to separate values. This defaults to ','.", + }, + "disableTypeInference": &dcl.Property{ + Type: "boolean", + GoName: "DisableTypeInference", + Description: "Optional. Whether to disable the inference of data type for CSV data. If true, all columns will be registered as strings.", + }, + "encoding": &dcl.Property{ + Type: "string", + GoName: "Encoding", + Description: "Optional. The character encoding of the data. The default is UTF-8.", + }, + "headerRows": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "HeaderRows", + Description: "Optional. The number of rows to interpret as header rows that should be skipped when reading data rows.", + }, + }, + }, + "enabled": &dcl.Property{ + Type: "boolean", + GoName: "Enabled", + Description: "Required. Whether discovery is enabled.", + }, + "excludePatterns": &dcl.Property{ + Type: "array", + GoName: "ExcludePatterns", + Description: "Optional. The list of patterns to apply for selecting data to exclude during discovery. For Cloud Storage bucket assets, these are interpreted as glob patterns used to match object names. 
For BigQuery dataset assets, these are interpreted as patterns to match table names.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + "includePatterns": &dcl.Property{ + Type: "array", + GoName: "IncludePatterns", + Description: "Optional. The list of patterns to apply for selecting data to include during discovery if only a subset of the data should considered. For Cloud Storage bucket assets, these are interpreted as glob patterns used to match object names. For BigQuery dataset assets, these are interpreted as patterns to match table names.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + "jsonOptions": &dcl.Property{ + Type: "object", + GoName: "JsonOptions", + GoType: "AssetDiscoverySpecJsonOptions", + Description: "Optional. Configuration for Json data.", + ServerDefault: true, + Properties: map[string]*dcl.Property{ + "disableTypeInference": &dcl.Property{ + Type: "boolean", + GoName: "DisableTypeInference", + Description: "Optional. Whether to disable the inference of data type for Json data. If true, all columns will be registered as their primitive types (strings, number or boolean).", + }, + "encoding": &dcl.Property{ + Type: "string", + GoName: "Encoding", + Description: "Optional. The character encoding of the data. The default is UTF-8.", + }, + }, + }, + "schedule": &dcl.Property{ + Type: "string", + GoName: "Schedule", + Description: "Optional. Cron schedule (https://en.wikipedia.org/wiki/Cron) for running discovery periodically. Successive discovery runs must be scheduled at least 60 minutes apart. The default value is to run discovery every 60 minutes. To explicitly set a timezone to the cron tab, apply a prefix in the cron tab: \"CRON_TZ=${IANA_TIME_ZONE}\" or TZ=${IANA_TIME_ZONE}\". The ${IANA_TIME_ZONE} may only be a valid string from IANA time zone database. 
For example, \"CRON_TZ=America/New_York 1 * * * *\", or \"TZ=America/New_York 1 * * * *\".", + }, + }, + }, + "discoveryStatus": &dcl.Property{ + Type: "object", + GoName: "DiscoveryStatus", + GoType: "AssetDiscoveryStatus", + ReadOnly: true, + Description: "Output only. Status of the discovery feature applied to data referenced by this asset.", + Properties: map[string]*dcl.Property{ + "lastRunDuration": &dcl.Property{ + Type: "string", + GoName: "LastRunDuration", + Description: "The duration of the last discovery run.", + }, + "lastRunTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "LastRunTime", + Description: "The start time of the last discovery run.", + }, + "message": &dcl.Property{ + Type: "string", + GoName: "Message", + Description: "Additional information about the current state.", + }, + "state": &dcl.Property{ + Type: "string", + GoName: "State", + GoType: "AssetDiscoveryStatusStateEnum", + Description: "The current status of the discovery feature. 
Possible values: STATE_UNSPECIFIED, SCHEDULED, IN_PROGRESS, PAUSED, DISABLED", + Enum: []string{ + "STATE_UNSPECIFIED", + "SCHEDULED", + "IN_PROGRESS", + "PAUSED", + "DISABLED", + }, + }, + "stats": &dcl.Property{ + Type: "object", + GoName: "Stats", + GoType: "AssetDiscoveryStatusStats", + Description: "Data Stats of the asset reported by discovery.", + Properties: map[string]*dcl.Property{ + "dataItems": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "DataItems", + Description: "The count of data items within the referenced resource.", + }, + "dataSize": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "DataSize", + Description: "The number of stored data bytes within the referenced resource.", + }, + "filesets": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "Filesets", + Description: "The count of fileset entities within the referenced resource.", + }, + "tables": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "Tables", + Description: "The count of table entities within the referenced resource.", + }, + }, + }, + "updateTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "UpdateTime", + Description: "Last update time of the status.", + }, + }, + }, + "displayName": &dcl.Property{ + Type: "string", + GoName: "DisplayName", + Description: "Optional. User friendly display name.", + }, + "labels": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "Labels", + Description: "Optional. 
User defined labels for the asset.", + }, + "lake": &dcl.Property{ + Type: "string", + GoName: "Lake", + Description: "The lake for the resource", + Immutable: true, + Parameter: true, + }, + "location": &dcl.Property{ + Type: "string", + GoName: "Location", + Description: "The location for the resource", + Immutable: true, + Parameter: true, + }, + "name": &dcl.Property{ + Type: "string", + GoName: "Name", + Description: "The name of the asset.", + HasLongForm: true, + }, + "project": &dcl.Property{ + Type: "string", + GoName: "Project", + Description: "The project for the resource", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Cloudresourcemanager/Project", + Field: "name", + Parent: true, + }, + }, + Parameter: true, + }, + "resourceSpec": &dcl.Property{ + Type: "object", + GoName: "ResourceSpec", + GoType: "AssetResourceSpec", + Description: "Required. Immutable. Specification of the resource that is referenced by this asset.", + Required: []string{ + "type", + }, + Properties: map[string]*dcl.Property{ + "name": &dcl.Property{ + Type: "string", + GoName: "Name", + Description: "Immutable. Relative name of the cloud resource that contains the data that is being managed within a lake. For example: `projects/{project_number}/buckets/{bucket_id}` `projects/{project_number}/datasets/{dataset_id}`", + Immutable: true, + }, + "readAccessMode": &dcl.Property{ + Type: "string", + GoName: "ReadAccessMode", + GoType: "AssetResourceSpecReadAccessModeEnum", + Description: "Optional. Determines how read permissions are handled for each asset and their associated tables. Only available to storage buckets assets. Possible values: DIRECT, MANAGED", + ServerDefault: true, + Enum: []string{ + "DIRECT", + "MANAGED", + }, + }, + "type": &dcl.Property{ + Type: "string", + GoName: "Type", + GoType: "AssetResourceSpecTypeEnum", + Description: "Required. Immutable. Type of resource. 
Possible values: STORAGE_BUCKET, BIGQUERY_DATASET", + Immutable: true, + Enum: []string{ + "STORAGE_BUCKET", + "BIGQUERY_DATASET", + }, + }, + }, + }, + "resourceStatus": &dcl.Property{ + Type: "object", + GoName: "ResourceStatus", + GoType: "AssetResourceStatus", + ReadOnly: true, + Description: "Output only. Status of the resource referenced by this asset.", + Properties: map[string]*dcl.Property{ + "message": &dcl.Property{ + Type: "string", + GoName: "Message", + Description: "Additional information about the current state.", + }, + "state": &dcl.Property{ + Type: "string", + GoName: "State", + GoType: "AssetResourceStatusStateEnum", + Description: "The current state of the managed resource. Possible values: STATE_UNSPECIFIED, READY, ERROR", + Enum: []string{ + "STATE_UNSPECIFIED", + "READY", + "ERROR", + }, + }, + "updateTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "UpdateTime", + Description: "Last update time of the status.", + }, + }, + }, + "securityStatus": &dcl.Property{ + Type: "object", + GoName: "SecurityStatus", + GoType: "AssetSecurityStatus", + ReadOnly: true, + Description: "Output only. Status of the security policy applied to resource referenced by this asset.", + Properties: map[string]*dcl.Property{ + "message": &dcl.Property{ + Type: "string", + GoName: "Message", + Description: "Additional information about the current state.", + }, + "state": &dcl.Property{ + Type: "string", + GoName: "State", + GoType: "AssetSecurityStatusStateEnum", + Description: "The current state of the security policy applied to the attached resource. 
Possible values: STATE_UNSPECIFIED, READY, APPLYING, ERROR", + Enum: []string{ + "STATE_UNSPECIFIED", + "READY", + "APPLYING", + "ERROR", + }, + }, + "updateTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "UpdateTime", + Description: "Last update time of the status.", + }, + }, + }, + "state": &dcl.Property{ + Type: "string", + GoName: "State", + GoType: "AssetStateEnum", + ReadOnly: true, + Description: "Output only. Current state of the asset. Possible values: STATE_UNSPECIFIED, ACTIVE, CREATING, DELETING, ACTION_REQUIRED", + Immutable: true, + Enum: []string{ + "STATE_UNSPECIFIED", + "ACTIVE", + "CREATING", + "DELETING", + "ACTION_REQUIRED", + }, + }, + "uid": &dcl.Property{ + Type: "string", + GoName: "Uid", + ReadOnly: true, + Description: "Output only. System generated globally unique ID for the asset. This ID will be different if the asset is deleted and re-created with the same name.", + Immutable: true, + }, + "updateTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "UpdateTime", + ReadOnly: true, + Description: "Output only. The time when the asset was last updated.", + Immutable: true, + }, + }, + }, + }, + }, + }, + } +} diff --git a/mmv1/third_party/terraform/services/dataplex/client.go.tmpl b/mmv1/third_party/terraform/services/dataplex/client.go.tmpl new file mode 100644 index 000000000000..77a502b18760 --- /dev/null +++ b/mmv1/third_party/terraform/services/dataplex/client.go.tmpl @@ -0,0 +1,18 @@ +package dataplex + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +// The Client is the base struct of all operations. This will receive the +// Get, Delete, List, and Apply operations on all resources. +type Client struct { + Config *dcl.Config +} + +// NewClient creates a client that retries all operations a few times each. 
+func NewClient(c *dcl.Config) *Client { + return &Client{ + Config: c, + } +} diff --git a/mmv1/third_party/terraform/services/dataplex/dataplex_utils.go.tmpl b/mmv1/third_party/terraform/services/dataplex/dataplex_utils.go.tmpl new file mode 100644 index 000000000000..113ae996c819 --- /dev/null +++ b/mmv1/third_party/terraform/services/dataplex/dataplex_utils.go.tmpl @@ -0,0 +1,11 @@ +package dataplex + +// flattenZoneDiscoverySpecEnable flattens an instance of discovery spec from a JSON +// response object. +func flattenZoneDiscoverySpecEnable(c *Client, i any, _ *Zone) *bool { + v, ok := i.(bool) + if !ok { + v = false + } + return &v +} diff --git a/mmv1/third_party/terraform/services/dataplex/lake.go.tmpl b/mmv1/third_party/terraform/services/dataplex/lake.go.tmpl new file mode 100644 index 000000000000..2fca89b04ce8 --- /dev/null +++ b/mmv1/third_party/terraform/services/dataplex/lake.go.tmpl @@ -0,0 +1,603 @@ +package dataplex + +import ( + "bytes" + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "time" + +{{- if ne $.TargetVersionName "ga" }} + "google.golang.org/api/googleapi" +{{- end }} + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +{{- if eq $.TargetVersionName "ga" }} + "google.golang.org/api/googleapi" +{{- end }} +) + +type Lake struct { + Name *string `json:"name"` + DisplayName *string `json:"displayName"` + Uid *string `json:"uid"` + CreateTime *string `json:"createTime"` + UpdateTime *string `json:"updateTime"` + Labels map[string]string `json:"labels"` + Description *string `json:"description"` + State *LakeStateEnum `json:"state"` + ServiceAccount *string `json:"serviceAccount"` + Metastore *LakeMetastore `json:"metastore"` + AssetStatus *LakeAssetStatus `json:"assetStatus"` + MetastoreStatus *LakeMetastoreStatus `json:"metastoreStatus"` + Project *string `json:"project"` + Location *string `json:"location"` +} + +func (r *Lake) String() string { + return dcl.SprintResource(r) +} + +// The enum 
LakeStateEnum. +type LakeStateEnum string + +// LakeStateEnumRef returns a *LakeStateEnum with the value of string s +// If the empty string is provided, nil is returned. +func LakeStateEnumRef(s string) *LakeStateEnum { + v := LakeStateEnum(s) + return &v +} + +func (v LakeStateEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"STATE_UNSPECIFIED", "ACTIVE", "CREATING", "DELETING", "ACTION_REQUIRED"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "LakeStateEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum LakeMetastoreStatusStateEnum. +type LakeMetastoreStatusStateEnum string + +// LakeMetastoreStatusStateEnumRef returns a *LakeMetastoreStatusStateEnum with the value of string s +// If the empty string is provided, nil is returned. +func LakeMetastoreStatusStateEnumRef(s string) *LakeMetastoreStatusStateEnum { + v := LakeMetastoreStatusStateEnum(s) + return &v +} + +func (v LakeMetastoreStatusStateEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"STATE_UNSPECIFIED", "NONE", "READY", "UPDATING", "ERROR"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "LakeMetastoreStatusStateEnum", + Value: string(v), + Valid: []string{}, + } +} + +type LakeMetastore struct { + empty bool `json:"-"` + Service *string `json:"service"` +} + +type jsonLakeMetastore LakeMetastore + +func (r *LakeMetastore) UnmarshalJSON(data []byte) error { + var res jsonLakeMetastore + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyLakeMetastore + } else { + + r.Service = res.Service + + } + return nil +} + +// This object is used to assert a desired state where this LakeMetastore is +// empty. 
Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyLakeMetastore *LakeMetastore = &LakeMetastore{empty: true} + +func (r *LakeMetastore) Empty() bool { + return r.empty +} + +func (r *LakeMetastore) String() string { + return dcl.SprintResource(r) +} + +func (r *LakeMetastore) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type LakeAssetStatus struct { + empty bool `json:"-"` + UpdateTime *string `json:"updateTime"` + ActiveAssets *int64 `json:"activeAssets"` + SecurityPolicyApplyingAssets *int64 `json:"securityPolicyApplyingAssets"` +} + +type jsonLakeAssetStatus LakeAssetStatus + +func (r *LakeAssetStatus) UnmarshalJSON(data []byte) error { + var res jsonLakeAssetStatus + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyLakeAssetStatus + } else { + + r.UpdateTime = res.UpdateTime + + r.ActiveAssets = res.ActiveAssets + + r.SecurityPolicyApplyingAssets = res.SecurityPolicyApplyingAssets + + } + return nil +} + +// This object is used to assert a desired state where this LakeAssetStatus is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyLakeAssetStatus *LakeAssetStatus = &LakeAssetStatus{empty: true} + +func (r *LakeAssetStatus) Empty() bool { + return r.empty +} + +func (r *LakeAssetStatus) String() string { + return dcl.SprintResource(r) +} + +func (r *LakeAssetStatus) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type LakeMetastoreStatus struct { + empty bool `json:"-"` + State *LakeMetastoreStatusStateEnum `json:"state"` + Message *string `json:"message"` + UpdateTime *string `json:"updateTime"` + Endpoint *string `json:"endpoint"` +} + +type jsonLakeMetastoreStatus LakeMetastoreStatus + +func (r *LakeMetastoreStatus) UnmarshalJSON(data []byte) error { + var res jsonLakeMetastoreStatus + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyLakeMetastoreStatus + } else { + + r.State = res.State + + r.Message = res.Message + + r.UpdateTime = res.UpdateTime + + r.Endpoint = res.Endpoint + + } + return nil +} + +// This object is used to assert a desired state where this LakeMetastoreStatus is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyLakeMetastoreStatus *LakeMetastoreStatus = &LakeMetastoreStatus{empty: true} + +func (r *LakeMetastoreStatus) Empty() bool { + return r.empty +} + +func (r *LakeMetastoreStatus) String() string { + return dcl.SprintResource(r) +} + +func (r *LakeMetastoreStatus) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +// Describe returns a simple description of this resource to ensure that automated tools +// can identify it. +func (r *Lake) Describe() dcl.ServiceTypeVersion { + return dcl.ServiceTypeVersion{ + Service: "dataplex", + Type: "Lake", +{{- if ne $.TargetVersionName "ga" }} + Version: "beta", +{{- else }} + Version: "dataplex", +{{- end }} + } +} + +func (r *Lake) ID() (string, error) { + if err := extractLakeFields(r); err != nil { + return "", err + } + nr := r.urlNormalized() + params := map[string]interface{}{ + "name": dcl.ValueOrEmptyString(nr.Name), + "display_name": dcl.ValueOrEmptyString(nr.DisplayName), + "uid": dcl.ValueOrEmptyString(nr.Uid), + "create_time": dcl.ValueOrEmptyString(nr.CreateTime), + "update_time": dcl.ValueOrEmptyString(nr.UpdateTime), + "labels": dcl.ValueOrEmptyString(nr.Labels), + "description": dcl.ValueOrEmptyString(nr.Description), + "state": dcl.ValueOrEmptyString(nr.State), + "service_account": dcl.ValueOrEmptyString(nr.ServiceAccount), + "metastore": dcl.ValueOrEmptyString(nr.Metastore), + "asset_status": dcl.ValueOrEmptyString(nr.AssetStatus), + "metastore_status": dcl.ValueOrEmptyString(nr.MetastoreStatus), + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + } + return dcl.Nprintf("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/lakes/{{ "{{" }}name{{ "}}" }}", params), nil +} + +const LakeMaxPage = -1 + +type LakeList struct { + Items []*Lake + + nextToken 
string + + pageSize int32 + + resource *Lake +} + +func (l *LakeList) HasNext() bool { + return l.nextToken != "" +} + +func (l *LakeList) Next(ctx context.Context, c *Client) error { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if !l.HasNext() { + return fmt.Errorf("no next page") + } + items, token, err := c.listLake(ctx, l.resource, l.nextToken, l.pageSize) + if err != nil { + return err + } + l.Items = items + l.nextToken = token + return err +} + +func (c *Client) ListLake(ctx context.Context, project, location string) (*LakeList, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + return c.ListLakeWithMaxResults(ctx, project, location, LakeMaxPage) + +} + +func (c *Client) ListLakeWithMaxResults(ctx context.Context, project, location string, pageSize int32) (*LakeList, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // Create a resource object so that we can use proper url normalization methods. + r := &Lake{ + Project: &project, + Location: &location, + } + items, token, err := c.listLake(ctx, r, "", pageSize) + if err != nil { + return nil, err + } + return &LakeList{ + Items: items, + nextToken: token, + pageSize: pageSize, + resource: r, + }, nil +} + +func (c *Client) GetLake(ctx context.Context, r *Lake) (*Lake, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // This is *purposefully* supressing errors. + // This function is used with url-normalized values + not URL normalized values. + // URL Normalized values will throw unintentional errors, since those values are not of the proper parent form. 
+ extractLakeFields(r) + + b, err := c.getLakeRaw(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + return nil, &googleapi.Error{ + Code: 404, + Message: err.Error(), + } + } + return nil, err + } + result, err := unmarshalLake(b, c, r) + if err != nil { + return nil, err + } + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name + + c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) + result, err = canonicalizeLakeNewState(c, result, r) + if err != nil { + return nil, err + } + if err := postReadExtractLakeFields(result); err != nil { + return result, err + } + c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result) + + return result, nil +} + +func (c *Client) DeleteLake(ctx context.Context, r *Lake) error { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if r == nil { + return fmt.Errorf("Lake resource is nil") + } + c.Config.Logger.InfoWithContext(ctx, "Deleting Lake...") + deleteOp := deleteLakeOperation{} + return deleteOp.do(ctx, r, c) +} + +// DeleteAllLake deletes all resources that the filter functions returns true on. 
+func (c *Client) DeleteAllLake(ctx context.Context, project, location string, filter func(*Lake) bool) error { + listObj, err := c.ListLake(ctx, project, location) + if err != nil { + return err + } + + err = c.deleteAllLake(ctx, filter, listObj.Items) + if err != nil { + return err + } + for listObj.HasNext() { + err = listObj.Next(ctx, c) + if err != nil { + return nil + } + err = c.deleteAllLake(ctx, filter, listObj.Items) + if err != nil { + return err + } + } + return nil +} + +func (c *Client) ApplyLake(ctx context.Context, rawDesired *Lake, opts ...dcl.ApplyOption) (*Lake, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + ctx = dcl.ContextWithRequestID(ctx) + var resultNewState *Lake + err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + newState, err := applyLakeHelper(c, ctx, rawDesired, opts...) + resultNewState = newState + if err != nil { + // If the error is 409, there is conflict in resource update. + // Here we want to apply changes based on latest state. + if dcl.IsConflictError(err) { + return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} + } + return nil, err + } + return nil, nil + }, c.Config.RetryProvider) + return resultNewState, err +} + +func applyLakeHelper(c *Client, ctx context.Context, rawDesired *Lake, opts ...dcl.ApplyOption) (*Lake, error) { + c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyLake...") + c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) + + // 1.1: Validation of user-specified fields in desired state. + if err := rawDesired.validate(); err != nil { + return nil, err + } + + if err := extractLakeFields(rawDesired); err != nil { + return nil, err + } + + initial, desired, fieldDiffs, err := c.lakeDiffsForRawDesired(ctx, rawDesired, opts...) 
+ if err != nil { + return nil, fmt.Errorf("failed to create a diff: %w", err) + } + + diffs, err := convertFieldDiffsToLakeDiffs(c.Config, fieldDiffs, opts) + if err != nil { + return nil, err + } + + // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). + + // 2.3: Lifecycle Directive Check + var create bool + lp := dcl.FetchLifecycleParams(opts) + if initial == nil { + if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} + } + create = true + } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), + } + } else { + for _, d := range diffs { + if d.RequiresRecreate { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), + } + } + if dcl.HasLifecycleParam(lp, dcl.BlockModification) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} + } + } + } + + // 2.4 Imperative Request Planning + var ops []lakeApiOperation + if create { + ops = append(ops, &createLakeOperation{}) + } else { + for _, d := range diffs { + ops = append(ops, d.UpdateOp) + } + } + c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) + + // 2.5 Request Actuation + for _, op := range ops { + c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) + if err := op.do(ctx, desired, c); err != nil { + c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", op, op, err) + return nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) + } + return applyLakeDiff(c, ctx, desired, rawDesired, ops, opts...) 
+} + +func applyLakeDiff(c *Client, ctx context.Context, desired *Lake, rawDesired *Lake, ops []lakeApiOperation, opts ...dcl.ApplyOption) (*Lake, error) { + // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") + rawNew, err := c.GetLake(ctx, desired) + if err != nil { + return nil, err + } + // Get additional values from the first response. + // These values should be merged into the newState above. + if len(ops) > 0 { + lastOp := ops[len(ops)-1] + if o, ok := lastOp.(*createLakeOperation); ok { + if r, hasR := o.FirstResponse(); hasR { + + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") + + fullResp, err := unmarshalMapLake(r, c, rawDesired) + if err != nil { + return nil, err + } + + rawNew, err = canonicalizeLakeNewState(c, rawNew, fullResp) + if err != nil { + return nil, err + } + } + } + } + + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) + // 3.2b Canonicalization of raw new state using raw desired state + newState, err := canonicalizeLakeNewState(c, rawNew, rawDesired) + if err != nil { + return rawNew, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) + // 3.3 Comparison of the new state and raw desired state. + // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE + newDesired, err := canonicalizeLakeDesiredState(rawDesired, newState) + if err != nil { + return newState, err + } + + if err := postReadExtractLakeFields(newState); err != nil { + return newState, err + } + + // Need to ensure any transformations made here match acceptably in differ. 
+ if err := postReadExtractLakeFields(newDesired); err != nil { + return newState, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) + newDiffs, err := diffLake(c, newDesired, newState) + if err != nil { + return newState, err + } + + if len(newDiffs) == 0 { + c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") + } else { + c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) + diffMessages := make([]string, len(newDiffs)) + for i, d := range newDiffs { + diffMessages[i] = fmt.Sprintf("%v", d) + } + return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} + } + c.Config.Logger.InfoWithContext(ctx, "Done Apply.") + return newState, nil +} + +func (r *Lake) GetPolicy(basePath string) (string, string, *bytes.Buffer, error) { + u := r.getPolicyURL(basePath) + body := &bytes.Buffer{} + u, err := dcl.AddQueryParams(u, map[string]string{"optionsRequestedPolicyVersion": fmt.Sprintf("%d", r.IAMPolicyVersion())}) + if err != nil { + return "", "", nil, err + } + return u, "", body, nil +} diff --git a/mmv1/third_party/terraform/services/dataplex/lake_internal.go.tmpl b/mmv1/third_party/terraform/services/dataplex/lake_internal.go.tmpl new file mode 100644 index 000000000000..ee2368b61e26 --- /dev/null +++ b/mmv1/third_party/terraform/services/dataplex/lake_internal.go.tmpl @@ -0,0 +1,2021 @@ +package dataplex + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "strings" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource/operations" +) + +func (r *Lake) validate() error { + + if err := dcl.Required(r, "name"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Project, "Project"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Location, "Location"); err != nil { + return err + } + if 
!dcl.IsEmptyValueIndirect(r.Metastore) { + if err := r.Metastore.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.AssetStatus) { + if err := r.AssetStatus.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.MetastoreStatus) { + if err := r.MetastoreStatus.validate(); err != nil { + return err + } + } + return nil +} +func (r *LakeMetastore) validate() error { + return nil +} +func (r *LakeAssetStatus) validate() error { + return nil +} +func (r *LakeMetastoreStatus) validate() error { + return nil +} +func (r *Lake) basePath() string { + params := map[string]interface{}{} + return dcl.Nprintf("https://dataplex.googleapis.com/v1/", params) +} + +func (r *Lake) getURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/lakes/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +func (r *Lake) listURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/lakes", nr.basePath(), userBasePath, params), nil + +} + +func (r *Lake) createURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/lakes?lakeId={{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil + +} + +func (r 
*Lake) deleteURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/lakes/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +func (r *Lake) SetPolicyURL(userBasePath string) string { + nr := r.urlNormalized() + fields := map[string]interface{}{} + return dcl.URL("", nr.basePath(), userBasePath, fields) +} + +func (r *Lake) SetPolicyVerb() string { + return "" +} + +func (r *Lake) getPolicyURL(userBasePath string) string { + nr := r.urlNormalized() + fields := map[string]interface{}{} + return dcl.URL("", nr.basePath(), userBasePath, fields) +} + +func (r *Lake) IAMPolicyVersion() int { + return 3 +} + +// lakeApiOperation represents a mutable operation in the underlying REST +// API such as Create, Update, or Delete. +type lakeApiOperation interface { + do(context.Context, *Lake, *Client) error +} + +// newUpdateLakeUpdateLakeRequest creates a request for an +// Lake resource's UpdateLake update type by filling in the update +// fields based on the intended state of the resource. 
// newUpdateLakeUpdateLakeRequest builds the JSON request map for the
// UpdateLake PATCH call from the desired Lake state. Only fields that expand
// to a non-empty value are included.
func newUpdateLakeUpdateLakeRequest(ctx context.Context, f *Lake, c *Client) (map[string]interface{}, error) {
	req := map[string]interface{}{}
	res := f
	_ = res

	if v, err := dcl.DeriveField("projects/%s/locations/%s/lakes/%s", f.Name, dcl.SelfLinkToName(f.Project), dcl.SelfLinkToName(f.Location), dcl.SelfLinkToName(f.Name)); err != nil {
		return nil, fmt.Errorf("error expanding Name into name: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		req["name"] = v
	}
	if v := f.DisplayName; !dcl.IsEmptyValueIndirect(v) {
		req["displayName"] = v
	}
	if v := f.Labels; !dcl.IsEmptyValueIndirect(v) {
		req["labels"] = v
	}
	if v := f.Description; !dcl.IsEmptyValueIndirect(v) {
		req["description"] = v
	}
	if v, err := expandLakeMetastore(c, f.Metastore, res); err != nil {
		return nil, fmt.Errorf("error expanding Metastore into metastore: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		req["metastore"] = v
	}
	if v, err := expandLakeAssetStatus(c, f.AssetStatus, res); err != nil {
		return nil, fmt.Errorf("error expanding AssetStatus into assetStatus: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		req["assetStatus"] = v
	}
	if v, err := expandLakeMetastoreStatus(c, f.MetastoreStatus, res); err != nil {
		return nil, fmt.Errorf("error expanding MetastoreStatus into metastoreStatus: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		req["metastoreStatus"] = v
	}
	// NOTE(review): this unconditionally overwrites the derived "name" set
	// above with the fully-qualified resource name; presumably intentional
	// generator output for the update request — confirm against the DCL
	// generator before changing.
	req["name"] = fmt.Sprintf("projects/%s/locations/%s/lakes/%s", *f.Project, *f.Location, *f.Name)

	return req, nil
}

// marshalUpdateLakeUpdateLakeRequest converts the update into
// the final JSON request body.
func marshalUpdateLakeUpdateLakeRequest(c *Client, m map[string]interface{}) ([]byte, error) {

	return json.Marshal(m)
}

type updateLakeUpdateLakeOperation struct {
	// If the update operation has the REQUIRES_APPLY_OPTIONS trait, this will be populated.
	// Usually it will be nil - this is to prevent us from accidentally depending on apply
	// options, which should usually be unnecessary.
	ApplyOptions []dcl.ApplyOption
	FieldDiffs   []*dcl.FieldDiff
}

// do creates a request and sends it to the appropriate URL. In most operations,
// do will transcribe a subset of the resource into a request object and send a
// PUT request to a single URL. (This particular operation issues a PATCH with
// an updateMask built from the field diffs, then waits on the returned
// long-running operation.)

func (op *updateLakeUpdateLakeOperation) do(ctx context.Context, r *Lake, c *Client) error {
	// Confirm the resource exists before attempting the update.
	_, err := c.GetLake(ctx, r)
	if err != nil {
		return err
	}

	u, err := r.updateURL(c.Config.BasePath, "UpdateLake")
	if err != nil {
		return err
	}
	// Restrict the update to the fields that actually differ.
	mask := dcl.UpdateMask(op.FieldDiffs)
	u, err = dcl.AddQueryParams(u, map[string]string{"updateMask": mask})
	if err != nil {
		return err
	}

	req, err := newUpdateLakeUpdateLakeRequest(ctx, r, c)
	if err != nil {
		return err
	}

	c.Config.Logger.InfoWithContextf(ctx, "Created update: %#v", req)
	body, err := marshalUpdateLakeUpdateLakeRequest(c, req)
	if err != nil {
		return err
	}
	resp, err := dcl.SendRequest(ctx, c.Config, "PATCH", u, bytes.NewBuffer(body), c.Config.RetryProvider)
	if err != nil {
		return err
	}

	// The update returns a long-running operation; wait for it to finish.
	var o operations.StandardGCPOperation
	if err := dcl.ParseResponse(resp.Response, &o); err != nil {
		return err
	}
	err = o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET")

	if err != nil {
		return err
	}

	return nil
}

// listLakeRaw performs one List call for Lakes under r's project/location and
// returns the raw response body. pageToken/pageSize are added as query
// parameters when set (pageSize is omitted when equal to LakeMaxPage).
func (c *Client) listLakeRaw(ctx context.Context, r *Lake, pageToken string, pageSize int32) ([]byte, error) {
	u, err := r.urlNormalized().listURL(c.Config.BasePath)
	if err != nil {
		return nil, err
	}

	m := make(map[string]string)
	if pageToken != "" {
		m["pageToken"] = pageToken
	}

	if pageSize != LakeMaxPage {
		m["pageSize"] = fmt.Sprintf("%v", pageSize)
	}

	u, err = dcl.AddQueryParams(u, m)
	if err != nil {
		return nil, err
	}
	resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider)
	if err != nil {
		return nil, err
	}
	defer resp.Response.Body.Close()
	return ioutil.ReadAll(resp.Response.Body)
}

// listLakeOperation mirrors the JSON shape of a Lakes list response.
type listLakeOperation struct {
	Lakes []map[string]interface{} `json:"lakes"`
	Token string                   `json:"nextPageToken"`
}

// listLake returns one page of Lakes plus the next page token. The Project and
// Location of each result are backfilled from r, since they are URL parameters
// rather than response fields.
func (c *Client) listLake(ctx context.Context, r *Lake, pageToken string, pageSize int32) ([]*Lake, string, error) {
	b, err := c.listLakeRaw(ctx, r, pageToken, pageSize)
	if err != nil {
		return nil, "", err
	}

	var m listLakeOperation
	if err := json.Unmarshal(b, &m); err != nil {
		return nil, "", err
	}

	var l []*Lake
	for _, v := range m.Lakes {
		res, err := unmarshalMapLake(v, c, r)
		if err != nil {
			return nil, m.Token, err
		}
		res.Project = r.Project
		res.Location = r.Location
		l = append(l, res)
	}

	return l, m.Token, nil
}

// deleteAllLake deletes every resource in resources for which f returns true,
// collecting (rather than short-circuiting on) individual deletion errors.
func (c *Client) deleteAllLake(ctx context.Context, f func(*Lake) bool, resources []*Lake) error {
	var errors []string
	for _, res := range resources {
		if f(res) {
			// We do not want deleteAll to fail on a deletion or else it will stop deleting other resources.
			err := c.DeleteLake(ctx, res)
			if err != nil {
				errors = append(errors, err.Error())
			}
		}
	}
	if len(errors) > 0 {
		return fmt.Errorf("%v", strings.Join(errors, "\n"))
	} else {
		return nil
	}
}

type deleteLakeOperation struct{}

// do deletes the Lake: it verifies existence (treating not-found as success),
// issues the DELETE, waits on the returned operation, and then polls until the
// Get call stops returning the resource.
func (op *deleteLakeOperation) do(ctx context.Context, r *Lake, c *Client) error {
	r, err := c.GetLake(ctx, r)
	if err != nil {
		if dcl.IsNotFound(err) {
			c.Config.Logger.InfoWithContextf(ctx, "Lake not found, returning. Original error: %v", err)
			return nil
		}
		c.Config.Logger.WarningWithContextf(ctx, "GetLake checking for existence. error: %v", err)
		return err
	}

	u, err := r.deleteURL(c.Config.BasePath)
	if err != nil {
		return err
	}

	// Delete should never have a body
	body := &bytes.Buffer{}
	resp, err := dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider)
	if err != nil {
		return err
	}

	// wait for object to be deleted.
	var o operations.StandardGCPOperation
	if err := dcl.ParseResponse(resp.Response, &o); err != nil {
		return err
	}
	if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil {
		return err
	}

	// We saw a race condition where for some successful delete operation, the Get calls returned resources for a short duration.
	// This is the reason we are adding retry to handle that case.
	// NOTE(review): the error returned by dcl.Do is deliberately discarded —
	// the delete operation itself has already completed; this poll is
	// best-effort. Confirm this matches the DCL generator's intent before
	// changing.
	retriesRemaining := 10
	dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) {
		_, err := c.GetLake(ctx, r)
		if dcl.IsNotFound(err) {
			return nil, nil
		}
		if retriesRemaining > 0 {
			retriesRemaining--
			return &dcl.RetryDetails{}, dcl.OperationNotDone{}
		}
		return nil, dcl.NotDeletedError{ExistingResource: r}
	}, c.Config.RetryProvider)
	return nil
}

// Create operations are similar to Update operations, although they do not have
// specific request objects. The Create request object is the json encoding of
// the resource, which is modified by res.marshal to form the base request body.
type createLakeOperation struct {
	// response holds the first response body of the create operation, when available.
	response map[string]interface{}
}

// FirstResponse returns the create operation's first response body and whether
// one was captured.
func (op *createLakeOperation) FirstResponse() (map[string]interface{}, bool) {
	return op.response, len(op.response) > 0
}

// do creates the Lake: POSTs the marshalled resource, waits on the returned
// long-running operation, records its first response, and verifies the
// resource is readable afterwards.
func (op *createLakeOperation) do(ctx context.Context, r *Lake, c *Client) error {
	c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r)
	u, err := r.createURL(c.Config.BasePath)
	if err != nil {
		return err
	}

	req, err := r.marshal(c)
	if err != nil {
		return err
	}
	resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider)
	if err != nil {
		return err
	}
	// wait for object to be created.
	var o operations.StandardGCPOperation
	if err := dcl.ParseResponse(resp.Response, &o); err != nil {
		return err
	}
	if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil {
		c.Config.Logger.Warningf("Creation failed after waiting for operation: %v", err)
		return err
	}
	c.Config.Logger.InfoWithContextf(ctx, "Successfully waited for operation")
	op.response, _ = o.FirstResponse()

	// Confirm the created resource can be read back.
	if _, err := c.GetLake(ctx, r); err != nil {
		c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err)
		return err
	}

	return nil
}

// getLakeRaw GETs the Lake and returns the raw response body.
func (c *Client) getLakeRaw(ctx context.Context, r *Lake) ([]byte, error) {

	u, err := r.getURL(c.Config.BasePath)
	if err != nil {
		return nil, err
	}
	resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider)
	if err != nil {
		return nil, err
	}
	defer resp.Response.Body.Close()
	b, err := ioutil.ReadAll(resp.Response.Body)
	if err != nil {
		return nil, err
	}

	return b, nil
}

// lakeDiffsForRawDesired fetches the current state of the Lake (honoring a
// state hint from opts if present), canonicalizes both the initial and desired
// states, and returns them along with the field diffs between the two.
// A nil initial return with nil error means the resource does not yet exist.
func (c *Client) lakeDiffsForRawDesired(ctx context.Context, rawDesired *Lake, opts ...dcl.ApplyOption) (initial, desired *Lake, diffs []*dcl.FieldDiff, err error) {
	c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...")
	// First, let us see if the user provided a state hint. If they did, we will start fetching based on that.
	var fetchState *Lake
	if sh := dcl.FetchStateHint(opts); sh != nil {
		if r, ok := sh.(*Lake); !ok {
			c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected Lake, got %T", sh)
		} else {
			fetchState = r
		}
	}
	if fetchState == nil {
		fetchState = rawDesired
	}

	// 1.2: Retrieval of raw initial state from API
	rawInitial, err := c.GetLake(ctx, fetchState)
	if rawInitial == nil {
		if !dcl.IsNotFound(err) {
			c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a Lake resource already exists: %s", err)
			return nil, nil, nil, fmt.Errorf("failed to retrieve Lake resource: %v", err)
		}
		c.Config.Logger.InfoWithContext(ctx, "Found that Lake resource did not exist.")
		// Perform canonicalization to pick up defaults.
		desired, err = canonicalizeLakeDesiredState(rawDesired, rawInitial)
		return nil, desired, nil, err
	}
	c.Config.Logger.InfoWithContextf(ctx, "Found initial state for Lake: %v", rawInitial)
	c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for Lake: %v", rawDesired)

	// The Get call applies postReadExtract and so the result may contain fields that are not part of API version.
	if err := extractLakeFields(rawInitial); err != nil {
		return nil, nil, nil, err
	}

	// 1.3: Canonicalize raw initial state into initial state.
	initial, err = canonicalizeLakeInitialState(rawInitial, rawDesired)
	if err != nil {
		return nil, nil, nil, err
	}
	c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for Lake: %v", initial)

	// 1.4: Canonicalize raw desired state into desired state.
	desired, err = canonicalizeLakeDesiredState(rawDesired, rawInitial, opts...)
	if err != nil {
		return nil, nil, nil, err
	}
	c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for Lake: %v", desired)

	// 2.1: Comparison of initial and desired state.
	diffs, err = diffLake(c, desired, initial, opts...)
	return initial, desired, diffs, err
}

func canonicalizeLakeInitialState(rawInitial, rawDesired *Lake) (*Lake, error) {
	// TODO(magic-modules-eng): write canonicalizer once relevant traits are added.
	return rawInitial, nil
}

/*
* Canonicalizers
*
* These are responsible for converting either a user-specified config or a
* GCP API response to a standard format that can be used for difference checking.
* */

// canonicalizeLakeDesiredState merges the user's desired state with the
// API-reported initial state, preferring the initial value whenever the two
// are equivalent under the relevant canonicalization rule.
func canonicalizeLakeDesiredState(rawDesired, rawInitial *Lake, opts ...dcl.ApplyOption) (*Lake, error) {

	if rawInitial == nil {
		// Since the initial state is empty, the desired state is all we have.
		// We canonicalize the remaining nested objects with nil to pick up defaults.
		rawDesired.Metastore = canonicalizeLakeMetastore(rawDesired.Metastore, nil, opts...)
		rawDesired.AssetStatus = canonicalizeLakeAssetStatus(rawDesired.AssetStatus, nil, opts...)
		rawDesired.MetastoreStatus = canonicalizeLakeMetastoreStatus(rawDesired.MetastoreStatus, nil, opts...)

		return rawDesired, nil
	}
	canonicalDesired := &Lake{}
	if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawInitial.Name) {
		canonicalDesired.Name = rawInitial.Name
	} else {
		canonicalDesired.Name = rawDesired.Name
	}
	if dcl.StringCanonicalize(rawDesired.DisplayName, rawInitial.DisplayName) {
		canonicalDesired.DisplayName = rawInitial.DisplayName
	} else {
		canonicalDesired.DisplayName = rawDesired.DisplayName
	}
	if dcl.IsZeroValue(rawDesired.Labels) || (dcl.IsEmptyValueIndirect(rawDesired.Labels) && dcl.IsEmptyValueIndirect(rawInitial.Labels)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		canonicalDesired.Labels = rawInitial.Labels
	} else {
		canonicalDesired.Labels = rawDesired.Labels
	}
	if dcl.StringCanonicalize(rawDesired.Description, rawInitial.Description) {
		canonicalDesired.Description = rawInitial.Description
	} else {
		canonicalDesired.Description = rawDesired.Description
	}
	canonicalDesired.Metastore = canonicalizeLakeMetastore(rawDesired.Metastore, rawInitial.Metastore, opts...)
	if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) {
		canonicalDesired.Project = rawInitial.Project
	} else {
		canonicalDesired.Project = rawDesired.Project
	}
	if dcl.NameToSelfLink(rawDesired.Location, rawInitial.Location) {
		canonicalDesired.Location = rawInitial.Location
	} else {
		canonicalDesired.Location = rawDesired.Location
	}
	return canonicalDesired, nil
}

// canonicalizeLakeNewState reconciles the state returned by the API after an
// apply (rawNew) with the desired state, back-filling fields the API reported
// as empty and normalizing equivalent values to the desired form.
func canonicalizeLakeNewState(c *Client, rawNew, rawDesired *Lake) (*Lake, error) {

	if dcl.IsEmptyValueIndirect(rawNew.Name) && dcl.IsEmptyValueIndirect(rawDesired.Name) {
		rawNew.Name = rawDesired.Name
	} else {
		if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawNew.Name) {
			rawNew.Name = rawDesired.Name
		}
	}

	if dcl.IsEmptyValueIndirect(rawNew.DisplayName) && dcl.IsEmptyValueIndirect(rawDesired.DisplayName) {
		rawNew.DisplayName = rawDesired.DisplayName
	} else {
		if dcl.StringCanonicalize(rawDesired.DisplayName, rawNew.DisplayName) {
			rawNew.DisplayName = rawDesired.DisplayName
		}
	}

	if dcl.IsEmptyValueIndirect(rawNew.Uid) && dcl.IsEmptyValueIndirect(rawDesired.Uid) {
		rawNew.Uid = rawDesired.Uid
	} else {
		if dcl.StringCanonicalize(rawDesired.Uid, rawNew.Uid) {
			rawNew.Uid = rawDesired.Uid
		}
	}

	// Output-only timestamp fields: keep whatever the API returned.
	if dcl.IsEmptyValueIndirect(rawNew.CreateTime) && dcl.IsEmptyValueIndirect(rawDesired.CreateTime) {
		rawNew.CreateTime = rawDesired.CreateTime
	} else {
	}

	if dcl.IsEmptyValueIndirect(rawNew.UpdateTime) && dcl.IsEmptyValueIndirect(rawDesired.UpdateTime) {
		rawNew.UpdateTime = rawDesired.UpdateTime
	} else {
	}

	if dcl.IsEmptyValueIndirect(rawNew.Labels) && dcl.IsEmptyValueIndirect(rawDesired.Labels) {
		rawNew.Labels = rawDesired.Labels
	} else {
	}

	if dcl.IsEmptyValueIndirect(rawNew.Description) && dcl.IsEmptyValueIndirect(rawDesired.Description) {
		rawNew.Description = rawDesired.Description
	} else {
		if dcl.StringCanonicalize(rawDesired.Description, rawNew.Description) {
			rawNew.Description = rawDesired.Description
		}
	}

	if dcl.IsEmptyValueIndirect(rawNew.State) && dcl.IsEmptyValueIndirect(rawDesired.State) {
		rawNew.State = rawDesired.State
	} else {
	}

	if dcl.IsEmptyValueIndirect(rawNew.ServiceAccount) && dcl.IsEmptyValueIndirect(rawDesired.ServiceAccount) {
		rawNew.ServiceAccount = rawDesired.ServiceAccount
	} else {
		if dcl.StringCanonicalize(rawDesired.ServiceAccount, rawNew.ServiceAccount) {
			rawNew.ServiceAccount = rawDesired.ServiceAccount
		}
	}

	if dcl.IsEmptyValueIndirect(rawNew.Metastore) && dcl.IsEmptyValueIndirect(rawDesired.Metastore) {
		rawNew.Metastore = rawDesired.Metastore
	} else {
		rawNew.Metastore = canonicalizeNewLakeMetastore(c, rawDesired.Metastore, rawNew.Metastore)
	}

	if dcl.IsEmptyValueIndirect(rawNew.AssetStatus) && dcl.IsEmptyValueIndirect(rawDesired.AssetStatus) {
		rawNew.AssetStatus = rawDesired.AssetStatus
	} else {
		rawNew.AssetStatus = canonicalizeNewLakeAssetStatus(c, rawDesired.AssetStatus, rawNew.AssetStatus)
	}

	if dcl.IsEmptyValueIndirect(rawNew.MetastoreStatus) && dcl.IsEmptyValueIndirect(rawDesired.MetastoreStatus) {
		rawNew.MetastoreStatus = rawDesired.MetastoreStatus
	} else {
		rawNew.MetastoreStatus = canonicalizeNewLakeMetastoreStatus(c, rawDesired.MetastoreStatus, rawNew.MetastoreStatus)
	}

	// Project and Location are URL parameters, never returned by the API.
	rawNew.Project = rawDesired.Project

	rawNew.Location = rawDesired.Location

	return rawNew, nil
}

// canonicalizeLakeMetastore merges the desired Metastore block with the
// initial one, preferring initial values when equivalent.
func canonicalizeLakeMetastore(des, initial *LakeMetastore, opts ...dcl.ApplyOption) *LakeMetastore {
	if des == nil {
		return initial
	}
	if des.empty {
		return des
	}

	if initial == nil {
		return des
	}

	cDes := &LakeMetastore{}

	if dcl.StringCanonicalize(des.Service, initial.Service) || dcl.IsZeroValue(des.Service) {
		cDes.Service = initial.Service
	} else {
		cDes.Service = des.Service
	}

	return cDes
}

// canonicalizeLakeMetastoreSlice canonicalizes a slice element-wise; on length
// mismatch the initial slice is ignored and each element is canonicalized
// against nil.
func canonicalizeLakeMetastoreSlice(des, initial []LakeMetastore, opts ...dcl.ApplyOption) []LakeMetastore {
	if dcl.IsEmptyValueIndirect(des) {
		return initial
	}

	if len(des) != len(initial) {

		items := make([]LakeMetastore, 0, len(des))
		for _, d := range des {
			cd := canonicalizeLakeMetastore(&d, nil, opts...)
			if cd != nil {
				items = append(items, *cd)
			}
		}
		return items
	}

	items := make([]LakeMetastore, 0, len(des))
	for i, d := range des {
		cd := canonicalizeLakeMetastore(&d, &initial[i], opts...)
		if cd != nil {
			items = append(items, *cd)
		}
	}
	return items

}

// canonicalizeNewLakeMetastore reconciles the API-returned Metastore (nw) with
// the desired one, normalizing equivalent string values to the desired form.
func canonicalizeNewLakeMetastore(c *Client, des, nw *LakeMetastore) *LakeMetastore {

	if des == nil {
		return nw
	}

	if nw == nil {
		if dcl.IsEmptyValueIndirect(des) {
			c.Config.Logger.Info("Found explicitly empty value for LakeMetastore while comparing non-nil desired to nil actual. Returning desired object.")
			return des
		}
		return nil
	}

	if dcl.StringCanonicalize(des.Service, nw.Service) {
		nw.Service = des.Service
	}

	return nw
}

// canonicalizeNewLakeMetastoreSet treats the two slices as sets: elements of
// des that match an element of nw (per the compare function) are canonicalized
// against it; unmatched nw elements are appended at the end.
func canonicalizeNewLakeMetastoreSet(c *Client, des, nw []LakeMetastore) []LakeMetastore {
	if des == nil {
		return nw
	}

	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
	var items []LakeMetastore
	for _, d := range des {
		matchedIndex := -1
		for i, n := range nw {
			if diffs, _ := compareLakeMetastoreNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
				matchedIndex = i
				break
			}
		}
		if matchedIndex != -1 {
			items = append(items, *canonicalizeNewLakeMetastore(c, &d, &nw[matchedIndex]))
			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
		}
	}
	// Also include elements in nw that are not matched in des.
	items = append(items, nw...)

	return items
}

// canonicalizeNewLakeMetastoreSlice canonicalizes element-wise; a length
// mismatch is left for the differ to report.
func canonicalizeNewLakeMetastoreSlice(c *Client, des, nw []LakeMetastore) []LakeMetastore {
	if des == nil {
		return nw
	}

	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
	// Return the original array.
	if len(des) != len(nw) {
		return nw
	}

	var items []LakeMetastore
	for i, d := range des {
		n := nw[i]
		items = append(items, *canonicalizeNewLakeMetastore(c, &d, &n))
	}

	return items
}

// canonicalizeLakeAssetStatus merges the desired AssetStatus block with the
// initial one, preferring initial values when equivalent.
func canonicalizeLakeAssetStatus(des, initial *LakeAssetStatus, opts ...dcl.ApplyOption) *LakeAssetStatus {
	if des == nil {
		return initial
	}
	if des.empty {
		return des
	}

	if initial == nil {
		return des
	}

	cDes := &LakeAssetStatus{}

	if dcl.IsZeroValue(des.UpdateTime) || (dcl.IsEmptyValueIndirect(des.UpdateTime) && dcl.IsEmptyValueIndirect(initial.UpdateTime)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		cDes.UpdateTime = initial.UpdateTime
	} else {
		cDes.UpdateTime = des.UpdateTime
	}
	if dcl.IsZeroValue(des.ActiveAssets) || (dcl.IsEmptyValueIndirect(des.ActiveAssets) && dcl.IsEmptyValueIndirect(initial.ActiveAssets)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		cDes.ActiveAssets = initial.ActiveAssets
	} else {
		cDes.ActiveAssets = des.ActiveAssets
	}
	if dcl.IsZeroValue(des.SecurityPolicyApplyingAssets) || (dcl.IsEmptyValueIndirect(des.SecurityPolicyApplyingAssets) && dcl.IsEmptyValueIndirect(initial.SecurityPolicyApplyingAssets)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		cDes.SecurityPolicyApplyingAssets = initial.SecurityPolicyApplyingAssets
	} else {
		cDes.SecurityPolicyApplyingAssets = des.SecurityPolicyApplyingAssets
	}

	return cDes
}

// canonicalizeLakeAssetStatusSlice canonicalizes a slice element-wise; on
// length mismatch each element is canonicalized against nil.
func canonicalizeLakeAssetStatusSlice(des, initial []LakeAssetStatus, opts ...dcl.ApplyOption) []LakeAssetStatus {
	if dcl.IsEmptyValueIndirect(des) {
		return initial
	}

	if len(des) != len(initial) {

		items := make([]LakeAssetStatus, 0, len(des))
		for _, d := range des {
			cd := canonicalizeLakeAssetStatus(&d, nil, opts...)
			if cd != nil {
				items = append(items, *cd)
			}
		}
		return items
	}

	items := make([]LakeAssetStatus, 0, len(des))
	for i, d := range des {
		cd := canonicalizeLakeAssetStatus(&d, &initial[i], opts...)
		if cd != nil {
			items = append(items, *cd)
		}
	}
	return items

}

// canonicalizeNewLakeAssetStatus reconciles the API-returned AssetStatus with
// the desired one. All fields are numeric/timestamps, so no string
// normalization is performed.
func canonicalizeNewLakeAssetStatus(c *Client, des, nw *LakeAssetStatus) *LakeAssetStatus {

	if des == nil {
		return nw
	}

	if nw == nil {
		if dcl.IsEmptyValueIndirect(des) {
			c.Config.Logger.Info("Found explicitly empty value for LakeAssetStatus while comparing non-nil desired to nil actual. Returning desired object.")
			return des
		}
		return nil
	}

	return nw
}

// canonicalizeNewLakeAssetStatusSet treats the two slices as sets; see
// canonicalizeNewLakeMetastoreSet for the matching strategy.
func canonicalizeNewLakeAssetStatusSet(c *Client, des, nw []LakeAssetStatus) []LakeAssetStatus {
	if des == nil {
		return nw
	}

	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
	var items []LakeAssetStatus
	for _, d := range des {
		matchedIndex := -1
		for i, n := range nw {
			if diffs, _ := compareLakeAssetStatusNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
				matchedIndex = i
				break
			}
		}
		if matchedIndex != -1 {
			items = append(items, *canonicalizeNewLakeAssetStatus(c, &d, &nw[matchedIndex]))
			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
		}
	}
	// Also include elements in nw that are not matched in des.
	items = append(items, nw...)

	return items
}

// canonicalizeNewLakeAssetStatusSlice canonicalizes element-wise; a length
// mismatch is left for the differ to report.
func canonicalizeNewLakeAssetStatusSlice(c *Client, des, nw []LakeAssetStatus) []LakeAssetStatus {
	if des == nil {
		return nw
	}

	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
	// Return the original array.
	if len(des) != len(nw) {
		return nw
	}

	var items []LakeAssetStatus
	for i, d := range des {
		n := nw[i]
		items = append(items, *canonicalizeNewLakeAssetStatus(c, &d, &n))
	}

	return items
}

// canonicalizeLakeMetastoreStatus merges the desired MetastoreStatus block
// with the initial one, preferring initial values when equivalent.
func canonicalizeLakeMetastoreStatus(des, initial *LakeMetastoreStatus, opts ...dcl.ApplyOption) *LakeMetastoreStatus {
	if des == nil {
		return initial
	}
	if des.empty {
		return des
	}

	if initial == nil {
		return des
	}

	cDes := &LakeMetastoreStatus{}

	if dcl.IsZeroValue(des.State) || (dcl.IsEmptyValueIndirect(des.State) && dcl.IsEmptyValueIndirect(initial.State)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		cDes.State = initial.State
	} else {
		cDes.State = des.State
	}
	if dcl.StringCanonicalize(des.Message, initial.Message) || dcl.IsZeroValue(des.Message) {
		cDes.Message = initial.Message
	} else {
		cDes.Message = des.Message
	}
	if dcl.IsZeroValue(des.UpdateTime) || (dcl.IsEmptyValueIndirect(des.UpdateTime) && dcl.IsEmptyValueIndirect(initial.UpdateTime)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		cDes.UpdateTime = initial.UpdateTime
	} else {
		cDes.UpdateTime = des.UpdateTime
	}
	if dcl.StringCanonicalize(des.Endpoint, initial.Endpoint) || dcl.IsZeroValue(des.Endpoint) {
		cDes.Endpoint = initial.Endpoint
	} else {
		cDes.Endpoint = des.Endpoint
	}

	return cDes
}

// canonicalizeLakeMetastoreStatusSlice canonicalizes a slice element-wise; on
// length mismatch each element is canonicalized against nil.
func canonicalizeLakeMetastoreStatusSlice(des, initial []LakeMetastoreStatus, opts ...dcl.ApplyOption) []LakeMetastoreStatus {
	if dcl.IsEmptyValueIndirect(des) {
		return initial
	}

	if len(des) != len(initial) {

		items := make([]LakeMetastoreStatus, 0, len(des))
		for _, d := range des {
			cd := canonicalizeLakeMetastoreStatus(&d, nil, opts...)
			if cd != nil {
				items = append(items, *cd)
			}
		}
		return items
	}

	items := make([]LakeMetastoreStatus, 0, len(des))
	for i, d := range des {
		cd := canonicalizeLakeMetastoreStatus(&d, &initial[i], opts...)
		if cd != nil {
			items = append(items, *cd)
		}
	}
	return items

}

// canonicalizeNewLakeMetastoreStatus reconciles the API-returned
// MetastoreStatus with the desired one, normalizing equivalent string values
// to the desired form.
func canonicalizeNewLakeMetastoreStatus(c *Client, des, nw *LakeMetastoreStatus) *LakeMetastoreStatus {

	if des == nil {
		return nw
	}

	if nw == nil {
		if dcl.IsEmptyValueIndirect(des) {
			c.Config.Logger.Info("Found explicitly empty value for LakeMetastoreStatus while comparing non-nil desired to nil actual. Returning desired object.")
			return des
		}
		return nil
	}

	if dcl.StringCanonicalize(des.Message, nw.Message) {
		nw.Message = des.Message
	}
	if dcl.StringCanonicalize(des.Endpoint, nw.Endpoint) {
		nw.Endpoint = des.Endpoint
	}

	return nw
}

// canonicalizeNewLakeMetastoreStatusSet treats the two slices as sets; see
// canonicalizeNewLakeMetastoreSet for the matching strategy.
func canonicalizeNewLakeMetastoreStatusSet(c *Client, des, nw []LakeMetastoreStatus) []LakeMetastoreStatus {
	if des == nil {
		return nw
	}

	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
	var items []LakeMetastoreStatus
	for _, d := range des {
		matchedIndex := -1
		for i, n := range nw {
			if diffs, _ := compareLakeMetastoreStatusNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
				matchedIndex = i
				break
			}
		}
		if matchedIndex != -1 {
			items = append(items, *canonicalizeNewLakeMetastoreStatus(c, &d, &nw[matchedIndex]))
			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
		}
	}
	// Also include elements in nw that are not matched in des.
	items = append(items, nw...)

	return items
}

// canonicalizeNewLakeMetastoreStatusSlice canonicalizes element-wise; a length
// mismatch is left for the differ to report.
func canonicalizeNewLakeMetastoreStatusSlice(c *Client, des, nw []LakeMetastoreStatus) []LakeMetastoreStatus {
	if des == nil {
		return nw
	}

	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
	// Return the original array.
	if len(des) != len(nw) {
		return nw
	}

	var items []LakeMetastoreStatus
	for i, d := range des {
		n := nw[i]
		items = append(items, *canonicalizeNewLakeMetastoreStatus(c, &d, &n))
	}

	return items
}

// The differ returns a list of diffs, along with a list of operations that should be taken
// to remedy them. Right now, it does not attempt to consolidate operations - if several
// fields can be fixed with a patch update, it will perform the patch several times.
// Diffs on some fields will be ignored if the `desired` state has an empty (nil)
// value. This empty value indicates that the user does not care about the state for
// the field. Empty fields on the actual object will cause diffs.
// TODO(magic-modules-eng): for efficiency in some resources, add batching.
// diffLake compares desired against actual field by field and returns the
// list of diffs. Each DiffInfo records whether the field is output-only and
// which operation (in-place update vs. recreate) remedies a mismatch.
func diffLake(c *Client, desired, actual *Lake, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) {
	if desired == nil || actual == nil {
		return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual)
	}

	c.Config.Logger.Infof("Diff function called with desired state: %v", desired)
	c.Config.Logger.Infof("Diff function called with actual state: %v", actual)

	var fn dcl.FieldName
	var newDiffs []*dcl.FieldDiff
	// New style diffs.
	if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.TriggersOperation("updateLakeUpdateLakeOperation")}, fn.AddNest("Name")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.DisplayName, actual.DisplayName, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateLakeUpdateLakeOperation")}, fn.AddNest("DisplayName")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Uid, actual.Uid, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Uid")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.CreateTime, actual.CreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreateTime")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Labels, actual.Labels, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateLakeUpdateLakeOperation")}, fn.AddNest("Labels")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Description, actual.Description, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateLakeUpdateLakeOperation")}, fn.AddNest("Description")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.State, actual.State, dcl.DiffInfo{OutputOnly: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("State")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.ServiceAccount, actual.ServiceAccount, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ServiceAccount")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Metastore, actual.Metastore, dcl.DiffInfo{ObjectFunction: compareLakeMetastoreNewStyle, EmptyObject: EmptyLakeMetastore, OperationSelector: dcl.TriggersOperation("updateLakeUpdateLakeOperation")}, fn.AddNest("Metastore")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.AssetStatus, actual.AssetStatus, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareLakeAssetStatusNewStyle, EmptyObject: EmptyLakeAssetStatus, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AssetStatus")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.MetastoreStatus, actual.MetastoreStatus, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareLakeMetastoreStatusNewStyle, EmptyObject: EmptyLakeMetastoreStatus, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MetastoreStatus")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Location, actual.Location, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Location")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if len(newDiffs) > 0 {
		c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs)
	}
	return newDiffs, nil
}

// compareLakeMetastoreNewStyle diffs two LakeMetastore values; d and a may be
// passed as pointers or values.
func compareLakeMetastoreNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*LakeMetastore)
	if !ok {
		desiredNotPointer, ok := d.(LakeMetastore)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a LakeMetastore or *LakeMetastore", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*LakeMetastore)
	if !ok {
		actualNotPointer, ok := a.(LakeMetastore)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a LakeMetastore", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.Service, actual.Service, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateLakeUpdateLakeOperation")}, fn.AddNest("Service")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareLakeAssetStatusNewStyle diffs two LakeAssetStatus values; d and a may
// be passed as pointers or values.
func compareLakeAssetStatusNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*LakeAssetStatus)
	if !ok {
		desiredNotPointer, ok := d.(LakeAssetStatus)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a LakeAssetStatus or *LakeAssetStatus", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*LakeAssetStatus)
	if !ok {
		actualNotPointer, ok := a.(LakeAssetStatus)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a LakeAssetStatus", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateLakeUpdateLakeOperation")}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.ActiveAssets, actual.ActiveAssets, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateLakeUpdateLakeOperation")}, fn.AddNest("ActiveAssets")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.SecurityPolicyApplyingAssets, actual.SecurityPolicyApplyingAssets, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateLakeUpdateLakeOperation")}, fn.AddNest("SecurityPolicyApplyingAssets")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareLakeMetastoreStatusNewStyle diffs two LakeMetastoreStatus values;
// d and a may be passed as pointers or values.
func compareLakeMetastoreStatusNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*LakeMetastoreStatus)
	if !ok {
		desiredNotPointer, ok := d.(LakeMetastoreStatus)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a LakeMetastoreStatus or *LakeMetastoreStatus", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*LakeMetastoreStatus)
	if !ok {
		actualNotPointer, ok := a.(LakeMetastoreStatus)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a LakeMetastoreStatus", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.State, actual.State, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateLakeUpdateLakeOperation")}, fn.AddNest("State")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Message, actual.Message, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateLakeUpdateLakeOperation")}, fn.AddNest("Message")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateLakeUpdateLakeOperation")}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Endpoint, actual.Endpoint, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateLakeUpdateLakeOperation")}, fn.AddNest("Endpoint")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// urlNormalized returns a copy of the resource struct with values normalized
// for URL substitutions. For instance, it converts long-form self-links to
// short-form so they can be substituted in.
+func (r *Lake) urlNormalized() *Lake { + normalized := dcl.Copy(*r).(Lake) + normalized.Name = dcl.SelfLinkToName(r.Name) + normalized.DisplayName = dcl.SelfLinkToName(r.DisplayName) + normalized.Uid = dcl.SelfLinkToName(r.Uid) + normalized.Description = dcl.SelfLinkToName(r.Description) + normalized.ServiceAccount = dcl.SelfLinkToName(r.ServiceAccount) + normalized.Project = dcl.SelfLinkToName(r.Project) + normalized.Location = dcl.SelfLinkToName(r.Location) + return &normalized +} + +func (r *Lake) updateURL(userBasePath, updateName string) (string, error) { + nr := r.urlNormalized() + if updateName == "UpdateLake" { + fields := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/lakes/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, fields), nil + + } + + return "", fmt.Errorf("unknown update name: %s", updateName) +} + +// marshal encodes the Lake resource into JSON for a Create request, and +// performs transformations from the resource schema to the API schema if +// necessary. +func (r *Lake) marshal(c *Client) ([]byte, error) { + m, err := expandLake(c, r) + if err != nil { + return nil, fmt.Errorf("error marshalling Lake: %w", err) + } + + return json.Marshal(m) +} + +// unmarshalLake decodes JSON responses into the Lake resource schema. 
+func unmarshalLake(b []byte, c *Client, res *Lake) (*Lake, error) { + var m map[string]interface{} + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + return unmarshalMapLake(m, c, res) +} + +func unmarshalMapLake(m map[string]interface{}, c *Client, res *Lake) (*Lake, error) { + + flattened := flattenLake(c, m, res) + if flattened == nil { + return nil, fmt.Errorf("attempted to flatten empty json object") + } + return flattened, nil +} + +// expandLake expands Lake into a JSON request object. +func expandLake(c *Client, f *Lake) (map[string]interface{}, error) { + m := make(map[string]interface{}) + res := f + _ = res + if v, err := dcl.DeriveField("projects/%s/locations/%s/lakes/%s", f.Name, dcl.SelfLinkToName(f.Project), dcl.SelfLinkToName(f.Location), dcl.SelfLinkToName(f.Name)); err != nil { + return nil, fmt.Errorf("error expanding Name into name: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["name"] = v + } + if v := f.DisplayName; dcl.ValueShouldBeSent(v) { + m["displayName"] = v + } + if v := f.Labels; dcl.ValueShouldBeSent(v) { + m["labels"] = v + } + if v := f.Description; dcl.ValueShouldBeSent(v) { + m["description"] = v + } + if v, err := expandLakeMetastore(c, f.Metastore, res); err != nil { + return nil, fmt.Errorf("error expanding Metastore into metastore: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["metastore"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Project into project: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["project"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Location into location: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["location"] = v + } + + return m, nil +} + +// flattenLake flattens Lake from a JSON request object into the +// Lake type. 
+func flattenLake(c *Client, i interface{}, res *Lake) *Lake { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + if len(m) == 0 { + return nil + } + + resultRes := &Lake{} + resultRes.Name = dcl.FlattenString(m["name"]) + resultRes.DisplayName = dcl.FlattenString(m["displayName"]) + resultRes.Uid = dcl.FlattenString(m["uid"]) + resultRes.CreateTime = dcl.FlattenString(m["createTime"]) + resultRes.UpdateTime = dcl.FlattenString(m["updateTime"]) + resultRes.Labels = dcl.FlattenKeyValuePairs(m["labels"]) + resultRes.Description = dcl.FlattenString(m["description"]) + resultRes.State = flattenLakeStateEnum(m["state"]) + resultRes.ServiceAccount = dcl.FlattenString(m["serviceAccount"]) + resultRes.Metastore = flattenLakeMetastore(c, m["metastore"], res) + resultRes.AssetStatus = flattenLakeAssetStatus(c, m["assetStatus"], res) + resultRes.MetastoreStatus = flattenLakeMetastoreStatus(c, m["metastoreStatus"], res) + resultRes.Project = dcl.FlattenString(m["project"]) + resultRes.Location = dcl.FlattenString(m["location"]) + + return resultRes +} + +// expandLakeMetastoreMap expands the contents of LakeMetastore into a JSON +// request object. +func expandLakeMetastoreMap(c *Client, f map[string]LakeMetastore, res *Lake) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandLakeMetastore(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandLakeMetastoreSlice expands the contents of LakeMetastore into a JSON +// request object. 
+func expandLakeMetastoreSlice(c *Client, f []LakeMetastore, res *Lake) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandLakeMetastore(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenLakeMetastoreMap flattens the contents of LakeMetastore from a JSON +// response object. +func flattenLakeMetastoreMap(c *Client, i interface{}, res *Lake) map[string]LakeMetastore { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]LakeMetastore{} + } + + if len(a) == 0 { + return map[string]LakeMetastore{} + } + + items := make(map[string]LakeMetastore) + for k, item := range a { + items[k] = *flattenLakeMetastore(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenLakeMetastoreSlice flattens the contents of LakeMetastore from a JSON +// response object. +func flattenLakeMetastoreSlice(c *Client, i interface{}, res *Lake) []LakeMetastore { + a, ok := i.([]interface{}) + if !ok { + return []LakeMetastore{} + } + + if len(a) == 0 { + return []LakeMetastore{} + } + + items := make([]LakeMetastore, 0, len(a)) + for _, item := range a { + items = append(items, *flattenLakeMetastore(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandLakeMetastore expands an instance of LakeMetastore into a JSON +// request object. +func expandLakeMetastore(c *Client, f *LakeMetastore, res *Lake) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Service; !dcl.IsEmptyValueIndirect(v) { + m["service"] = v + } + + return m, nil +} + +// flattenLakeMetastore flattens an instance of LakeMetastore from a JSON +// response object. 
+func flattenLakeMetastore(c *Client, i interface{}, res *Lake) *LakeMetastore { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &LakeMetastore{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyLakeMetastore + } + r.Service = dcl.FlattenString(m["service"]) + + return r +} + +// expandLakeAssetStatusMap expands the contents of LakeAssetStatus into a JSON +// request object. +func expandLakeAssetStatusMap(c *Client, f map[string]LakeAssetStatus, res *Lake) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandLakeAssetStatus(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandLakeAssetStatusSlice expands the contents of LakeAssetStatus into a JSON +// request object. +func expandLakeAssetStatusSlice(c *Client, f []LakeAssetStatus, res *Lake) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandLakeAssetStatus(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenLakeAssetStatusMap flattens the contents of LakeAssetStatus from a JSON +// response object. +func flattenLakeAssetStatusMap(c *Client, i interface{}, res *Lake) map[string]LakeAssetStatus { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]LakeAssetStatus{} + } + + if len(a) == 0 { + return map[string]LakeAssetStatus{} + } + + items := make(map[string]LakeAssetStatus) + for k, item := range a { + items[k] = *flattenLakeAssetStatus(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenLakeAssetStatusSlice flattens the contents of LakeAssetStatus from a JSON +// response object. 
+func flattenLakeAssetStatusSlice(c *Client, i interface{}, res *Lake) []LakeAssetStatus { + a, ok := i.([]interface{}) + if !ok { + return []LakeAssetStatus{} + } + + if len(a) == 0 { + return []LakeAssetStatus{} + } + + items := make([]LakeAssetStatus, 0, len(a)) + for _, item := range a { + items = append(items, *flattenLakeAssetStatus(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandLakeAssetStatus expands an instance of LakeAssetStatus into a JSON +// request object. +func expandLakeAssetStatus(c *Client, f *LakeAssetStatus, res *Lake) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.UpdateTime; !dcl.IsEmptyValueIndirect(v) { + m["updateTime"] = v + } + if v := f.ActiveAssets; !dcl.IsEmptyValueIndirect(v) { + m["activeAssets"] = v + } + if v := f.SecurityPolicyApplyingAssets; !dcl.IsEmptyValueIndirect(v) { + m["securityPolicyApplyingAssets"] = v + } + + return m, nil +} + +// flattenLakeAssetStatus flattens an instance of LakeAssetStatus from a JSON +// response object. +func flattenLakeAssetStatus(c *Client, i interface{}, res *Lake) *LakeAssetStatus { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &LakeAssetStatus{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyLakeAssetStatus + } + r.UpdateTime = dcl.FlattenString(m["updateTime"]) + r.ActiveAssets = dcl.FlattenInteger(m["activeAssets"]) + r.SecurityPolicyApplyingAssets = dcl.FlattenInteger(m["securityPolicyApplyingAssets"]) + + return r +} + +// expandLakeMetastoreStatusMap expands the contents of LakeMetastoreStatus into a JSON +// request object. 
+func expandLakeMetastoreStatusMap(c *Client, f map[string]LakeMetastoreStatus, res *Lake) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandLakeMetastoreStatus(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandLakeMetastoreStatusSlice expands the contents of LakeMetastoreStatus into a JSON +// request object. +func expandLakeMetastoreStatusSlice(c *Client, f []LakeMetastoreStatus, res *Lake) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandLakeMetastoreStatus(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenLakeMetastoreStatusMap flattens the contents of LakeMetastoreStatus from a JSON +// response object. +func flattenLakeMetastoreStatusMap(c *Client, i interface{}, res *Lake) map[string]LakeMetastoreStatus { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]LakeMetastoreStatus{} + } + + if len(a) == 0 { + return map[string]LakeMetastoreStatus{} + } + + items := make(map[string]LakeMetastoreStatus) + for k, item := range a { + items[k] = *flattenLakeMetastoreStatus(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenLakeMetastoreStatusSlice flattens the contents of LakeMetastoreStatus from a JSON +// response object. 
+func flattenLakeMetastoreStatusSlice(c *Client, i interface{}, res *Lake) []LakeMetastoreStatus { + a, ok := i.([]interface{}) + if !ok { + return []LakeMetastoreStatus{} + } + + if len(a) == 0 { + return []LakeMetastoreStatus{} + } + + items := make([]LakeMetastoreStatus, 0, len(a)) + for _, item := range a { + items = append(items, *flattenLakeMetastoreStatus(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandLakeMetastoreStatus expands an instance of LakeMetastoreStatus into a JSON +// request object. +func expandLakeMetastoreStatus(c *Client, f *LakeMetastoreStatus, res *Lake) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.State; !dcl.IsEmptyValueIndirect(v) { + m["state"] = v + } + if v := f.Message; !dcl.IsEmptyValueIndirect(v) { + m["message"] = v + } + if v := f.UpdateTime; !dcl.IsEmptyValueIndirect(v) { + m["updateTime"] = v + } + if v := f.Endpoint; !dcl.IsEmptyValueIndirect(v) { + m["endpoint"] = v + } + + return m, nil +} + +// flattenLakeMetastoreStatus flattens an instance of LakeMetastoreStatus from a JSON +// response object. +func flattenLakeMetastoreStatus(c *Client, i interface{}, res *Lake) *LakeMetastoreStatus { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &LakeMetastoreStatus{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyLakeMetastoreStatus + } + r.State = flattenLakeMetastoreStatusStateEnum(m["state"]) + r.Message = dcl.FlattenString(m["message"]) + r.UpdateTime = dcl.FlattenString(m["updateTime"]) + r.Endpoint = dcl.FlattenString(m["endpoint"]) + + return r +} + +// flattenLakeStateEnumMap flattens the contents of LakeStateEnum from a JSON +// response object. 
+func flattenLakeStateEnumMap(c *Client, i interface{}, res *Lake) map[string]LakeStateEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]LakeStateEnum{} + } + + if len(a) == 0 { + return map[string]LakeStateEnum{} + } + + items := make(map[string]LakeStateEnum) + for k, item := range a { + items[k] = *flattenLakeStateEnum(item.(interface{})) + } + + return items +} + +// flattenLakeStateEnumSlice flattens the contents of LakeStateEnum from a JSON +// response object. +func flattenLakeStateEnumSlice(c *Client, i interface{}, res *Lake) []LakeStateEnum { + a, ok := i.([]interface{}) + if !ok { + return []LakeStateEnum{} + } + + if len(a) == 0 { + return []LakeStateEnum{} + } + + items := make([]LakeStateEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenLakeStateEnum(item.(interface{}))) + } + + return items +} + +// flattenLakeStateEnum asserts that an interface is a string, and returns a +// pointer to a *LakeStateEnum with the same value as that string. +func flattenLakeStateEnum(i interface{}) *LakeStateEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return LakeStateEnumRef(s) +} + +// flattenLakeMetastoreStatusStateEnumMap flattens the contents of LakeMetastoreStatusStateEnum from a JSON +// response object. +func flattenLakeMetastoreStatusStateEnumMap(c *Client, i interface{}, res *Lake) map[string]LakeMetastoreStatusStateEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]LakeMetastoreStatusStateEnum{} + } + + if len(a) == 0 { + return map[string]LakeMetastoreStatusStateEnum{} + } + + items := make(map[string]LakeMetastoreStatusStateEnum) + for k, item := range a { + items[k] = *flattenLakeMetastoreStatusStateEnum(item.(interface{})) + } + + return items +} + +// flattenLakeMetastoreStatusStateEnumSlice flattens the contents of LakeMetastoreStatusStateEnum from a JSON +// response object. 
+func flattenLakeMetastoreStatusStateEnumSlice(c *Client, i interface{}, res *Lake) []LakeMetastoreStatusStateEnum { + a, ok := i.([]interface{}) + if !ok { + return []LakeMetastoreStatusStateEnum{} + } + + if len(a) == 0 { + return []LakeMetastoreStatusStateEnum{} + } + + items := make([]LakeMetastoreStatusStateEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenLakeMetastoreStatusStateEnum(item.(interface{}))) + } + + return items +} + +// flattenLakeMetastoreStatusStateEnum asserts that an interface is a string, and returns a +// pointer to a *LakeMetastoreStatusStateEnum with the same value as that string. +func flattenLakeMetastoreStatusStateEnum(i interface{}) *LakeMetastoreStatusStateEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return LakeMetastoreStatusStateEnumRef(s) +} + +// This function returns a matcher that checks whether a serialized resource matches this resource +// in its parameters (as defined by the fields in a Get, which definitionally define resource +// identity). This is useful in extracting the element from a List call. 
+func (r *Lake) matcher(c *Client) func([]byte) bool { + return func(b []byte) bool { + cr, err := unmarshalLake(b, c, r) + if err != nil { + c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.") + return false + } + nr := r.urlNormalized() + ncr := cr.urlNormalized() + c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr) + + if nr.Project == nil && ncr.Project == nil { + c.Config.Logger.Info("Both Project fields null - considering equal.") + } else if nr.Project == nil || ncr.Project == nil { + c.Config.Logger.Info("Only one Project field is null - considering unequal.") + return false + } else if *nr.Project != *ncr.Project { + return false + } + if nr.Location == nil && ncr.Location == nil { + c.Config.Logger.Info("Both Location fields null - considering equal.") + } else if nr.Location == nil || ncr.Location == nil { + c.Config.Logger.Info("Only one Location field is null - considering unequal.") + return false + } else if *nr.Location != *ncr.Location { + return false + } + if nr.Name == nil && ncr.Name == nil { + c.Config.Logger.Info("Both Name fields null - considering equal.") + } else if nr.Name == nil || ncr.Name == nil { + c.Config.Logger.Info("Only one Name field is null - considering unequal.") + return false + } else if *nr.Name != *ncr.Name { + return false + } + return true + } +} + +type lakeDiff struct { + // The diff should include one or the other of RequiresRecreate or UpdateOp. + RequiresRecreate bool + UpdateOp lakeApiOperation + FieldName string // used for error logging +} + +func convertFieldDiffsToLakeDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]lakeDiff, error) { + opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff) + // Map each operation name to the field diffs associated with it. 
+ for _, fd := range fds { + for _, ro := range fd.ResultingOperation { + if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok { + fieldDiffs = append(fieldDiffs, fd) + opNamesToFieldDiffs[ro] = fieldDiffs + } else { + config.Logger.Infof("%s required due to diff: %v", ro, fd) + opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd} + } + } + } + var diffs []lakeDiff + // For each operation name, create a lakeDiff which contains the operation. + for opName, fieldDiffs := range opNamesToFieldDiffs { + // Use the first field diff's field name for logging required recreate error. + diff := lakeDiff{FieldName: fieldDiffs[0].FieldName} + if opName == "Recreate" { + diff.RequiresRecreate = true + } else { + apiOp, err := convertOpNameToLakeApiOperation(opName, fieldDiffs, opts...) + if err != nil { + return diffs, err + } + diff.UpdateOp = apiOp + } + diffs = append(diffs, diff) + } + return diffs, nil +} + +func convertOpNameToLakeApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (lakeApiOperation, error) { + switch opName { + + case "updateLakeUpdateLakeOperation": + return &updateLakeUpdateLakeOperation{FieldDiffs: fieldDiffs}, nil + + default: + return nil, fmt.Errorf("no such operation with name: %v", opName) + } +} + +func extractLakeFields(r *Lake) error { + vMetastore := r.Metastore + if vMetastore == nil { + // note: explicitly not the empty object. + vMetastore = &LakeMetastore{} + } + if err := extractLakeMetastoreFields(r, vMetastore); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMetastore) { + r.Metastore = vMetastore + } + vAssetStatus := r.AssetStatus + if vAssetStatus == nil { + // note: explicitly not the empty object. 
+ vAssetStatus = &LakeAssetStatus{} + } + if err := extractLakeAssetStatusFields(r, vAssetStatus); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAssetStatus) { + r.AssetStatus = vAssetStatus + } + vMetastoreStatus := r.MetastoreStatus + if vMetastoreStatus == nil { + // note: explicitly not the empty object. + vMetastoreStatus = &LakeMetastoreStatus{} + } + if err := extractLakeMetastoreStatusFields(r, vMetastoreStatus); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMetastoreStatus) { + r.MetastoreStatus = vMetastoreStatus + } + return nil +} +func extractLakeMetastoreFields(r *Lake, o *LakeMetastore) error { + return nil +} +func extractLakeAssetStatusFields(r *Lake, o *LakeAssetStatus) error { + return nil +} +func extractLakeMetastoreStatusFields(r *Lake, o *LakeMetastoreStatus) error { + return nil +} + +func postReadExtractLakeFields(r *Lake) error { + vMetastore := r.Metastore + if vMetastore == nil { + // note: explicitly not the empty object. + vMetastore = &LakeMetastore{} + } + if err := postReadExtractLakeMetastoreFields(r, vMetastore); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMetastore) { + r.Metastore = vMetastore + } + vAssetStatus := r.AssetStatus + if vAssetStatus == nil { + // note: explicitly not the empty object. + vAssetStatus = &LakeAssetStatus{} + } + if err := postReadExtractLakeAssetStatusFields(r, vAssetStatus); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAssetStatus) { + r.AssetStatus = vAssetStatus + } + vMetastoreStatus := r.MetastoreStatus + if vMetastoreStatus == nil { + // note: explicitly not the empty object. 
+ vMetastoreStatus = &LakeMetastoreStatus{} + } + if err := postReadExtractLakeMetastoreStatusFields(r, vMetastoreStatus); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMetastoreStatus) { + r.MetastoreStatus = vMetastoreStatus + } + return nil +} +func postReadExtractLakeMetastoreFields(r *Lake, o *LakeMetastore) error { + return nil +} +func postReadExtractLakeAssetStatusFields(r *Lake, o *LakeAssetStatus) error { + return nil +} +func postReadExtractLakeMetastoreStatusFields(r *Lake, o *LakeMetastoreStatus) error { + return nil +} diff --git a/mmv1/third_party/terraform/services/dataplex/lake_schema.go.tmpl b/mmv1/third_party/terraform/services/dataplex/lake_schema.go.tmpl new file mode 100644 index 000000000000..b097e3d65ff5 --- /dev/null +++ b/mmv1/third_party/terraform/services/dataplex/lake_schema.go.tmpl @@ -0,0 +1,280 @@ +package dataplex + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +func DCLLakeSchema() *dcl.Schema { + return &dcl.Schema{ + Info: &dcl.Info{ + Title: "Dataplex/Lake", + Description: "The Dataplex Lake resource", + StructName: "Lake", + }, + Paths: &dcl.Paths{ + Get: &dcl.Path{ + Description: "The function used to get information about a Lake", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "lake", + Required: true, + Description: "A full instance of a Lake", + }, + }, + }, + Apply: &dcl.Path{ + Description: "The function used to apply information about a Lake", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "lake", + Required: true, + Description: "A full instance of a Lake", + }, + }, + }, + Delete: &dcl.Path{ + Description: "The function used to delete a Lake", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "lake", + Required: true, + Description: "A full instance of a Lake", + }, + }, + }, + DeleteAll: &dcl.Path{ + Description: "The function used to delete all Lake", + Parameters: []dcl.PathParameters{ + 
dcl.PathParameters{ + Name: "project", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + dcl.PathParameters{ + Name: "location", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + }, + }, + List: &dcl.Path{ + Description: "The function used to list information about many Lake", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "project", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + dcl.PathParameters{ + Name: "location", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + }, + }, + }, + Components: &dcl.Components{ + Schemas: map[string]*dcl.Component{ + "Lake": &dcl.Component{ + Title: "Lake", + ID: "projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/lakes/{{ "{{" }}name{{ "}}" }}", + ParentContainer: "project", + HasCreate: true, + SchemaProperty: dcl.Property{ + Type: "object", + Required: []string{ + "name", + "project", + "location", + }, + Properties: map[string]*dcl.Property{ + "assetStatus": &dcl.Property{ + Type: "object", + GoName: "AssetStatus", + GoType: "LakeAssetStatus", + ReadOnly: true, + Description: "Output only. 
Aggregated status of the underlying assets of the lake.", + Properties: map[string]*dcl.Property{ + "activeAssets": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "ActiveAssets", + Description: "Number of active assets.", + }, + "securityPolicyApplyingAssets": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "SecurityPolicyApplyingAssets", + Description: "Number of assets that are in process of updating the security policy on attached resources.", + }, + "updateTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "UpdateTime", + Description: "Last update time of the status.", + }, + }, + }, + "createTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "CreateTime", + ReadOnly: true, + Description: "Output only. The time when the lake was created.", + Immutable: true, + }, + "description": &dcl.Property{ + Type: "string", + GoName: "Description", + Description: "Optional. Description of the lake.", + }, + "displayName": &dcl.Property{ + Type: "string", + GoName: "DisplayName", + Description: "Optional. User friendly display name.", + }, + "labels": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "Labels", + Description: "Optional. User-defined labels for the lake.", + }, + "location": &dcl.Property{ + Type: "string", + GoName: "Location", + Description: "The location for the resource", + Immutable: true, + Parameter: true, + }, + "metastore": &dcl.Property{ + Type: "object", + GoName: "Metastore", + GoType: "LakeMetastore", + Description: "Optional. Settings to manage lake and Dataproc Metastore service instance association.", + Properties: map[string]*dcl.Property{ + "service": &dcl.Property{ + Type: "string", + GoName: "Service", + Description: "Optional. 
A relative reference to the Dataproc Metastore (https://cloud.google.com/dataproc-metastore/docs) service associated with the lake: `projects/{project_id}/locations/{location_id}/services/{service_id}`", + }, + }, + }, + "metastoreStatus": &dcl.Property{ + Type: "object", + GoName: "MetastoreStatus", + GoType: "LakeMetastoreStatus", + ReadOnly: true, + Description: "Output only. Metastore status of the lake.", + Properties: map[string]*dcl.Property{ + "endpoint": &dcl.Property{ + Type: "string", + GoName: "Endpoint", + Description: "The URI of the endpoint used to access the Metastore service.", + }, + "message": &dcl.Property{ + Type: "string", + GoName: "Message", + Description: "Additional information about the current status.", + }, + "state": &dcl.Property{ + Type: "string", + GoName: "State", + GoType: "LakeMetastoreStatusStateEnum", + Description: "Current state of association. Possible values: STATE_UNSPECIFIED, NONE, READY, UPDATING, ERROR", + Enum: []string{ + "STATE_UNSPECIFIED", + "NONE", + "READY", + "UPDATING", + "ERROR", + }, + }, + "updateTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "UpdateTime", + Description: "Last update time of the metastore status of the lake.", + }, + }, + }, + "name": &dcl.Property{ + Type: "string", + GoName: "Name", + Description: "The name of the lake.", + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Dataplex/Lake", + Field: "selfLink", + Parent: true, + }, + }, + HasLongForm: true, + }, + "project": &dcl.Property{ + Type: "string", + GoName: "Project", + Description: "The project for the resource", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Cloudresourcemanager/Project", + Field: "name", + Parent: true, + }, + }, + Parameter: true, + }, + "serviceAccount": &dcl.Property{ + Type: "string", + GoName: "ServiceAccount", + ReadOnly: true, + Description: "Output only. 
Service account associated with this lake. This service account must be authorized to access or operate on resources managed by the lake.", + Immutable: true, + }, + "state": &dcl.Property{ + Type: "string", + GoName: "State", + GoType: "LakeStateEnum", + ReadOnly: true, + Description: "Output only. Current state of the lake. Possible values: STATE_UNSPECIFIED, ACTIVE, CREATING, DELETING, ACTION_REQUIRED", + Immutable: true, + Enum: []string{ + "STATE_UNSPECIFIED", + "ACTIVE", + "CREATING", + "DELETING", + "ACTION_REQUIRED", + }, + }, + "uid": &dcl.Property{ + Type: "string", + GoName: "Uid", + ReadOnly: true, + Description: "Output only. System generated globally unique ID for the lake. This ID will be different if the lake is deleted and re-created with the same name.", + Immutable: true, + }, + "updateTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "UpdateTime", + ReadOnly: true, + Description: "Output only. The time when the lake was last updated.", + Immutable: true, + }, + }, + }, + }, + }, + }, + } +} diff --git a/mmv1/third_party/terraform/services/dataplex/provider_dcl_client_creation.go b/mmv1/third_party/terraform/services/dataplex/provider_dcl_client_creation.go new file mode 100644 index 000000000000..93a630010fe0 --- /dev/null +++ b/mmv1/third_party/terraform/services/dataplex/provider_dcl_client_creation.go @@ -0,0 +1,30 @@ +package dataplex + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "time" +) + +func NewDCLDataplexClient(config *transport_tpg.Config, userAgent, billingProject string, timeout time.Duration) *Client { + configOptions := []dcl.ConfigOption{ + dcl.WithHTTPClient(config.Client), + dcl.WithUserAgent(userAgent), + dcl.WithLogger(dcl.DCLLogger{}), + dcl.WithBasePath(config.DataplexBasePath), + } + + if timeout != 0 { + configOptions = append(configOptions, dcl.WithTimeout(timeout)) + } + 
+ if config.UserProjectOverride { + configOptions = append(configOptions, dcl.WithUserProjectOverride()) + if billingProject != "" { + configOptions = append(configOptions, dcl.WithBillingProject(billingProject)) + } + } + + dclConfig := dcl.NewConfig(configOptions...) + return NewClient(dclConfig) +} diff --git a/mmv1/third_party/terraform/services/dataplex/resource_dataplex_asset.go b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_asset.go new file mode 100644 index 000000000000..068f9d9a77c6 --- /dev/null +++ b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_asset.go @@ -0,0 +1,909 @@ +package dataplex + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceDataplexAsset() *schema.Resource { + return &schema.Resource{ + Create: resourceDataplexAssetCreate, + Read: resourceDataplexAssetRead, + Update: resourceDataplexAssetUpdate, + Delete: resourceDataplexAssetDelete, + + Importer: &schema.ResourceImporter{ + State: resourceDataplexAssetImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + tpgresource.SetLabelsDiff, + ), + + Schema: map[string]*schema.Schema{ + "dataplex_zone": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The zone for the resource", + }, + + "discovery_spec": { + Type: schema.TypeList, + Required: true, + 
Description: "Required. Specification of the discovery feature applied to data referenced by this asset. When this spec is left unset, the asset will use the spec set on the parent zone.", + MaxItems: 1, + Elem: DataplexAssetDiscoverySpecSchema(), + }, + + "lake": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The lake for the resource", + }, + + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The location for the resource", + }, + + "name": { + Type: schema.TypeString, + Required: true, + Description: "The name of the asset.", + }, + + "resource_spec": { + Type: schema.TypeList, + Required: true, + Description: "Required. Immutable. Specification of the resource that is referenced by this asset.", + MaxItems: 1, + Elem: DataplexAssetResourceSpecSchema(), + }, + + "description": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. Description of the asset.", + }, + + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. User friendly display name.", + }, + + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + Description: "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The project for the resource", + }, + + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time when the asset was created.", + }, + + "discovery_status": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. Status of the discovery feature applied to data referenced by this asset.", + Elem: DataplexAssetDiscoveryStatusSchema(), + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: "Optional. 
User defined labels for the asset.\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field `effective_labels` for all of the labels present on the resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "resource_status": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. Status of the resource referenced by this asset.", + Elem: DataplexAssetResourceStatusSchema(), + }, + + "security_status": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. Status of the security policy applied to resource referenced by this asset.", + Elem: DataplexAssetSecurityStatusSchema(), + }, + + "state": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. Current state of the asset. Possible values: STATE_UNSPECIFIED, ACTIVE, CREATING, DELETING, ACTION_REQUIRED", + }, + + "terraform_labels": { + Type: schema.TypeMap, + Computed: true, + Description: "The combination of labels configured directly on the resource and default labels configured on the provider.", + }, + + "uid": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. System generated globally unique ID for the asset. This ID will be different if the asset is deleted and re-created with the same name.", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time when the asset was last updated.", + }, + }, + } +} + +func DataplexAssetDiscoverySpecSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + Description: "Required. Whether discovery is enabled.", + }, + + "csv_options": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: "Optional. 
Configuration for CSV data.", + MaxItems: 1, + Elem: DataplexAssetDiscoverySpecCsvOptionsSchema(), + }, + + "exclude_patterns": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. The list of patterns to apply for selecting data to exclude during discovery. For Cloud Storage bucket assets, these are interpreted as glob patterns used to match object names. For BigQuery dataset assets, these are interpreted as patterns to match table names.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "include_patterns": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. The list of patterns to apply for selecting data to include during discovery if only a subset of the data should considered. For Cloud Storage bucket assets, these are interpreted as glob patterns used to match object names. For BigQuery dataset assets, these are interpreted as patterns to match table names.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "json_options": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: "Optional. Configuration for Json data.", + MaxItems: 1, + Elem: DataplexAssetDiscoverySpecJsonOptionsSchema(), + }, + + "schedule": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. Cron schedule (https://en.wikipedia.org/wiki/Cron) for running discovery periodically. Successive discovery runs must be scheduled at least 60 minutes apart. The default value is to run discovery every 60 minutes. To explicitly set a timezone to the cron tab, apply a prefix in the cron tab: \"CRON_TZ=${IANA_TIME_ZONE}\" or TZ=${IANA_TIME_ZONE}\". The ${IANA_TIME_ZONE} may only be a valid string from IANA time zone database. 
For example, \"CRON_TZ=America/New_York 1 * * * *\", or \"TZ=America/New_York 1 * * * *\".", + }, + }, + } +} + +func DataplexAssetDiscoverySpecCsvOptionsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "delimiter": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. The delimiter being used to separate values. This defaults to ','.", + }, + + "disable_type_inference": { + Type: schema.TypeBool, + Optional: true, + Description: "Optional. Whether to disable the inference of data type for CSV data. If true, all columns will be registered as strings.", + }, + + "encoding": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. The character encoding of the data. The default is UTF-8.", + }, + + "header_rows": { + Type: schema.TypeInt, + Optional: true, + Description: "Optional. The number of rows to interpret as header rows that should be skipped when reading data rows.", + }, + }, + } +} + +func DataplexAssetDiscoverySpecJsonOptionsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disable_type_inference": { + Type: schema.TypeBool, + Optional: true, + Description: "Optional. Whether to disable the inference of data type for Json data. If true, all columns will be registered as their primitive types (strings, number or boolean).", + }, + + "encoding": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. The character encoding of the data. The default is UTF-8.", + }, + }, + } +} + +func DataplexAssetResourceSpecSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. Immutable. Type of resource. Possible values: STORAGE_BUCKET, BIGQUERY_DATASET", + }, + + "name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Immutable. 
Relative name of the cloud resource that contains the data that is being managed within a lake. For example: `projects/{project_number}/buckets/{bucket_id}` `projects/{project_number}/datasets/{dataset_id}`", + }, + + "read_access_mode": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: "Optional. Determines how read permissions are handled for each asset and their associated tables. Only available to storage buckets assets. Possible values: DIRECT, MANAGED", + }, + }, + } +} + +func DataplexAssetDiscoveryStatusSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "last_run_duration": { + Type: schema.TypeString, + Computed: true, + Description: "The duration of the last discovery run.", + }, + + "last_run_time": { + Type: schema.TypeString, + Computed: true, + Description: "The start time of the last discovery run.", + }, + + "message": { + Type: schema.TypeString, + Computed: true, + Description: "Additional information about the current state.", + }, + + "state": { + Type: schema.TypeString, + Computed: true, + Description: "The current status of the discovery feature. 
Possible values: STATE_UNSPECIFIED, SCHEDULED, IN_PROGRESS, PAUSED, DISABLED", + }, + + "stats": { + Type: schema.TypeList, + Computed: true, + Description: "Data Stats of the asset reported by discovery.", + Elem: DataplexAssetDiscoveryStatusStatsSchema(), + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Last update time of the status.", + }, + }, + } +} + +func DataplexAssetDiscoveryStatusStatsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "data_items": { + Type: schema.TypeInt, + Computed: true, + Description: "The count of data items within the referenced resource.", + }, + + "data_size": { + Type: schema.TypeInt, + Computed: true, + Description: "The number of stored data bytes within the referenced resource.", + }, + + "filesets": { + Type: schema.TypeInt, + Computed: true, + Description: "The count of fileset entities within the referenced resource.", + }, + + "tables": { + Type: schema.TypeInt, + Computed: true, + Description: "The count of table entities within the referenced resource.", + }, + }, + } +} + +func DataplexAssetResourceStatusSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "message": { + Type: schema.TypeString, + Computed: true, + Description: "Additional information about the current state.", + }, + + "state": { + Type: schema.TypeString, + Computed: true, + Description: "The current state of the managed resource. 
Possible values: STATE_UNSPECIFIED, READY, ERROR", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Last update time of the status.", + }, + }, + } +} + +func DataplexAssetSecurityStatusSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "message": { + Type: schema.TypeString, + Computed: true, + Description: "Additional information about the current state.", + }, + + "state": { + Type: schema.TypeString, + Computed: true, + Description: "The current state of the security policy applied to the attached resource. Possible values: STATE_UNSPECIFIED, READY, APPLYING, ERROR", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Last update time of the status.", + }, + }, + } +} + +func resourceDataplexAssetCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Asset{ + DataplexZone: dcl.String(d.Get("dataplex_zone").(string)), + DiscoverySpec: expandDataplexAssetDiscoverySpec(d.Get("discovery_spec")), + Lake: dcl.String(d.Get("lake").(string)), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + ResourceSpec: expandDataplexAssetResourceSpec(d.Get("resource_spec")), + Description: dcl.String(d.Get("description").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + Project: dcl.String(project), + } + + id, err := obj.ID() + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := 
tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyAsset(context.Background(), obj, directive...) + + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating Asset: %s", err) + } + + log.Printf("[DEBUG] Finished creating Asset %q: %#v", d.Id(), res) + + return resourceDataplexAssetRead(d, meta) +} + +func resourceDataplexAssetRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Asset{ + DataplexZone: dcl.String(d.Get("dataplex_zone").(string)), + DiscoverySpec: expandDataplexAssetDiscoverySpec(d.Get("discovery_spec")), + Lake: dcl.String(d.Get("lake").(string)), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + ResourceSpec: expandDataplexAssetResourceSpec(d.Get("resource_spec")), + Description: dcl.String(d.Get("description").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + Project: dcl.String(project), + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := 
NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetAsset(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("DataplexAsset %q", d.Id()) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("dataplex_zone", res.DataplexZone); err != nil { + return fmt.Errorf("error setting dataplex_zone in state: %s", err) + } + if err = d.Set("discovery_spec", flattenDataplexAssetDiscoverySpec(res.DiscoverySpec)); err != nil { + return fmt.Errorf("error setting discovery_spec in state: %s", err) + } + if err = d.Set("lake", res.Lake); err != nil { + return fmt.Errorf("error setting lake in state: %s", err) + } + if err = d.Set("location", res.Location); err != nil { + return fmt.Errorf("error setting location in state: %s", err) + } + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + if err = d.Set("resource_spec", flattenDataplexAssetResourceSpec(res.ResourceSpec)); err != nil { + return fmt.Errorf("error setting resource_spec in state: %s", err) + } + if err = d.Set("description", res.Description); err != nil { + return fmt.Errorf("error setting description in state: %s", err) + } + if err = d.Set("display_name", res.DisplayName); err != nil { + return fmt.Errorf("error setting display_name in state: %s", err) + } + if err = d.Set("effective_labels", res.Labels); err != nil { + return fmt.Errorf("error setting effective_labels in state: %s", err) + } + if err = d.Set("project", res.Project); err != nil { + return fmt.Errorf("error setting project in state: %s", err) + } + if err = d.Set("create_time", res.CreateTime); err != nil { + return fmt.Errorf("error setting create_time 
in state: %s", err) + } + if err = d.Set("discovery_status", flattenDataplexAssetDiscoveryStatus(res.DiscoveryStatus)); err != nil { + return fmt.Errorf("error setting discovery_status in state: %s", err) + } + if err = d.Set("labels", flattenDataplexAssetLabels(res.Labels, d)); err != nil { + return fmt.Errorf("error setting labels in state: %s", err) + } + if err = d.Set("resource_status", flattenDataplexAssetResourceStatus(res.ResourceStatus)); err != nil { + return fmt.Errorf("error setting resource_status in state: %s", err) + } + if err = d.Set("security_status", flattenDataplexAssetSecurityStatus(res.SecurityStatus)); err != nil { + return fmt.Errorf("error setting security_status in state: %s", err) + } + if err = d.Set("state", res.State); err != nil { + return fmt.Errorf("error setting state in state: %s", err) + } + if err = d.Set("terraform_labels", flattenDataplexAssetTerraformLabels(res.Labels, d)); err != nil { + return fmt.Errorf("error setting terraform_labels in state: %s", err) + } + if err = d.Set("uid", res.Uid); err != nil { + return fmt.Errorf("error setting uid in state: %s", err) + } + if err = d.Set("update_time", res.UpdateTime); err != nil { + return fmt.Errorf("error setting update_time in state: %s", err) + } + + return nil +} +func resourceDataplexAssetUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Asset{ + DataplexZone: dcl.String(d.Get("dataplex_zone").(string)), + DiscoverySpec: expandDataplexAssetDiscoverySpec(d.Get("discovery_spec")), + Lake: dcl.String(d.Get("lake").(string)), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + ResourceSpec: expandDataplexAssetResourceSpec(d.Get("resource_spec")), + Description: dcl.String(d.Get("description").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Labels: 
tpgresource.CheckStringMap(d.Get("effective_labels")), + Project: dcl.String(project), + } + directive := tpgdclresource.UpdateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyAsset(context.Background(), obj, directive...) + + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually update + d.SetId("") + return fmt.Errorf("Error updating Asset: %s", err) + } + + log.Printf("[DEBUG] Finished updating Asset %q: %#v", d.Id(), res) + + return resourceDataplexAssetRead(d, meta) +} + +func resourceDataplexAssetDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Asset{ + DataplexZone: dcl.String(d.Get("dataplex_zone").(string)), + DiscoverySpec: expandDataplexAssetDiscoverySpec(d.Get("discovery_spec")), + Lake: dcl.String(d.Get("lake").(string)), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + ResourceSpec: expandDataplexAssetResourceSpec(d.Get("resource_spec")), + Description: dcl.String(d.Get("description").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + Project:
dcl.String(project), + } + + log.Printf("[DEBUG] Deleting Asset %q", d.Id()) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + if err := client.DeleteAsset(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting Asset: %s", err) + } + + log.Printf("[DEBUG] Finished deleting Asset %q", d.Id()) + return nil +} + +func resourceDataplexAssetImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/lakes/(?P[^/]+)/zones/(?P[^/]+)/assets/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{dataplex_zone}}/assets/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func expandDataplexAssetDiscoverySpec(o interface{}) *AssetDiscoverySpec { + if o == nil { + return EmptyAssetDiscoverySpec + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyAssetDiscoverySpec + } + obj := objArr[0].(map[string]interface{}) + return 
&AssetDiscoverySpec{ + Enabled: dcl.Bool(obj["enabled"].(bool)), + CsvOptions: expandDataplexAssetDiscoverySpecCsvOptions(obj["csv_options"]), + ExcludePatterns: tpgdclresource.ExpandStringArray(obj["exclude_patterns"]), + IncludePatterns: tpgdclresource.ExpandStringArray(obj["include_patterns"]), + JsonOptions: expandDataplexAssetDiscoverySpecJsonOptions(obj["json_options"]), + Schedule: dcl.String(obj["schedule"].(string)), + } +} + +func flattenDataplexAssetDiscoverySpec(obj *AssetDiscoverySpec) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "enabled": obj.Enabled, + "csv_options": flattenDataplexAssetDiscoverySpecCsvOptions(obj.CsvOptions), + "exclude_patterns": obj.ExcludePatterns, + "include_patterns": obj.IncludePatterns, + "json_options": flattenDataplexAssetDiscoverySpecJsonOptions(obj.JsonOptions), + "schedule": obj.Schedule, + } + + return []interface{}{transformed} + +} + +func expandDataplexAssetDiscoverySpecCsvOptions(o interface{}) *AssetDiscoverySpecCsvOptions { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &AssetDiscoverySpecCsvOptions{ + Delimiter: dcl.String(obj["delimiter"].(string)), + DisableTypeInference: dcl.Bool(obj["disable_type_inference"].(bool)), + Encoding: dcl.String(obj["encoding"].(string)), + HeaderRows: dcl.Int64(int64(obj["header_rows"].(int))), + } +} + +func flattenDataplexAssetDiscoverySpecCsvOptions(obj *AssetDiscoverySpecCsvOptions) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "delimiter": obj.Delimiter, + "disable_type_inference": obj.DisableTypeInference, + "encoding": obj.Encoding, + "header_rows": obj.HeaderRows, + } + + return []interface{}{transformed} + +} + +func expandDataplexAssetDiscoverySpecJsonOptions(o interface{}) *AssetDiscoverySpecJsonOptions { + if o == nil { 
+ return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &AssetDiscoverySpecJsonOptions{ + DisableTypeInference: dcl.Bool(obj["disable_type_inference"].(bool)), + Encoding: dcl.String(obj["encoding"].(string)), + } +} + +func flattenDataplexAssetDiscoverySpecJsonOptions(obj *AssetDiscoverySpecJsonOptions) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "disable_type_inference": obj.DisableTypeInference, + "encoding": obj.Encoding, + } + + return []interface{}{transformed} + +} + +func expandDataplexAssetResourceSpec(o interface{}) *AssetResourceSpec { + if o == nil { + return EmptyAssetResourceSpec + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyAssetResourceSpec + } + obj := objArr[0].(map[string]interface{}) + return &AssetResourceSpec{ + Type: AssetResourceSpecTypeEnumRef(obj["type"].(string)), + Name: dcl.String(obj["name"].(string)), + ReadAccessMode: AssetResourceSpecReadAccessModeEnumRef(obj["read_access_mode"].(string)), + } +} + +func flattenDataplexAssetResourceSpec(obj *AssetResourceSpec) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "type": obj.Type, + "name": obj.Name, + "read_access_mode": obj.ReadAccessMode, + } + + return []interface{}{transformed} + +} + +func flattenDataplexAssetDiscoveryStatus(obj *AssetDiscoveryStatus) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "last_run_duration": obj.LastRunDuration, + "last_run_time": obj.LastRunTime, + "message": obj.Message, + "state": obj.State, + "stats": flattenDataplexAssetDiscoveryStatusStats(obj.Stats), + "update_time": obj.UpdateTime, + } + + return []interface{}{transformed} + +} + +func flattenDataplexAssetDiscoveryStatusStats(obj *AssetDiscoveryStatusStats) interface{} { 
+ if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "data_items": obj.DataItems, + "data_size": obj.DataSize, + "filesets": obj.Filesets, + "tables": obj.Tables, + } + + return []interface{}{transformed} + +} + +func flattenDataplexAssetResourceStatus(obj *AssetResourceStatus) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "message": obj.Message, + "state": obj.State, + "update_time": obj.UpdateTime, + } + + return []interface{}{transformed} + +} + +func flattenDataplexAssetSecurityStatus(obj *AssetSecurityStatus) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "message": obj.Message, + "state": obj.State, + "update_time": obj.UpdateTime, + } + + return []interface{}{transformed} + +} + +func flattenDataplexAssetLabels(v map[string]string, d *schema.ResourceData) interface{} { + if v == nil { + return nil + } + + transformed := make(map[string]interface{}) + if l, ok := d.Get("labels").(map[string]interface{}); ok { + for k := range l { + transformed[k] = v[k] + } + } + + return transformed +} + +func flattenDataplexAssetTerraformLabels(v map[string]string, d *schema.ResourceData) interface{} { + if v == nil { + return nil + } + + transformed := make(map[string]interface{}) + if l, ok := d.Get("terraform_labels").(map[string]interface{}); ok { + for k := range l { + transformed[k] = v[k] + } + } + + return transformed +} diff --git a/mmv1/third_party/terraform/services/dataplex/resource_dataplex_asset_generated_test.go.tmpl b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_asset_generated_test.go.tmpl new file mode 100644 index 000000000000..1c0c78e41767 --- /dev/null +++ b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_asset_generated_test.go.tmpl @@ -0,0 +1,239 @@ +package dataplex_test + +import ( + "context" + "fmt" + dcl 
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" +{{- if eq $.TargetVersionName "ga" }} + dataplex "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataplex" +{{- else }} + dataplex "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataplex/beta" +{{- end }} + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func TestAccDataplexAsset_BasicAssetHandWritten(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": envvar.GetTestProjectFromEnv(), + "region": envvar.GetTestRegionFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataplexAssetDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataplexAsset_BasicAssetHandWritten(context), + }, + { + ResourceName: "google_dataplex_asset.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"resource_spec.0.name", "labels", "terraform_labels"}, + }, + { + Config: testAccDataplexAsset_BasicAssetHandWrittenUpdate0(context), + }, + { + ResourceName: "google_dataplex_asset.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"resource_spec.0.name", "labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccDataplexAsset_BasicAssetHandWritten(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_storage_bucket" "basic_bucket" { + name = 
"tf-test-bucket%{random_suffix}" + location = "%{region}" + uniform_bucket_level_access = true + lifecycle { + ignore_changes = [ + labels + ] + } + + project = "%{project_name}" +} + +resource "google_dataplex_lake" "basic_lake" { + name = "tf-test-lake%{random_suffix}" + location = "%{region}" + project = "%{project_name}" +} + + +resource "google_dataplex_zone" "basic_zone" { + name = "tf-test-zone%{random_suffix}" + location = "%{region}" + lake = google_dataplex_lake.basic_lake.name + type = "RAW" + + discovery_spec { + enabled = false + } + + + resource_spec { + location_type = "SINGLE_REGION" + } + + project = "%{project_name}" +} + + +resource "google_dataplex_asset" "primary" { + name = "tf-test-asset%{random_suffix}" + location = "%{region}" + + lake = google_dataplex_lake.basic_lake.name + dataplex_zone = google_dataplex_zone.basic_zone.name + + discovery_spec { + enabled = false + } + + resource_spec { + name = "projects/%{project_name}/buckets/tf-test-bucket%{random_suffix}" + type = "STORAGE_BUCKET" + } + + labels = { + env = "foo" + my-asset = "exists" + } + + + project = "%{project_name}" + depends_on = [ + google_storage_bucket.basic_bucket + ] +} +`, context) +} + +func testAccDataplexAsset_BasicAssetHandWrittenUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_storage_bucket" "basic_bucket" { + name = "tf-test-bucket%{random_suffix}" + location = "%{region}" + uniform_bucket_level_access = true + lifecycle { + ignore_changes = [ + labels + ] + } + + project = "%{project_name}" +} + +resource "google_dataplex_lake" "basic_lake" { + name = "tf-test-lake%{random_suffix}" + location = "%{region}" + project = "%{project_name}" +} + + +resource "google_dataplex_zone" "basic_zone" { + name = "tf-test-zone%{random_suffix}" + location = "%{region}" + lake = google_dataplex_lake.basic_lake.name + type = "RAW" + + discovery_spec { + enabled = false + } + + + resource_spec { + location_type = "SINGLE_REGION" + } + + 
project = "%{project_name}" +} + + +resource "google_dataplex_asset" "primary" { + name = "tf-test-asset%{random_suffix}" + location = "%{region}" + + lake = google_dataplex_lake.basic_lake.name + dataplex_zone = google_dataplex_zone.basic_zone.name + + discovery_spec { + enabled = false + } + + resource_spec { + name = "projects/%{project_name}/buckets/tf-test-bucket%{random_suffix}" + type = "STORAGE_BUCKET" + } + + labels = { + env = "foo" + my-asset = "exists" + } + + + project = "%{project_name}" + depends_on = [ + google_storage_bucket.basic_bucket + ] +} +`, context) +} + +func testAccCheckDataplexAssetDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "rs.google_dataplex_asset" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + billingProject := "" + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + obj := &dataplex.Asset{ + DataplexZone: dcl.String(rs.Primary.Attributes["dataplex_zone"]), + Lake: dcl.String(rs.Primary.Attributes["lake"]), + Location: dcl.String(rs.Primary.Attributes["location"]), + Name: dcl.String(rs.Primary.Attributes["name"]), + Description: dcl.String(rs.Primary.Attributes["description"]), + DisplayName: dcl.String(rs.Primary.Attributes["display_name"]), + Project: dcl.StringOrNil(rs.Primary.Attributes["project"]), + CreateTime: dcl.StringOrNil(rs.Primary.Attributes["create_time"]), + State: dataplex.AssetStateEnumRef(rs.Primary.Attributes["state"]), + Uid: dcl.StringOrNil(rs.Primary.Attributes["uid"]), + UpdateTime: dcl.StringOrNil(rs.Primary.Attributes["update_time"]), + } + + client := transport_tpg.NewDCLDataplexClient(config, config.UserAgent, billingProject, 0) + _, err := client.GetAsset(context.Background(), obj) + if err == nil { + return fmt.Errorf("google_dataplex_asset still exists %v", obj) + } + } + 
return nil + } +} diff --git a/mmv1/third_party/terraform/services/dataplex/resource_dataplex_lake.go b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_lake.go new file mode 100644 index 000000000000..104ede539247 --- /dev/null +++ b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_lake.go @@ -0,0 +1,555 @@ +package dataplex + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceDataplexLake() *schema.Resource { + return &schema.Resource{ + Create: resourceDataplexLakeCreate, + Read: resourceDataplexLakeRead, + Update: resourceDataplexLakeUpdate, + Delete: resourceDataplexLakeDelete, + + Importer: &schema.ResourceImporter{ + State: resourceDataplexLakeImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + tpgresource.SetLabelsDiff, + ), + + Schema: map[string]*schema.Schema{ + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The location for the resource", + }, + + "name": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The name of the lake.", + }, + + "description": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. 
Description of the lake.", + }, + + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. User friendly display name.", + }, + + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + Description: "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + }, + + "metastore": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. Settings to manage lake and Dataproc Metastore service instance association.", + MaxItems: 1, + Elem: DataplexLakeMetastoreSchema(), + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The project for the resource", + }, + + "asset_status": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. Aggregated status of the underlying assets of the lake.", + Elem: DataplexLakeAssetStatusSchema(), + }, + + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time when the lake was created.", + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: "Optional. User-defined labels for the lake.\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field `effective_labels` for all of the labels present on the resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "metastore_status": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. Metastore status of the lake.", + Elem: DataplexLakeMetastoreStatusSchema(), + }, + + "service_account": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. Service account associated with this lake. 
This service account must be authorized to access or operate on resources managed by the lake.", + }, + + "state": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. Current state of the lake. Possible values: STATE_UNSPECIFIED, ACTIVE, CREATING, DELETING, ACTION_REQUIRED", + }, + + "terraform_labels": { + Type: schema.TypeMap, + Computed: true, + Description: "The combination of labels configured directly on the resource and default labels configured on the provider.", + }, + + "uid": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. System generated globally unique ID for the lake. This ID will be different if the lake is deleted and re-created with the same name.", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time when the lake was last updated.", + }, + }, + } +} + +func DataplexLakeMetastoreSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "service": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. 
A relative reference to the Dataproc Metastore (https://cloud.google.com/dataproc-metastore/docs) service associated with the lake: `projects/{project_id}/locations/{location_id}/services/{service_id}`", + }, + }, + } +} + +func DataplexLakeAssetStatusSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "active_assets": { + Type: schema.TypeInt, + Computed: true, + Description: "Number of active assets.", + }, + + "security_policy_applying_assets": { + Type: schema.TypeInt, + Computed: true, + Description: "Number of assets that are in process of updating the security policy on attached resources.", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Last update time of the status.", + }, + }, + } +} + +func DataplexLakeMetastoreStatusSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "endpoint": { + Type: schema.TypeString, + Computed: true, + Description: "The URI of the endpoint used to access the Metastore service.", + }, + + "message": { + Type: schema.TypeString, + Computed: true, + Description: "Additional information about the current status.", + }, + + "state": { + Type: schema.TypeString, + Computed: true, + Description: "Current state of association. 
Possible values: STATE_UNSPECIFIED, NONE, READY, UPDATING, ERROR", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Last update time of the metastore status of the lake.", + }, + }, + } +} + +func resourceDataplexLakeCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Lake{ + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Description: dcl.String(d.Get("description").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + Metastore: expandDataplexLakeMetastore(d.Get("metastore")), + Project: dcl.String(project), + } + + id, err := obj.ID() + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyLake(context.Background(), obj, directive...) 
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating Lake: %s", err) + } + + log.Printf("[DEBUG] Finished creating Lake %q: %#v", d.Id(), res) + + return resourceDataplexLakeRead(d, meta) +} + +func resourceDataplexLakeRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Lake{ + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Description: dcl.String(d.Get("description").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + Metastore: expandDataplexLakeMetastore(d.Get("metastore")), + Project: dcl.String(project), + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetLake(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("DataplexLake %q", d.Id()) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("location", res.Location); err != nil { + return fmt.Errorf("error setting location in state: %s", err) + } + if err = d.Set("name", res.Name); err != nil { + 
return fmt.Errorf("error setting name in state: %s", err) + } + if err = d.Set("description", res.Description); err != nil { + return fmt.Errorf("error setting description in state: %s", err) + } + if err = d.Set("display_name", res.DisplayName); err != nil { + return fmt.Errorf("error setting display_name in state: %s", err) + } + if err = d.Set("effective_labels", res.Labels); err != nil { + return fmt.Errorf("error setting effective_labels in state: %s", err) + } + if err = d.Set("metastore", flattenDataplexLakeMetastore(res.Metastore)); err != nil { + return fmt.Errorf("error setting metastore in state: %s", err) + } + if err = d.Set("project", res.Project); err != nil { + return fmt.Errorf("error setting project in state: %s", err) + } + if err = d.Set("asset_status", flattenDataplexLakeAssetStatus(res.AssetStatus)); err != nil { + return fmt.Errorf("error setting asset_status in state: %s", err) + } + if err = d.Set("create_time", res.CreateTime); err != nil { + return fmt.Errorf("error setting create_time in state: %s", err) + } + if err = d.Set("labels", flattenDataplexLakeLabels(res.Labels, d)); err != nil { + return fmt.Errorf("error setting labels in state: %s", err) + } + if err = d.Set("metastore_status", flattenDataplexLakeMetastoreStatus(res.MetastoreStatus)); err != nil { + return fmt.Errorf("error setting metastore_status in state: %s", err) + } + if err = d.Set("service_account", res.ServiceAccount); err != nil { + return fmt.Errorf("error setting service_account in state: %s", err) + } + if err = d.Set("state", res.State); err != nil { + return fmt.Errorf("error setting state in state: %s", err) + } + if err = d.Set("terraform_labels", flattenDataplexLakeTerraformLabels(res.Labels, d)); err != nil { + return fmt.Errorf("error setting terraform_labels in state: %s", err) + } + if err = d.Set("uid", res.Uid); err != nil { + return fmt.Errorf("error setting uid in state: %s", err) + } + if err = d.Set("update_time", res.UpdateTime); err != nil { + 
return fmt.Errorf("error setting update_time in state: %s", err) + } + + return nil +} +func resourceDataplexLakeUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Lake{ + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Description: dcl.String(d.Get("description").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + Metastore: expandDataplexLakeMetastore(d.Get("metastore")), + Project: dcl.String(project), + } + directive := tpgdclresource.UpdateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyLake(context.Background(), obj, directive...) 
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually update + d.SetId("") + return fmt.Errorf("Error updating Lake: %s", err) + } + + log.Printf("[DEBUG] Finished updating Lake %q: %#v", d.Id(), res) + + return resourceDataplexLakeRead(d, meta) +} + +func resourceDataplexLakeDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Lake{ + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Description: dcl.String(d.Get("description").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + Metastore: expandDataplexLakeMetastore(d.Get("metastore")), + Project: dcl.String(project), + } + + log.Printf("[DEBUG] Deleting Lake %q", d.Id()) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + if err := client.DeleteLake(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting Lake: %s", err) + } + + log.Printf("[DEBUG] Finished deleting Lake %q", d.Id()) + return nil +} + +func resourceDataplexLakeImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := 
meta.(*transport_tpg.Config) + + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/lakes/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/lakes/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func expandDataplexLakeMetastore(o interface{}) *LakeMetastore { + if o == nil { + return EmptyLakeMetastore + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyLakeMetastore + } + obj := objArr[0].(map[string]interface{}) + return &LakeMetastore{ + Service: dcl.String(obj["service"].(string)), + } +} + +func flattenDataplexLakeMetastore(obj *LakeMetastore) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "service": obj.Service, + } + + return []interface{}{transformed} + +} + +func flattenDataplexLakeAssetStatus(obj *LakeAssetStatus) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "active_assets": obj.ActiveAssets, + "security_policy_applying_assets": obj.SecurityPolicyApplyingAssets, + "update_time": obj.UpdateTime, + } + + return []interface{}{transformed} + +} + +func flattenDataplexLakeMetastoreStatus(obj *LakeMetastoreStatus) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "endpoint": obj.Endpoint, + "message": obj.Message, + "state": obj.State, + "update_time": obj.UpdateTime, + } + + return []interface{}{transformed} + +} + +func flattenDataplexLakeLabels(v map[string]string, d *schema.ResourceData) interface{} { + if v == nil { + return nil + } + + transformed := make(map[string]interface{}) + if l, 
ok := d.Get("labels").(map[string]interface{}); ok { + for k := range l { + transformed[k] = v[k] + } + } + + return transformed +} + +func flattenDataplexLakeTerraformLabels(v map[string]string, d *schema.ResourceData) interface{} { + if v == nil { + return nil + } + + transformed := make(map[string]interface{}) + if l, ok := d.Get("terraform_labels").(map[string]interface{}); ok { + for k := range l { + transformed[k] = v[k] + } + } + + return transformed +} diff --git a/mmv1/third_party/terraform/services/dataplex/resource_dataplex_lake_generated_test.go.tmpl b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_lake_generated_test.go.tmpl new file mode 100644 index 000000000000..d04efc4d65d3 --- /dev/null +++ b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_lake_generated_test.go.tmpl @@ -0,0 +1,132 @@ +package dataplex_test + +import ( + "context" + "fmt" + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" +{{- if eq $.TargetVersionName "ga" }} + dataplex "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataplex" +{{- else }} + dataplex "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataplex/beta" +{{- end }} + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func TestAccDataplexLake_BasicLake(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": envvar.GetTestProjectFromEnv(), + "region": envvar.GetTestRegionFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataplexLakeDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataplexLake_BasicLake(context), + }, + { + ResourceName: "google_dataplex_lake.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testAccDataplexLake_BasicLakeUpdate0(context), + }, + { + ResourceName: "google_dataplex_lake.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccDataplexLake_BasicLake(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_dataplex_lake" "primary" { + location = "%{region}" + name = "tf-test-lake%{random_suffix}" + description = "Lake for DCL" + display_name = "Lake for DCL" + project = "%{project_name}" + + labels = { + my-lake = "exists" + } +} + + +`, context) +} + +func testAccDataplexLake_BasicLakeUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_dataplex_lake" "primary" { + location = "%{region}" + name = "tf-test-lake%{random_suffix}" + description = "Updated description for lake" + display_name = "Lake for DCL" + project = "%{project_name}" + + labels = { + my-lake = "exists" + } +} + + +`, context) +} + +func testAccCheckDataplexLakeDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_dataplex_lake" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + billingProject := "" + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + obj := &dataplex.Lake{ + Location: dcl.String(rs.Primary.Attributes["location"]), + Name: dcl.String(rs.Primary.Attributes["name"]), + Description: 
dcl.String(rs.Primary.Attributes["description"]), + DisplayName: dcl.String(rs.Primary.Attributes["display_name"]), + Project: dcl.StringOrNil(rs.Primary.Attributes["project"]), + CreateTime: dcl.StringOrNil(rs.Primary.Attributes["create_time"]), + ServiceAccount: dcl.StringOrNil(rs.Primary.Attributes["service_account"]), + State: dataplex.LakeStateEnumRef(rs.Primary.Attributes["state"]), + Uid: dcl.StringOrNil(rs.Primary.Attributes["uid"]), + UpdateTime: dcl.StringOrNil(rs.Primary.Attributes["update_time"]), + } + + client := transport_tpg.NewDCLDataplexClient(config, config.UserAgent, billingProject, 0) + _, err := client.GetLake(context.Background(), obj) + if err == nil { + return fmt.Errorf("google_dataplex_lake still exists %v", obj) + } + } + return nil + } +} diff --git a/mmv1/third_party/terraform/services/dataplex/resource_dataplex_lake_sweeper.go b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_lake_sweeper.go new file mode 100644 index 000000000000..9fd412f4aa82 --- /dev/null +++ b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_lake_sweeper.go @@ -0,0 +1,53 @@ +package dataplex + +import ( + "context" + "log" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" +) + +func init() { + sweeper.AddTestSweepersLegacy("DataplexLake", testSweepDataplexLake) +} + +func testSweepDataplexLake(region string) error { + log.Print("[INFO][SWEEPER_LOG] Starting sweeper for DataplexLake") + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to be used for Delete arguments. 
+ d := map[string]string{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + } + + client := NewDCLDataplexClient(config, config.UserAgent, "", 0) + err = client.DeleteAllLake(context.Background(), d["project"], d["location"], isDeletableDataplexLake) + if err != nil { + return err + } + return nil +} + +func isDeletableDataplexLake(r *Lake) bool { + return sweeper.IsSweepableTestResource(*r.Name) +} diff --git a/mmv1/third_party/terraform/services/dataplex/resource_dataplex_zone.go b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_zone.go new file mode 100644 index 000000000000..4ae8af723d60 --- /dev/null +++ b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_zone.go @@ -0,0 +1,731 @@ +package dataplex + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceDataplexZone() *schema.Resource { + return &schema.Resource{ + Create: resourceDataplexZoneCreate, + Read: resourceDataplexZoneRead, + Update: resourceDataplexZoneUpdate, + Delete: resourceDataplexZoneDelete, + + Importer: &schema.ResourceImporter{ + State: resourceDataplexZoneImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + tpgresource.SetLabelsDiff, + ), + + Schema: map[string]*schema.Schema{ + "discovery_spec": { + Type: schema.TypeList, + 
Required: true, + Description: "Required. Specification of the discovery feature applied to data in this zone.", + MaxItems: 1, + Elem: DataplexZoneDiscoverySpecSchema(), + }, + + "lake": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The lake for the resource", + }, + + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The location for the resource", + }, + + "name": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The name of the zone.", + }, + + "resource_spec": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. Immutable. Specification of the resources that are referenced by the assets within this zone.", + MaxItems: 1, + Elem: DataplexZoneResourceSpecSchema(), + }, + + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. Immutable. The type of the zone. Possible values: TYPE_UNSPECIFIED, RAW, CURATED", + }, + + "description": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. Description of the zone.", + }, + + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. User friendly display name.", + }, + + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + Description: "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The project for the resource", + }, + + "asset_status": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. 
Aggregated status of the underlying assets of the zone.", + Elem: DataplexZoneAssetStatusSchema(), + }, + + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time when the zone was created.", + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: "Optional. User defined labels for the zone.\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field `effective_labels` for all of the labels present on the resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "state": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. Current state of the zone. Possible values: STATE_UNSPECIFIED, ACTIVE, CREATING, DELETING, ACTION_REQUIRED", + }, + + "terraform_labels": { + Type: schema.TypeMap, + Computed: true, + Description: "The combination of labels configured directly on the resource and default labels configured on the provider.", + }, + + "uid": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. System generated globally unique ID for the zone. This ID will be different if the zone is deleted and re-created with the same name.", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time when the zone was last updated.", + }, + }, + } +} + +func DataplexZoneDiscoverySpecSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + Description: "Required. Whether discovery is enabled.", + }, + + "csv_options": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: "Optional. Configuration for CSV data.", + MaxItems: 1, + Elem: DataplexZoneDiscoverySpecCsvOptionsSchema(), + }, + + "exclude_patterns": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. 
The list of patterns to apply for selecting data to exclude during discovery. For Cloud Storage bucket assets, these are interpreted as glob patterns used to match object names. For BigQuery dataset assets, these are interpreted as patterns to match table names.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "include_patterns": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. The list of patterns to apply for selecting data to include during discovery if only a subset of the data should considered. For Cloud Storage bucket assets, these are interpreted as glob patterns used to match object names. For BigQuery dataset assets, these are interpreted as patterns to match table names.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "json_options": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: "Optional. Configuration for Json data.", + MaxItems: 1, + Elem: DataplexZoneDiscoverySpecJsonOptionsSchema(), + }, + + "schedule": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: "Optional. Cron schedule (https://en.wikipedia.org/wiki/Cron) for running discovery periodically. Successive discovery runs must be scheduled at least 60 minutes apart. The default value is to run discovery every 60 minutes. To explicitly set a timezone to the cron tab, apply a prefix in the cron tab: \"CRON_TZ=${IANA_TIME_ZONE}\" or TZ=${IANA_TIME_ZONE}\". The ${IANA_TIME_ZONE} may only be a valid string from IANA time zone database. For example, \"CRON_TZ=America/New_York 1 * * * *\", or \"TZ=America/New_York 1 * * * *\".", + }, + }, + } +} + +func DataplexZoneDiscoverySpecCsvOptionsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "delimiter": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. The delimiter being used to separate values. 
This defaults to ','.", + }, + + "disable_type_inference": { + Type: schema.TypeBool, + Optional: true, + Description: "Optional. Whether to disable the inference of data type for CSV data. If true, all columns will be registered as strings.", + }, + + "encoding": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. The character encoding of the data. The default is UTF-8.", + }, + + "header_rows": { + Type: schema.TypeInt, + Optional: true, + Description: "Optional. The number of rows to interpret as header rows that should be skipped when reading data rows.", + }, + }, + } +} + +func DataplexZoneDiscoverySpecJsonOptionsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disable_type_inference": { + Type: schema.TypeBool, + Optional: true, + Description: "Optional. Whether to disable the inference of data type for Json data. If true, all columns will be registered as their primitive types (strings, number or boolean).", + }, + + "encoding": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. The character encoding of the data. The default is UTF-8.", + }, + }, + } +} + +func DataplexZoneResourceSpecSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "location_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. Immutable. The location type of the resources that are allowed to be attached to the assets within this zone. 
Possible values: LOCATION_TYPE_UNSPECIFIED, SINGLE_REGION, MULTI_REGION", + }, + }, + } +} + +func DataplexZoneAssetStatusSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "active_assets": { + Type: schema.TypeInt, + Computed: true, + Description: "Number of active assets.", + }, + + "security_policy_applying_assets": { + Type: schema.TypeInt, + Computed: true, + Description: "Number of assets that are in process of updating the security policy on attached resources.", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Last update time of the status.", + }, + }, + } +} + +func resourceDataplexZoneCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Zone{ + DiscoverySpec: expandDataplexZoneDiscoverySpec(d.Get("discovery_spec")), + Lake: dcl.String(d.Get("lake").(string)), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + ResourceSpec: expandDataplexZoneResourceSpec(d.Get("resource_spec")), + Type: ZoneTypeEnumRef(d.Get("type").(string)), + Description: dcl.String(d.Get("description").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + Project: dcl.String(project), + } + + id, err := obj.ID() + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLDataplexClient(config, userAgent, billingProject, 
d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyZone(context.Background(), obj, directive...) + + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating Zone: %s", err) + } + + log.Printf("[DEBUG] Finished creating Zone %q: %#v", d.Id(), res) + + return resourceDataplexZoneRead(d, meta) +} + +func resourceDataplexZoneRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Zone{ + DiscoverySpec: expandDataplexZoneDiscoverySpec(d.Get("discovery_spec")), + Lake: dcl.String(d.Get("lake").(string)), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + ResourceSpec: expandDataplexZoneResourceSpec(d.Get("resource_spec")), + Type: ZoneTypeEnumRef(d.Get("type").(string)), + Description: dcl.String(d.Get("description").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + Project: dcl.String(project), + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + 
return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetZone(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("DataplexZone %q", d.Id()) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("discovery_spec", flattenDataplexZoneDiscoverySpec(res.DiscoverySpec)); err != nil { + return fmt.Errorf("error setting discovery_spec in state: %s", err) + } + if err = d.Set("lake", res.Lake); err != nil { + return fmt.Errorf("error setting lake in state: %s", err) + } + if err = d.Set("location", res.Location); err != nil { + return fmt.Errorf("error setting location in state: %s", err) + } + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + if err = d.Set("resource_spec", flattenDataplexZoneResourceSpec(res.ResourceSpec)); err != nil { + return fmt.Errorf("error setting resource_spec in state: %s", err) + } + if err = d.Set("type", res.Type); err != nil { + return fmt.Errorf("error setting type in state: %s", err) + } + if err = d.Set("description", res.Description); err != nil { + return fmt.Errorf("error setting description in state: %s", err) + } + if err = d.Set("display_name", res.DisplayName); err != nil { + return fmt.Errorf("error setting display_name in state: %s", err) + } + if err = d.Set("effective_labels", res.Labels); err != nil { + return fmt.Errorf("error setting effective_labels in state: %s", err) + } + if err = d.Set("project", res.Project); err != nil { + return fmt.Errorf("error setting project in state: %s", err) + } + if err = d.Set("asset_status", flattenDataplexZoneAssetStatus(res.AssetStatus)); err != nil { + return fmt.Errorf("error setting asset_status in state: %s", err) + } + if err = d.Set("create_time", res.CreateTime); err != nil { + return fmt.Errorf("error setting create_time in state: %s", err) + } + if err = d.Set("labels", 
flattenDataplexZoneLabels(res.Labels, d)); err != nil { + return fmt.Errorf("error setting labels in state: %s", err) + } + if err = d.Set("state", res.State); err != nil { + return fmt.Errorf("error setting state in state: %s", err) + } + if err = d.Set("terraform_labels", flattenDataplexZoneTerraformLabels(res.Labels, d)); err != nil { + return fmt.Errorf("error setting terraform_labels in state: %s", err) + } + if err = d.Set("uid", res.Uid); err != nil { + return fmt.Errorf("error setting uid in state: %s", err) + } + if err = d.Set("update_time", res.UpdateTime); err != nil { + return fmt.Errorf("error setting update_time in state: %s", err) + } + + return nil +} +func resourceDataplexZoneUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Zone{ + DiscoverySpec: expandDataplexZoneDiscoverySpec(d.Get("discovery_spec")), + Lake: dcl.String(d.Get("lake").(string)), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + ResourceSpec: expandDataplexZoneResourceSpec(d.Get("resource_spec")), + Type: ZoneTypeEnumRef(d.Get("type").(string)), + Description: dcl.String(d.Get("description").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + Project: dcl.String(project), + } + directive := tpgdclresource.UpdateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + 
d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyZone(context.Background(), obj, directive...) + + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error updating Zone: %s", err) + } + + log.Printf("[DEBUG] Finished creating Zone %q: %#v", d.Id(), res) + + return resourceDataplexZoneRead(d, meta) +} + +func resourceDataplexZoneDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Zone{ + DiscoverySpec: expandDataplexZoneDiscoverySpec(d.Get("discovery_spec")), + Lake: dcl.String(d.Get("lake").(string)), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + ResourceSpec: expandDataplexZoneResourceSpec(d.Get("resource_spec")), + Type: ZoneTypeEnumRef(d.Get("type").(string)), + Description: dcl.String(d.Get("description").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + Project: dcl.String(project), + } + + log.Printf("[DEBUG] Deleting Zone %q", d.Id()) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, 
err) + } else { + client.Config.BasePath = bp + } + if err := client.DeleteZone(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting Zone: %s", err) + } + + log.Printf("[DEBUG] Finished deleting Zone %q", d.Id()) + return nil +} + +func resourceDataplexZoneImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/lakes/(?P[^/]+)/zones/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func expandDataplexZoneDiscoverySpec(o interface{}) *ZoneDiscoverySpec { + if o == nil { + return EmptyZoneDiscoverySpec + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyZoneDiscoverySpec + } + obj := objArr[0].(map[string]interface{}) + return &ZoneDiscoverySpec{ + Enabled: dcl.Bool(obj["enabled"].(bool)), + CsvOptions: expandDataplexZoneDiscoverySpecCsvOptions(obj["csv_options"]), + ExcludePatterns: tpgdclresource.ExpandStringArray(obj["exclude_patterns"]), + IncludePatterns: tpgdclresource.ExpandStringArray(obj["include_patterns"]), + JsonOptions: expandDataplexZoneDiscoverySpecJsonOptions(obj["json_options"]), + Schedule: dcl.StringOrNil(obj["schedule"].(string)), + } +} + +func flattenDataplexZoneDiscoverySpec(obj *ZoneDiscoverySpec) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "enabled": obj.Enabled, + "csv_options": flattenDataplexZoneDiscoverySpecCsvOptions(obj.CsvOptions), + 
"exclude_patterns": obj.ExcludePatterns, + "include_patterns": obj.IncludePatterns, + "json_options": flattenDataplexZoneDiscoverySpecJsonOptions(obj.JsonOptions), + "schedule": obj.Schedule, + } + + return []interface{}{transformed} + +} + +func expandDataplexZoneDiscoverySpecCsvOptions(o interface{}) *ZoneDiscoverySpecCsvOptions { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &ZoneDiscoverySpecCsvOptions{ + Delimiter: dcl.String(obj["delimiter"].(string)), + DisableTypeInference: dcl.Bool(obj["disable_type_inference"].(bool)), + Encoding: dcl.String(obj["encoding"].(string)), + HeaderRows: dcl.Int64(int64(obj["header_rows"].(int))), + } +} + +func flattenDataplexZoneDiscoverySpecCsvOptions(obj *ZoneDiscoverySpecCsvOptions) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "delimiter": obj.Delimiter, + "disable_type_inference": obj.DisableTypeInference, + "encoding": obj.Encoding, + "header_rows": obj.HeaderRows, + } + + return []interface{}{transformed} + +} + +func expandDataplexZoneDiscoverySpecJsonOptions(o interface{}) *ZoneDiscoverySpecJsonOptions { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &ZoneDiscoverySpecJsonOptions{ + DisableTypeInference: dcl.Bool(obj["disable_type_inference"].(bool)), + Encoding: dcl.String(obj["encoding"].(string)), + } +} + +func flattenDataplexZoneDiscoverySpecJsonOptions(obj *ZoneDiscoverySpecJsonOptions) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "disable_type_inference": obj.DisableTypeInference, + "encoding": obj.Encoding, + } + + return []interface{}{transformed} + +} + +func expandDataplexZoneResourceSpec(o interface{}) *ZoneResourceSpec { + if o 
== nil { + return EmptyZoneResourceSpec + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyZoneResourceSpec + } + obj := objArr[0].(map[string]interface{}) + return &ZoneResourceSpec{ + LocationType: ZoneResourceSpecLocationTypeEnumRef(obj["location_type"].(string)), + } +} + +func flattenDataplexZoneResourceSpec(obj *ZoneResourceSpec) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "location_type": obj.LocationType, + } + + return []interface{}{transformed} + +} + +func flattenDataplexZoneAssetStatus(obj *ZoneAssetStatus) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "active_assets": obj.ActiveAssets, + "security_policy_applying_assets": obj.SecurityPolicyApplyingAssets, + "update_time": obj.UpdateTime, + } + + return []interface{}{transformed} + +} + +func flattenDataplexZoneLabels(v map[string]string, d *schema.ResourceData) interface{} { + if v == nil { + return nil + } + + transformed := make(map[string]interface{}) + if l, ok := d.Get("labels").(map[string]interface{}); ok { + for k := range l { + transformed[k] = v[k] + } + } + + return transformed +} + +func flattenDataplexZoneTerraformLabels(v map[string]string, d *schema.ResourceData) interface{} { + if v == nil { + return nil + } + + transformed := make(map[string]interface{}) + if l, ok := d.Get("terraform_labels").(map[string]interface{}); ok { + for k := range l { + transformed[k] = v[k] + } + } + + return transformed +} diff --git a/mmv1/third_party/terraform/services/dataplex/resource_dataplex_zone_generated_test.go.tmpl b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_zone_generated_test.go.tmpl new file mode 100644 index 000000000000..a915cddac1e1 --- /dev/null +++ b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_zone_generated_test.go.tmpl @@ -0,0 +1,176 @@ +package dataplex_test + +import ( + "context" + 
"fmt" + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" +{{- if eq $.TargetVersionName "ga" }} + dataplex "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataplex" +{{- else }} + dataplex "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataplex/beta" +{{- end }} + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func TestAccDataplexZone_BasicZone(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": envvar.GetTestProjectFromEnv(), + "region": envvar.GetTestRegionFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataplexZoneDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataplexZone_BasicZone(context), + }, + { + ResourceName: "google_dataplex_zone.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testAccDataplexZone_BasicZoneUpdate0(context), + }, + { + ResourceName: "google_dataplex_zone.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccDataplexZone_BasicZone(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_dataplex_zone" "primary" { + discovery_spec { + enabled = false + } + + lake = google_dataplex_lake.basic.name + location = "%{region}" + name = 
"tf-test-zone%{random_suffix}" + + resource_spec { + location_type = "MULTI_REGION" + } + + type = "RAW" + description = "Zone for DCL" + display_name = "Zone for DCL" + project = "%{project_name}" + labels = {} +} + +resource "google_dataplex_lake" "basic" { + location = "%{region}" + name = "tf-test-lake%{random_suffix}" + description = "Lake for DCL" + display_name = "Lake for DCL" + project = "%{project_name}" + + labels = { + my-lake = "exists" + } +} + + +`, context) +} + +func testAccDataplexZone_BasicZoneUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_dataplex_zone" "primary" { + discovery_spec { + enabled = false + } + + lake = google_dataplex_lake.basic.name + location = "%{region}" + name = "tf-test-zone%{random_suffix}" + + resource_spec { + location_type = "MULTI_REGION" + } + + type = "RAW" + description = "Zone for DCL Updated" + display_name = "Zone for DCL" + project = "%{project_name}" + + labels = { + updated_label = "exists" + } +} + +resource "google_dataplex_lake" "basic" { + location = "%{region}" + name = "tf-test-lake%{random_suffix}" + description = "Lake for DCL" + display_name = "Lake for DCL" + project = "%{project_name}" + + labels = { + my-lake = "exists" + } +} + + +`, context) +} + +func testAccCheckDataplexZoneDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "rs.google_dataplex_zone" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + billingProject := "" + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + obj := &dataplex.Zone{ + Lake: dcl.String(rs.Primary.Attributes["lake"]), + Location: dcl.String(rs.Primary.Attributes["location"]), + Name: dcl.String(rs.Primary.Attributes["name"]), + Type: dataplex.ZoneTypeEnumRef(rs.Primary.Attributes["type"]), + Description: 
dcl.String(rs.Primary.Attributes["description"]), + DisplayName: dcl.String(rs.Primary.Attributes["display_name"]), + Project: dcl.StringOrNil(rs.Primary.Attributes["project"]), + CreateTime: dcl.StringOrNil(rs.Primary.Attributes["create_time"]), + State: dataplex.ZoneStateEnumRef(rs.Primary.Attributes["state"]), + Uid: dcl.StringOrNil(rs.Primary.Attributes["uid"]), + UpdateTime: dcl.StringOrNil(rs.Primary.Attributes["update_time"]), + } + + client := transport_tpg.NewDCLDataplexClient(config, config.UserAgent, billingProject, 0) + _, err := client.GetZone(context.Background(), obj) + if err == nil { + return fmt.Errorf("google_dataplex_zone still exists %v", obj) + } + } + return nil + } +} diff --git a/mmv1/third_party/terraform/services/dataplex/zone.go.tmpl b/mmv1/third_party/terraform/services/dataplex/zone.go.tmpl new file mode 100644 index 000000000000..94bc6ef6008d --- /dev/null +++ b/mmv1/third_party/terraform/services/dataplex/zone.go.tmpl @@ -0,0 +1,744 @@ +package dataplex + +import ( + "bytes" + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "time" + +{{- if ne $.TargetVersionName "ga" }} + "google.golang.org/api/googleapi" +{{- end }} + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +{{- if eq $.TargetVersionName "ga" }} + "google.golang.org/api/googleapi" +{{- end }} +) + +type Zone struct { + Name *string `json:"name"` + DisplayName *string `json:"displayName"` + Uid *string `json:"uid"` + CreateTime *string `json:"createTime"` + UpdateTime *string `json:"updateTime"` + Labels map[string]string `json:"labels"` + Description *string `json:"description"` + State *ZoneStateEnum `json:"state"` + Type *ZoneTypeEnum `json:"type"` + DiscoverySpec *ZoneDiscoverySpec `json:"discoverySpec"` + ResourceSpec *ZoneResourceSpec `json:"resourceSpec"` + AssetStatus *ZoneAssetStatus `json:"assetStatus"` + Project *string `json:"project"` + Location *string `json:"location"` + Lake *string `json:"lake"` +} + +func (r *Zone) 
String() string { + return dcl.SprintResource(r) +} + +// The enum ZoneStateEnum. +type ZoneStateEnum string + +// ZoneStateEnumRef returns a *ZoneStateEnum with the value of string s +// If the empty string is provided, nil is returned. +func ZoneStateEnumRef(s string) *ZoneStateEnum { + v := ZoneStateEnum(s) + return &v +} + +func (v ZoneStateEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"STATE_UNSPECIFIED", "ACTIVE", "CREATING", "DELETING", "ACTION_REQUIRED"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "ZoneStateEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum ZoneTypeEnum. +type ZoneTypeEnum string + +// ZoneTypeEnumRef returns a *ZoneTypeEnum with the value of string s +// If the empty string is provided, nil is returned. +func ZoneTypeEnumRef(s string) *ZoneTypeEnum { + v := ZoneTypeEnum(s) + return &v +} + +func (v ZoneTypeEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"TYPE_UNSPECIFIED", "RAW", "CURATED"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "ZoneTypeEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum ZoneResourceSpecLocationTypeEnum. +type ZoneResourceSpecLocationTypeEnum string + +// ZoneResourceSpecLocationTypeEnumRef returns a *ZoneResourceSpecLocationTypeEnum with the value of string s +// If the empty string is provided, nil is returned. +func ZoneResourceSpecLocationTypeEnumRef(s string) *ZoneResourceSpecLocationTypeEnum { + v := ZoneResourceSpecLocationTypeEnum(s) + return &v +} + +func (v ZoneResourceSpecLocationTypeEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. 
+ return nil + } + for _, s := range []string{"LOCATION_TYPE_UNSPECIFIED", "SINGLE_REGION", "MULTI_REGION"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "ZoneResourceSpecLocationTypeEnum", + Value: string(v), + Valid: []string{}, + } +} + +type ZoneDiscoverySpec struct { + empty bool `json:"-"` + Enabled *bool `json:"enabled"` + IncludePatterns []string `json:"includePatterns"` + ExcludePatterns []string `json:"excludePatterns"` + CsvOptions *ZoneDiscoverySpecCsvOptions `json:"csvOptions"` + JsonOptions *ZoneDiscoverySpecJsonOptions `json:"jsonOptions"` + Schedule *string `json:"schedule"` +} + +type jsonZoneDiscoverySpec ZoneDiscoverySpec + +func (r *ZoneDiscoverySpec) UnmarshalJSON(data []byte) error { + var res jsonZoneDiscoverySpec + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyZoneDiscoverySpec + } else { + + r.Enabled = res.Enabled + + r.IncludePatterns = res.IncludePatterns + + r.ExcludePatterns = res.ExcludePatterns + + r.CsvOptions = res.CsvOptions + + r.JsonOptions = res.JsonOptions + + r.Schedule = res.Schedule + + } + return nil +} + +// This object is used to assert a desired state where this ZoneDiscoverySpec is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyZoneDiscoverySpec *ZoneDiscoverySpec = &ZoneDiscoverySpec{empty: true} + +func (r *ZoneDiscoverySpec) Empty() bool { + return r.empty +} + +func (r *ZoneDiscoverySpec) String() string { + return dcl.SprintResource(r) +} + +func (r *ZoneDiscoverySpec) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ZoneDiscoverySpecCsvOptions struct { + empty bool `json:"-"` + HeaderRows *int64 `json:"headerRows"` + Delimiter *string `json:"delimiter"` + Encoding *string `json:"encoding"` + DisableTypeInference *bool `json:"disableTypeInference"` +} + +type jsonZoneDiscoverySpecCsvOptions ZoneDiscoverySpecCsvOptions + +func (r *ZoneDiscoverySpecCsvOptions) UnmarshalJSON(data []byte) error { + var res jsonZoneDiscoverySpecCsvOptions + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyZoneDiscoverySpecCsvOptions + } else { + + r.HeaderRows = res.HeaderRows + + r.Delimiter = res.Delimiter + + r.Encoding = res.Encoding + + r.DisableTypeInference = res.DisableTypeInference + + } + return nil +} + +// This object is used to assert a desired state where this ZoneDiscoverySpecCsvOptions is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyZoneDiscoverySpecCsvOptions *ZoneDiscoverySpecCsvOptions = &ZoneDiscoverySpecCsvOptions{empty: true} + +func (r *ZoneDiscoverySpecCsvOptions) Empty() bool { + return r.empty +} + +func (r *ZoneDiscoverySpecCsvOptions) String() string { + return dcl.SprintResource(r) +} + +func (r *ZoneDiscoverySpecCsvOptions) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ZoneDiscoverySpecJsonOptions struct { + empty bool `json:"-"` + Encoding *string `json:"encoding"` + DisableTypeInference *bool `json:"disableTypeInference"` +} + +type jsonZoneDiscoverySpecJsonOptions ZoneDiscoverySpecJsonOptions + +func (r *ZoneDiscoverySpecJsonOptions) UnmarshalJSON(data []byte) error { + var res jsonZoneDiscoverySpecJsonOptions + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyZoneDiscoverySpecJsonOptions + } else { + + r.Encoding = res.Encoding + + r.DisableTypeInference = res.DisableTypeInference + + } + return nil +} + +// This object is used to assert a desired state where this ZoneDiscoverySpecJsonOptions is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyZoneDiscoverySpecJsonOptions *ZoneDiscoverySpecJsonOptions = &ZoneDiscoverySpecJsonOptions{empty: true} + +func (r *ZoneDiscoverySpecJsonOptions) Empty() bool { + return r.empty +} + +func (r *ZoneDiscoverySpecJsonOptions) String() string { + return dcl.SprintResource(r) +} + +func (r *ZoneDiscoverySpecJsonOptions) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ZoneResourceSpec struct { + empty bool `json:"-"` + LocationType *ZoneResourceSpecLocationTypeEnum `json:"locationType"` +} + +type jsonZoneResourceSpec ZoneResourceSpec + +func (r *ZoneResourceSpec) UnmarshalJSON(data []byte) error { + var res jsonZoneResourceSpec + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyZoneResourceSpec + } else { + + r.LocationType = res.LocationType + + } + return nil +} + +// This object is used to assert a desired state where this ZoneResourceSpec is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyZoneResourceSpec *ZoneResourceSpec = &ZoneResourceSpec{empty: true} + +func (r *ZoneResourceSpec) Empty() bool { + return r.empty +} + +func (r *ZoneResourceSpec) String() string { + return dcl.SprintResource(r) +} + +func (r *ZoneResourceSpec) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ZoneAssetStatus struct { + empty bool `json:"-"` + UpdateTime *string `json:"updateTime"` + ActiveAssets *int64 `json:"activeAssets"` + SecurityPolicyApplyingAssets *int64 `json:"securityPolicyApplyingAssets"` +} + +type jsonZoneAssetStatus ZoneAssetStatus + +func (r *ZoneAssetStatus) UnmarshalJSON(data []byte) error { + var res jsonZoneAssetStatus + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyZoneAssetStatus + } else { + + r.UpdateTime = res.UpdateTime + + r.ActiveAssets = res.ActiveAssets + + r.SecurityPolicyApplyingAssets = res.SecurityPolicyApplyingAssets + + } + return nil +} + +// This object is used to assert a desired state where this ZoneAssetStatus is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
var EmptyZoneAssetStatus *ZoneAssetStatus = &ZoneAssetStatus{empty: true}

// Empty reports whether r is the canonical empty sentinel
// (see EmptyZoneAssetStatus and UnmarshalJSON).
func (r *ZoneAssetStatus) Empty() bool {
	return r.empty
}

// String returns a printable representation of r via dcl.SprintResource.
func (r *ZoneAssetStatus) String() string {
	return dcl.SprintResource(r)
}

// HashCode returns a hex-encoded SHA-256 digest of r's string form, used for
// cheap equality comparisons between resource bodies.
func (r *ZoneAssetStatus) HashCode() string {
	// Placeholder for a more complex hash method that handles ordering, etc
	// Hash resource body for easy comparison later
	hash := sha256.Sum256([]byte(r.String()))
	return fmt.Sprintf("%x", hash)
}

// Describe returns a simple description of this resource to ensure that automated tools
// can identify it.
func (r *Zone) Describe() dcl.ServiceTypeVersion {
	return dcl.ServiceTypeVersion{
		Service: "dataplex",
		Type:    "Zone",
		// The reported Version depends on the provider build target; this file
		// is a Go template expanded per target version.
{{- if ne $.TargetVersionName "ga" }}
		Version: "beta",
{{- else }}
		// NOTE(review): the GA branch reports "dataplex" (the service name)
		// rather than a version string — confirm this matches generator output.
		Version: "dataplex",
{{- end }}
	}
}

// ID returns the canonical identity path of the zone, of the form
// projects/{project}/locations/{location}/lakes/{lake}/zones/{name}.
// It fails only if extracting derived fields from r fails.
func (r *Zone) ID() (string, error) {
	if err := extractZoneFields(r); err != nil {
		return "", err
	}
	nr := r.urlNormalized()
	params := map[string]interface{}{
		"name":           dcl.ValueOrEmptyString(nr.Name),
		"display_name":   dcl.ValueOrEmptyString(nr.DisplayName),
		"uid":            dcl.ValueOrEmptyString(nr.Uid),
		"create_time":    dcl.ValueOrEmptyString(nr.CreateTime),
		"update_time":    dcl.ValueOrEmptyString(nr.UpdateTime),
		"labels":         dcl.ValueOrEmptyString(nr.Labels),
		"description":    dcl.ValueOrEmptyString(nr.Description),
		"state":          dcl.ValueOrEmptyString(nr.State),
		"type":           dcl.ValueOrEmptyString(nr.Type),
		"discovery_spec": dcl.ValueOrEmptyString(nr.DiscoverySpec),
		"resource_spec":  dcl.ValueOrEmptyString(nr.ResourceSpec),
		"asset_status":   dcl.ValueOrEmptyString(nr.AssetStatus),
		"project":        dcl.ValueOrEmptyString(nr.Project),
		"location":       dcl.ValueOrEmptyString(nr.Location),
		"lake":           dcl.ValueOrEmptyString(nr.Lake),
	}
	return dcl.Nprintf("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/lakes/{{ "{{" }}lake{{ "}}" }}/zones/{{ "{{" }}name{{ "}}" }}", params), nil
}

// ZoneMaxPage requests the server's maximum page size when listing.
const ZoneMaxPage = -1

// ZoneList is one page of Zone list results plus the state needed to
// fetch the next page.
type ZoneList struct {
	Items []*Zone

	// nextToken is the server-issued pagination token; empty means last page.
	nextToken string

	// pageSize is carried over so Next requests pages of the same size.
	pageSize int32

	// resource holds the parent parameters (project/location/lake) for the list.
	resource *Zone
}

// HasNext reports whether another page of results is available.
func (l *ZoneList) HasNext() bool {
	return l.nextToken != ""
}

// Next replaces l.Items with the next page of results. It returns an error
// if there is no next page or the underlying list call fails.
func (l *ZoneList) Next(ctx context.Context, c *Client) error {
	ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second))
	defer cancel()

	if !l.HasNext() {
		return fmt.Errorf("no next page")
	}
	items, token, err := c.listZone(ctx, l.resource, l.nextToken, l.pageSize)
	if err != nil {
		return err
	}
	l.Items = items
	l.nextToken = token
	return err
}

// ListZone lists Zones under the given parent using the maximum page size.
func (c *Client) ListZone(ctx context.Context, project, location, lake string) (*ZoneList, error) {
	ctx = dcl.ContextWithRequestID(ctx)
	ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second))
	defer cancel()

	return c.ListZoneWithMaxResults(ctx, project, location, lake, ZoneMaxPage)

}

// ListZoneWithMaxResults lists Zones under the given parent with an explicit
// page size, returning the first page.
func (c *Client) ListZoneWithMaxResults(ctx context.Context, project, location, lake string, pageSize int32) (*ZoneList, error) {
	ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second))
	defer cancel()

	// Create a resource object so that we can use proper url normalization methods.
	r := &Zone{
		Project:  &project,
		Location: &location,
		Lake:     &lake,
	}
	items, token, err := c.listZone(ctx, r, "", pageSize)
	if err != nil {
		return nil, err
	}
	return &ZoneList{
		Items:     items,
		nextToken: token,
		pageSize:  pageSize,
		resource:  r,
	}, nil
}

// GetZone fetches the Zone identified by r, canonicalizes the API response
// against r, and returns the result. A missing resource is surfaced as a
// *googleapi.Error with code 404.
func (c *Client) GetZone(ctx context.Context, r *Zone) (*Zone, error) {
	ctx = dcl.ContextWithRequestID(ctx)
	ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second))
	defer cancel()

	// This is *purposefully* suppressing errors.
	// This function is used with url-normalized values + not URL normalized values.
	// URL Normalized values will throw unintentional errors, since those values are not of the proper parent form.
	extractZoneFields(r)

	b, err := c.getZoneRaw(ctx, r)
	if err != nil {
		if dcl.IsNotFound(err) {
			return nil, &googleapi.Error{
				Code:    404,
				Message: err.Error(),
			}
		}
		return nil, err
	}
	result, err := unmarshalZone(b, c, r)
	if err != nil {
		return nil, err
	}
	// Parent/identity fields are taken from the request rather than the
	// response so they keep the caller's (url-normalized) form.
	result.Project = r.Project
	result.Location = r.Location
	result.Lake = r.Lake
	result.Name = r.Name

	c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result)
	c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r)
	result, err = canonicalizeZoneNewState(c, result, r)
	if err != nil {
		return nil, err
	}
	if err := postReadExtractZoneFields(result); err != nil {
		return result, err
	}
	c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result)

	return result, nil
}

// DeleteZone deletes the Zone identified by r and waits for the delete
// operation to complete.
func (c *Client) DeleteZone(ctx context.Context, r *Zone) error {
	ctx = dcl.ContextWithRequestID(ctx)
	ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second))
	defer cancel()

	if r == nil {
		return fmt.Errorf("Zone resource is nil")
	}
	c.Config.Logger.InfoWithContext(ctx, "Deleting Zone...")
	deleteOp := deleteZoneOperation{}
	return deleteOp.do(ctx, r, c)
}

// DeleteAllZone deletes all resources that the filter functions returns true on.
+func (c *Client) DeleteAllZone(ctx context.Context, project, location, lake string, filter func(*Zone) bool) error { + listObj, err := c.ListZone(ctx, project, location, lake) + if err != nil { + return err + } + + err = c.deleteAllZone(ctx, filter, listObj.Items) + if err != nil { + return err + } + for listObj.HasNext() { + err = listObj.Next(ctx, c) + if err != nil { + return nil + } + err = c.deleteAllZone(ctx, filter, listObj.Items) + if err != nil { + return err + } + } + return nil +} + +func (c *Client) ApplyZone(ctx context.Context, rawDesired *Zone, opts ...dcl.ApplyOption) (*Zone, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + ctx = dcl.ContextWithRequestID(ctx) + var resultNewState *Zone + err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + newState, err := applyZoneHelper(c, ctx, rawDesired, opts...) + resultNewState = newState + if err != nil { + // If the error is 409, there is conflict in resource update. + // Here we want to apply changes based on latest state. + if dcl.IsConflictError(err) { + return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} + } + return nil, err + } + return nil, nil + }, c.Config.RetryProvider) + return resultNewState, err +} + +func applyZoneHelper(c *Client, ctx context.Context, rawDesired *Zone, opts ...dcl.ApplyOption) (*Zone, error) { + c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyZone...") + c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) + + // 1.1: Validation of user-specified fields in desired state. + if err := rawDesired.validate(); err != nil { + return nil, err + } + + if err := extractZoneFields(rawDesired); err != nil { + return nil, err + } + + initial, desired, fieldDiffs, err := c.zoneDiffsForRawDesired(ctx, rawDesired, opts...) 
+ if err != nil { + return nil, fmt.Errorf("failed to create a diff: %w", err) + } + + diffs, err := convertFieldDiffsToZoneDiffs(c.Config, fieldDiffs, opts) + if err != nil { + return nil, err + } + + // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). + + // 2.3: Lifecycle Directive Check + var create bool + lp := dcl.FetchLifecycleParams(opts) + if initial == nil { + if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} + } + create = true + } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), + } + } else { + for _, d := range diffs { + if d.RequiresRecreate { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), + } + } + if dcl.HasLifecycleParam(lp, dcl.BlockModification) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} + } + } + } + + // 2.4 Imperative Request Planning + var ops []zoneApiOperation + if create { + ops = append(ops, &createZoneOperation{}) + } else { + for _, d := range diffs { + ops = append(ops, d.UpdateOp) + } + } + c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) + + // 2.5 Request Actuation + for _, op := range ops { + c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) + if err := op.do(ctx, desired, c); err != nil { + c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", op, op, err) + return nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) + } + return applyZoneDiff(c, ctx, desired, rawDesired, ops, opts...) 
+} + +func applyZoneDiff(c *Client, ctx context.Context, desired *Zone, rawDesired *Zone, ops []zoneApiOperation, opts ...dcl.ApplyOption) (*Zone, error) { + // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") + rawNew, err := c.GetZone(ctx, desired) + if err != nil { + return nil, err + } + // Get additional values from the first response. + // These values should be merged into the newState above. + if len(ops) > 0 { + lastOp := ops[len(ops)-1] + if o, ok := lastOp.(*createZoneOperation); ok { + if r, hasR := o.FirstResponse(); hasR { + + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") + + fullResp, err := unmarshalMapZone(r, c, rawDesired) + if err != nil { + return nil, err + } + + rawNew, err = canonicalizeZoneNewState(c, rawNew, fullResp) + if err != nil { + return nil, err + } + } + } + } + + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) + // 3.2b Canonicalization of raw new state using raw desired state + newState, err := canonicalizeZoneNewState(c, rawNew, rawDesired) + if err != nil { + return rawNew, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) + // 3.3 Comparison of the new state and raw desired state. + // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE + newDesired, err := canonicalizeZoneDesiredState(rawDesired, newState) + if err != nil { + return newState, err + } + + if err := postReadExtractZoneFields(newState); err != nil { + return newState, err + } + + // Need to ensure any transformations made here match acceptably in differ. 
+ if err := postReadExtractZoneFields(newDesired); err != nil { + return newState, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) + newDiffs, err := diffZone(c, newDesired, newState) + if err != nil { + return newState, err + } + + if len(newDiffs) == 0 { + c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") + } else { + c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) + diffMessages := make([]string, len(newDiffs)) + for i, d := range newDiffs { + diffMessages[i] = fmt.Sprintf("%v", d) + } + return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} + } + c.Config.Logger.InfoWithContext(ctx, "Done Apply.") + return newState, nil +} + +func (r *Zone) GetPolicy(basePath string) (string, string, *bytes.Buffer, error) { + u := r.getPolicyURL(basePath) + body := &bytes.Buffer{} + u, err := dcl.AddQueryParams(u, map[string]string{"optionsRequestedPolicyVersion": fmt.Sprintf("%d", r.IAMPolicyVersion())}) + if err != nil { + return "", "", nil, err + } + return u, "", body, nil +} diff --git a/mmv1/third_party/terraform/services/dataplex/zone_internal.go.tmpl b/mmv1/third_party/terraform/services/dataplex/zone_internal.go.tmpl new file mode 100644 index 000000000000..97c9d297a357 --- /dev/null +++ b/mmv1/third_party/terraform/services/dataplex/zone_internal.go.tmpl @@ -0,0 +1,2830 @@ +package dataplex + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "strings" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource/operations" +) + +func (r *Zone) validate() error { + + if err := dcl.Required(r, "name"); err != nil { + return err + } + if err := dcl.Required(r, "type"); err != nil { + return err + } + if err := dcl.Required(r, "discoverySpec"); err != nil { + return err + } + if err := dcl.Required(r, "resourceSpec"); err != nil { + return err 
+ } + if err := dcl.RequiredParameter(r.Project, "Project"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Location, "Location"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Lake, "Lake"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.DiscoverySpec) { + if err := r.DiscoverySpec.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.ResourceSpec) { + if err := r.ResourceSpec.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.AssetStatus) { + if err := r.AssetStatus.validate(); err != nil { + return err + } + } + return nil +} +func (r *ZoneDiscoverySpec) validate() error { + if err := dcl.Required(r, "enabled"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.CsvOptions) { + if err := r.CsvOptions.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.JsonOptions) { + if err := r.JsonOptions.validate(); err != nil { + return err + } + } + return nil +} +func (r *ZoneDiscoverySpecCsvOptions) validate() error { + return nil +} +func (r *ZoneDiscoverySpecJsonOptions) validate() error { + return nil +} +func (r *ZoneResourceSpec) validate() error { + if err := dcl.Required(r, "locationType"); err != nil { + return err + } + return nil +} +func (r *ZoneAssetStatus) validate() error { + return nil +} +func (r *Zone) basePath() string { + params := map[string]interface{}{} + return dcl.Nprintf("https://dataplex.googleapis.com/v1/", params) +} + +func (r *Zone) getURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "lake": dcl.ValueOrEmptyString(nr.Lake), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/lakes/{{ "{{" }}lake{{ "}}" }}/zones/{{ "{{" }}name{{ "}}" }}", nr.basePath(), 
userBasePath, params), nil +} + +func (r *Zone) listURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "lake": dcl.ValueOrEmptyString(nr.Lake), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/lakes/{{ "{{" }}lake{{ "}}" }}/zones", nr.basePath(), userBasePath, params), nil + +} + +func (r *Zone) createURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "lake": dcl.ValueOrEmptyString(nr.Lake), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/lakes/{{ "{{" }}lake{{ "}}" }}/zones?zoneId={{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil + +} + +func (r *Zone) deleteURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "lake": dcl.ValueOrEmptyString(nr.Lake), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/lakes/{{ "{{" }}lake{{ "}}" }}/zones/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +func (r *Zone) SetPolicyURL(userBasePath string) string { + nr := r.urlNormalized() + fields := map[string]interface{}{} + return dcl.URL("", nr.basePath(), userBasePath, fields) +} + +func (r *Zone) SetPolicyVerb() string { + return "" +} + +func (r *Zone) getPolicyURL(userBasePath string) string { + nr := r.urlNormalized() + fields := map[string]interface{}{} + return dcl.URL("", nr.basePath(), userBasePath, fields) +} + +func (r *Zone) 
IAMPolicyVersion() int { + return 3 +} + +// zoneApiOperation represents a mutable operation in the underlying REST +// API such as Create, Update, or Delete. +type zoneApiOperation interface { + do(context.Context, *Zone, *Client) error +} + +// newUpdateZoneUpdateZoneRequest creates a request for an +// Zone resource's UpdateZone update type by filling in the update +// fields based on the intended state of the resource. +func newUpdateZoneUpdateZoneRequest(ctx context.Context, f *Zone, c *Client) (map[string]interface{}, error) { + req := map[string]interface{}{} + res := f + _ = res + + if v, err := dcl.DeriveField("projects/%s/locations/%s/lakes/%s/zones/%s", f.Name, dcl.SelfLinkToName(f.Project), dcl.SelfLinkToName(f.Location), dcl.SelfLinkToName(f.Lake), dcl.SelfLinkToName(f.Name)); err != nil { + return nil, fmt.Errorf("error expanding Name into name: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["name"] = v + } + if v := f.DisplayName; !dcl.IsEmptyValueIndirect(v) { + req["displayName"] = v + } + if v := f.Labels; !dcl.IsEmptyValueIndirect(v) { + req["labels"] = v + } + if v := f.Description; !dcl.IsEmptyValueIndirect(v) { + req["description"] = v + } + if v, err := expandZoneDiscoverySpec(c, f.DiscoverySpec, res); err != nil { + return nil, fmt.Errorf("error expanding DiscoverySpec into discoverySpec: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["discoverySpec"] = v + } + if v, err := expandZoneAssetStatus(c, f.AssetStatus, res); err != nil { + return nil, fmt.Errorf("error expanding AssetStatus into assetStatus: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["assetStatus"] = v + } + req["name"] = fmt.Sprintf("projects/%s/locations/%s/lakes/%s/zones/%s", *f.Project, *f.Location, *f.Lake, *f.Name) + + return req, nil +} + +// marshalUpdateZoneUpdateZoneRequest converts the update into +// the final JSON request body. 
func marshalUpdateZoneUpdateZoneRequest(c *Client, m map[string]interface{}) ([]byte, error) {

	return json.Marshal(m)
}

type updateZoneUpdateZoneOperation struct {
	// If the update operation has the REQUIRES_APPLY_OPTIONS trait, this will be populated.
	// Usually it will be nil - this is to prevent us from accidentally depending on apply
	// options, which should usually be unnecessary.
	ApplyOptions []dcl.ApplyOption
	FieldDiffs   []*dcl.FieldDiff
}

// do creates a request and sends it to the appropriate URL. In most operations,
// do will transcribe a subset of the resource into a request object and send a
// PUT request to a single URL.

func (op *updateZoneUpdateZoneOperation) do(ctx context.Context, r *Zone, c *Client) error {
	// Confirm the resource exists before attempting to patch it.
	_, err := c.GetZone(ctx, r)
	if err != nil {
		return err
	}

	u, err := r.updateURL(c.Config.BasePath, "UpdateZone")
	if err != nil {
		return err
	}
	// updateMask restricts the PATCH to the fields that actually differ.
	mask := dcl.UpdateMask(op.FieldDiffs)
	u, err = dcl.AddQueryParams(u, map[string]string{"updateMask": mask})
	if err != nil {
		return err
	}

	req, err := newUpdateZoneUpdateZoneRequest(ctx, r, c)
	if err != nil {
		return err
	}

	c.Config.Logger.InfoWithContextf(ctx, "Created update: %#v", req)
	body, err := marshalUpdateZoneUpdateZoneRequest(c, req)
	if err != nil {
		return err
	}
	resp, err := dcl.SendRequest(ctx, c.Config, "PATCH", u, bytes.NewBuffer(body), c.Config.RetryProvider)
	if err != nil {
		return err
	}

	// The PATCH returns a long-running operation; block until it finishes.
	var o operations.StandardGCPOperation
	if err := dcl.ParseResponse(resp.Response, &o); err != nil {
		return err
	}
	err = o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET")

	if err != nil {
		return err
	}

	return nil
}

// listZoneRaw performs a single List call for the parent in r, returning the
// raw response body bytes.
func (c *Client) listZoneRaw(ctx context.Context, r *Zone, pageToken string, pageSize int32) ([]byte, error) {
	u, err := r.urlNormalized().listURL(c.Config.BasePath)
	if err != nil {
		return nil, err
	}

	m := make(map[string]string)
	if pageToken != "" {
		m["pageToken"] = pageToken
	}

	// ZoneMaxPage (-1) means "let the server pick": omit pageSize entirely.
	if pageSize != ZoneMaxPage {
		m["pageSize"] = fmt.Sprintf("%v", pageSize)
	}

	u, err = dcl.AddQueryParams(u, m)
	if err != nil {
		return nil, err
	}
	resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider)
	if err != nil {
		return nil, err
	}
	defer resp.Response.Body.Close()
	return ioutil.ReadAll(resp.Response.Body)
}

// listZoneOperation mirrors the JSON shape of a List response page.
type listZoneOperation struct {
	Zones []map[string]interface{} `json:"zones"`
	Token string                   `json:"nextPageToken"`
}

// listZone fetches one page of Zones for the parent in r and returns the
// decoded resources plus the next-page token ("" if this is the last page).
func (c *Client) listZone(ctx context.Context, r *Zone, pageToken string, pageSize int32) ([]*Zone, string, error) {
	b, err := c.listZoneRaw(ctx, r, pageToken, pageSize)
	if err != nil {
		return nil, "", err
	}

	var m listZoneOperation
	if err := json.Unmarshal(b, &m); err != nil {
		return nil, "", err
	}

	var l []*Zone
	for _, v := range m.Zones {
		res, err := unmarshalMapZone(v, c, r)
		if err != nil {
			return nil, m.Token, err
		}
		// Parent fields come from the request so they keep the caller's form.
		res.Project = r.Project
		res.Location = r.Location
		res.Lake = r.Lake
		l = append(l, res)
	}

	return l, m.Token, nil
}

// deleteAllZone deletes every resource in resources for which f returns true,
// collecting (not short-circuiting on) individual deletion failures.
func (c *Client) deleteAllZone(ctx context.Context, f func(*Zone) bool, resources []*Zone) error {
	var errors []string
	for _, res := range resources {
		if f(res) {
			// We do not want deleteAll to fail on a deletion or else it will stop deleting other resources.
			err := c.DeleteZone(ctx, res)
			if err != nil {
				errors = append(errors, err.Error())
			}
		}
	}
	if len(errors) > 0 {
		return fmt.Errorf("%v", strings.Join(errors, "\n"))
	} else {
		return nil
	}
}

type deleteZoneOperation struct{}

// do deletes the Zone identified by r, waits for the long-running delete
// operation, and then polls until the resource stops being returned by Get.
// A Zone that is already gone is treated as success.
func (op *deleteZoneOperation) do(ctx context.Context, r *Zone, c *Client) error {
	r, err := c.GetZone(ctx, r)
	if err != nil {
		if dcl.IsNotFound(err) {
			c.Config.Logger.InfoWithContextf(ctx, "Zone not found, returning. Original error: %v", err)
			return nil
		}
		c.Config.Logger.WarningWithContextf(ctx, "GetZone checking for existence. error: %v", err)
		return err
	}

	u, err := r.deleteURL(c.Config.BasePath)
	if err != nil {
		return err
	}

	// Delete should never have a body
	body := &bytes.Buffer{}
	resp, err := dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider)
	if err != nil {
		return err
	}

	// wait for object to be deleted.
	var o operations.StandardGCPOperation
	if err := dcl.ParseResponse(resp.Response, &o); err != nil {
		return err
	}
	if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil {
		return err
	}

	// We saw a race condition where for some successful delete operation, the Get calls returned resources for a short duration.
	// This is the reason we are adding retry to handle that case.
	retriesRemaining := 10
	dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) {
		_, err := c.GetZone(ctx, r)
		if dcl.IsNotFound(err) {
			return nil, nil
		}
		if retriesRemaining > 0 {
			retriesRemaining--
			return &dcl.RetryDetails{}, dcl.OperationNotDone{}
		}
		return nil, dcl.NotDeletedError{ExistingResource: r}
	}, c.Config.RetryProvider)
	return nil
}

// Create operations are similar to Update operations, although they do not have
// specific request objects. The Create request object is the json encoding of
// the resource, which is modified by res.marshal to form the base request body.
+type createZoneOperation struct { + response map[string]interface{} +} + +func (op *createZoneOperation) FirstResponse() (map[string]interface{}, bool) { + return op.response, len(op.response) > 0 +} + +func (op *createZoneOperation) do(ctx context.Context, r *Zone, c *Client) error { + c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r) + u, err := r.createURL(c.Config.BasePath) + if err != nil { + return err + } + + req, err := r.marshal(c) + if err != nil { + return err + } + resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider) + if err != nil { + return err + } + // wait for object to be created. + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { + c.Config.Logger.Warningf("Creation failed after waiting for operation: %v", err) + return err + } + c.Config.Logger.InfoWithContextf(ctx, "Successfully waited for operation") + op.response, _ = o.FirstResponse() + + if _, err := c.GetZone(ctx, r); err != nil { + c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) + return err + } + + return nil +} + +func (c *Client) getZoneRaw(ctx context.Context, r *Zone) ([]byte, error) { + + u, err := r.getURL(c.Config.BasePath) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + b, err := ioutil.ReadAll(resp.Response.Body) + if err != nil { + return nil, err + } + + return b, nil +} + +func (c *Client) zoneDiffsForRawDesired(ctx context.Context, rawDesired *Zone, opts ...dcl.ApplyOption) (initial, desired *Zone, diffs []*dcl.FieldDiff, err error) { + c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") + // First, let us see if the user 
provided a state hint. If they did, we will start fetching based on that. + var fetchState *Zone + if sh := dcl.FetchStateHint(opts); sh != nil { + if r, ok := sh.(*Zone); !ok { + c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected Zone, got %T", sh) + } else { + fetchState = r + } + } + if fetchState == nil { + fetchState = rawDesired + } + + // 1.2: Retrieval of raw initial state from API + rawInitial, err := c.GetZone(ctx, fetchState) + if rawInitial == nil { + if !dcl.IsNotFound(err) { + c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a Zone resource already exists: %s", err) + return nil, nil, nil, fmt.Errorf("failed to retrieve Zone resource: %v", err) + } + c.Config.Logger.InfoWithContext(ctx, "Found that Zone resource did not exist.") + // Perform canonicalization to pick up defaults. + desired, err = canonicalizeZoneDesiredState(rawDesired, rawInitial) + return nil, desired, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Found initial state for Zone: %v", rawInitial) + c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for Zone: %v", rawDesired) + + // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. + if err := extractZoneFields(rawInitial); err != nil { + return nil, nil, nil, err + } + + // 1.3: Canonicalize raw initial state into initial state. + initial, err = canonicalizeZoneInitialState(rawInitial, rawDesired) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for Zone: %v", initial) + + // 1.4: Canonicalize raw desired state into desired state. + desired, err = canonicalizeZoneDesiredState(rawDesired, rawInitial, opts...) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for Zone: %v", desired) + + // 2.1: Comparison of initial and desired state. 
+ diffs, err = diffZone(c, desired, initial, opts...) + return initial, desired, diffs, err +} + +func canonicalizeZoneInitialState(rawInitial, rawDesired *Zone) (*Zone, error) { + // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. + return rawInitial, nil +} + +/* +* Canonicalizers +* +* These are responsible for converting either a user-specified config or a +* GCP API response to a standard format that can be used for difference checking. +* */ + +func canonicalizeZoneDesiredState(rawDesired, rawInitial *Zone, opts ...dcl.ApplyOption) (*Zone, error) { + + if rawInitial == nil { + // Since the initial state is empty, the desired state is all we have. + // We canonicalize the remaining nested objects with nil to pick up defaults. + rawDesired.DiscoverySpec = canonicalizeZoneDiscoverySpec(rawDesired.DiscoverySpec, nil, opts...) + rawDesired.ResourceSpec = canonicalizeZoneResourceSpec(rawDesired.ResourceSpec, nil, opts...) + rawDesired.AssetStatus = canonicalizeZoneAssetStatus(rawDesired.AssetStatus, nil, opts...) + + return rawDesired, nil + } + canonicalDesired := &Zone{} + if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawInitial.Name) { + canonicalDesired.Name = rawInitial.Name + } else { + canonicalDesired.Name = rawDesired.Name + } + if dcl.StringCanonicalize(rawDesired.DisplayName, rawInitial.DisplayName) { + canonicalDesired.DisplayName = rawInitial.DisplayName + } else { + canonicalDesired.DisplayName = rawDesired.DisplayName + } + if dcl.IsZeroValue(rawDesired.Labels) || (dcl.IsEmptyValueIndirect(rawDesired.Labels) && dcl.IsEmptyValueIndirect(rawInitial.Labels)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ canonicalDesired.Labels = rawInitial.Labels + } else { + canonicalDesired.Labels = rawDesired.Labels + } + if dcl.StringCanonicalize(rawDesired.Description, rawInitial.Description) { + canonicalDesired.Description = rawInitial.Description + } else { + canonicalDesired.Description = rawDesired.Description + } + if dcl.IsZeroValue(rawDesired.Type) || (dcl.IsEmptyValueIndirect(rawDesired.Type) && dcl.IsEmptyValueIndirect(rawInitial.Type)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + canonicalDesired.Type = rawInitial.Type + } else { + canonicalDesired.Type = rawDesired.Type + } + canonicalDesired.DiscoverySpec = canonicalizeZoneDiscoverySpec(rawDesired.DiscoverySpec, rawInitial.DiscoverySpec, opts...) + canonicalDesired.ResourceSpec = canonicalizeZoneResourceSpec(rawDesired.ResourceSpec, rawInitial.ResourceSpec, opts...) + if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) { + canonicalDesired.Project = rawInitial.Project + } else { + canonicalDesired.Project = rawDesired.Project + } + if dcl.NameToSelfLink(rawDesired.Location, rawInitial.Location) { + canonicalDesired.Location = rawInitial.Location + } else { + canonicalDesired.Location = rawDesired.Location + } + if dcl.NameToSelfLink(rawDesired.Lake, rawInitial.Lake) { + canonicalDesired.Lake = rawInitial.Lake + } else { + canonicalDesired.Lake = rawDesired.Lake + } + return canonicalDesired, nil +} + +func canonicalizeZoneNewState(c *Client, rawNew, rawDesired *Zone) (*Zone, error) { + + if dcl.IsEmptyValueIndirect(rawNew.Name) && dcl.IsEmptyValueIndirect(rawDesired.Name) { + rawNew.Name = rawDesired.Name + } else { + if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawNew.Name) { + rawNew.Name = rawDesired.Name + } + } + + if dcl.IsEmptyValueIndirect(rawNew.DisplayName) && dcl.IsEmptyValueIndirect(rawDesired.DisplayName) { + rawNew.DisplayName = rawDesired.DisplayName + } else { + if dcl.StringCanonicalize(rawDesired.DisplayName, 
rawNew.DisplayName) { + rawNew.DisplayName = rawDesired.DisplayName + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Uid) && dcl.IsEmptyValueIndirect(rawDesired.Uid) { + rawNew.Uid = rawDesired.Uid + } else { + if dcl.StringCanonicalize(rawDesired.Uid, rawNew.Uid) { + rawNew.Uid = rawDesired.Uid + } + } + + if dcl.IsEmptyValueIndirect(rawNew.CreateTime) && dcl.IsEmptyValueIndirect(rawDesired.CreateTime) { + rawNew.CreateTime = rawDesired.CreateTime + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.UpdateTime) && dcl.IsEmptyValueIndirect(rawDesired.UpdateTime) { + rawNew.UpdateTime = rawDesired.UpdateTime + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.Labels) && dcl.IsEmptyValueIndirect(rawDesired.Labels) { + rawNew.Labels = rawDesired.Labels + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.Description) && dcl.IsEmptyValueIndirect(rawDesired.Description) { + rawNew.Description = rawDesired.Description + } else { + if dcl.StringCanonicalize(rawDesired.Description, rawNew.Description) { + rawNew.Description = rawDesired.Description + } + } + + if dcl.IsEmptyValueIndirect(rawNew.State) && dcl.IsEmptyValueIndirect(rawDesired.State) { + rawNew.State = rawDesired.State + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.Type) && dcl.IsEmptyValueIndirect(rawDesired.Type) { + rawNew.Type = rawDesired.Type + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.DiscoverySpec) && dcl.IsEmptyValueIndirect(rawDesired.DiscoverySpec) { + rawNew.DiscoverySpec = rawDesired.DiscoverySpec + } else { + rawNew.DiscoverySpec = canonicalizeNewZoneDiscoverySpec(c, rawDesired.DiscoverySpec, rawNew.DiscoverySpec) + } + + if dcl.IsEmptyValueIndirect(rawNew.ResourceSpec) && dcl.IsEmptyValueIndirect(rawDesired.ResourceSpec) { + rawNew.ResourceSpec = rawDesired.ResourceSpec + } else { + rawNew.ResourceSpec = canonicalizeNewZoneResourceSpec(c, rawDesired.ResourceSpec, rawNew.ResourceSpec) + } + + if dcl.IsEmptyValueIndirect(rawNew.AssetStatus) && 
dcl.IsEmptyValueIndirect(rawDesired.AssetStatus) { + rawNew.AssetStatus = rawDesired.AssetStatus + } else { + rawNew.AssetStatus = canonicalizeNewZoneAssetStatus(c, rawDesired.AssetStatus, rawNew.AssetStatus) + } + + rawNew.Project = rawDesired.Project + + rawNew.Location = rawDesired.Location + + rawNew.Lake = rawDesired.Lake + + return rawNew, nil +} + +func canonicalizeZoneDiscoverySpec(des, initial *ZoneDiscoverySpec, opts ...dcl.ApplyOption) *ZoneDiscoverySpec { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ZoneDiscoverySpec{} + + if dcl.BoolCanonicalize(des.Enabled, initial.Enabled) || dcl.IsZeroValue(des.Enabled) { + cDes.Enabled = initial.Enabled + } else { + cDes.Enabled = des.Enabled + } + if dcl.StringArrayCanonicalize(des.IncludePatterns, initial.IncludePatterns) { + cDes.IncludePatterns = initial.IncludePatterns + } else { + cDes.IncludePatterns = des.IncludePatterns + } + if dcl.StringArrayCanonicalize(des.ExcludePatterns, initial.ExcludePatterns) { + cDes.ExcludePatterns = initial.ExcludePatterns + } else { + cDes.ExcludePatterns = des.ExcludePatterns + } + cDes.CsvOptions = canonicalizeZoneDiscoverySpecCsvOptions(des.CsvOptions, initial.CsvOptions, opts...) + cDes.JsonOptions = canonicalizeZoneDiscoverySpecJsonOptions(des.JsonOptions, initial.JsonOptions, opts...) + if dcl.StringCanonicalize(des.Schedule, initial.Schedule) || dcl.IsZeroValue(des.Schedule) { + cDes.Schedule = initial.Schedule + } else { + cDes.Schedule = des.Schedule + } + + return cDes +} + +func canonicalizeZoneDiscoverySpecSlice(des, initial []ZoneDiscoverySpec, opts ...dcl.ApplyOption) []ZoneDiscoverySpec { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ZoneDiscoverySpec, 0, len(des)) + for _, d := range des { + cd := canonicalizeZoneDiscoverySpec(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ZoneDiscoverySpec, 0, len(des)) + for i, d := range des { + cd := canonicalizeZoneDiscoverySpec(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewZoneDiscoverySpec(c *Client, des, nw *ZoneDiscoverySpec) *ZoneDiscoverySpec { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ZoneDiscoverySpec while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.BoolCanonicalize(des.Enabled, nw.Enabled) { + nw.Enabled = des.Enabled + } + if dcl.StringArrayCanonicalize(des.IncludePatterns, nw.IncludePatterns) { + nw.IncludePatterns = des.IncludePatterns + } + if dcl.StringArrayCanonicalize(des.ExcludePatterns, nw.ExcludePatterns) { + nw.ExcludePatterns = des.ExcludePatterns + } + nw.CsvOptions = canonicalizeNewZoneDiscoverySpecCsvOptions(c, des.CsvOptions, nw.CsvOptions) + nw.JsonOptions = canonicalizeNewZoneDiscoverySpecJsonOptions(c, des.JsonOptions, nw.JsonOptions) + if dcl.StringCanonicalize(des.Schedule, nw.Schedule) { + nw.Schedule = des.Schedule + } + + return nw +} + +func canonicalizeNewZoneDiscoverySpecSet(c *Client, des, nw []ZoneDiscoverySpec) []ZoneDiscoverySpec { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ZoneDiscoverySpec + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareZoneDiscoverySpecNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewZoneDiscoverySpec(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) 
+ } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewZoneDiscoverySpecSlice(c *Client, des, nw []ZoneDiscoverySpec) []ZoneDiscoverySpec { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []ZoneDiscoverySpec + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewZoneDiscoverySpec(c, &d, &n)) + } + + return items +} + +func canonicalizeZoneDiscoverySpecCsvOptions(des, initial *ZoneDiscoverySpecCsvOptions, opts ...dcl.ApplyOption) *ZoneDiscoverySpecCsvOptions { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ZoneDiscoverySpecCsvOptions{} + + if dcl.IsZeroValue(des.HeaderRows) || (dcl.IsEmptyValueIndirect(des.HeaderRows) && dcl.IsEmptyValueIndirect(initial.HeaderRows)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.HeaderRows = initial.HeaderRows + } else { + cDes.HeaderRows = des.HeaderRows + } + if dcl.StringCanonicalize(des.Delimiter, initial.Delimiter) || dcl.IsZeroValue(des.Delimiter) { + cDes.Delimiter = initial.Delimiter + } else { + cDes.Delimiter = des.Delimiter + } + if dcl.StringCanonicalize(des.Encoding, initial.Encoding) || dcl.IsZeroValue(des.Encoding) { + cDes.Encoding = initial.Encoding + } else { + cDes.Encoding = des.Encoding + } + if dcl.BoolCanonicalize(des.DisableTypeInference, initial.DisableTypeInference) || dcl.IsZeroValue(des.DisableTypeInference) { + cDes.DisableTypeInference = initial.DisableTypeInference + } else { + cDes.DisableTypeInference = des.DisableTypeInference + } + + return cDes +} + +func canonicalizeZoneDiscoverySpecCsvOptionsSlice(des, initial []ZoneDiscoverySpecCsvOptions, opts ...dcl.ApplyOption) []ZoneDiscoverySpecCsvOptions { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ZoneDiscoverySpecCsvOptions, 0, len(des)) + for _, d := range des { + cd := canonicalizeZoneDiscoverySpecCsvOptions(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ZoneDiscoverySpecCsvOptions, 0, len(des)) + for i, d := range des { + cd := canonicalizeZoneDiscoverySpecCsvOptions(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewZoneDiscoverySpecCsvOptions(c *Client, des, nw *ZoneDiscoverySpecCsvOptions) *ZoneDiscoverySpecCsvOptions { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ZoneDiscoverySpecCsvOptions while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Delimiter, nw.Delimiter) { + nw.Delimiter = des.Delimiter + } + if dcl.StringCanonicalize(des.Encoding, nw.Encoding) { + nw.Encoding = des.Encoding + } + if dcl.BoolCanonicalize(des.DisableTypeInference, nw.DisableTypeInference) { + nw.DisableTypeInference = des.DisableTypeInference + } + + return nw +} + +func canonicalizeNewZoneDiscoverySpecCsvOptionsSet(c *Client, des, nw []ZoneDiscoverySpecCsvOptions) []ZoneDiscoverySpecCsvOptions { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ZoneDiscoverySpecCsvOptions + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareZoneDiscoverySpecCsvOptionsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewZoneDiscoverySpecCsvOptions(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewZoneDiscoverySpecCsvOptionsSlice(c *Client, des, nw []ZoneDiscoverySpecCsvOptions) []ZoneDiscoverySpecCsvOptions { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ZoneDiscoverySpecCsvOptions + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewZoneDiscoverySpecCsvOptions(c, &d, &n)) + } + + return items +} + +func canonicalizeZoneDiscoverySpecJsonOptions(des, initial *ZoneDiscoverySpecJsonOptions, opts ...dcl.ApplyOption) *ZoneDiscoverySpecJsonOptions { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ZoneDiscoverySpecJsonOptions{} + + if dcl.StringCanonicalize(des.Encoding, initial.Encoding) || dcl.IsZeroValue(des.Encoding) { + cDes.Encoding = initial.Encoding + } else { + cDes.Encoding = des.Encoding + } + if dcl.BoolCanonicalize(des.DisableTypeInference, initial.DisableTypeInference) || dcl.IsZeroValue(des.DisableTypeInference) { + cDes.DisableTypeInference = initial.DisableTypeInference + } else { + cDes.DisableTypeInference = des.DisableTypeInference + } + + return cDes +} + +func canonicalizeZoneDiscoverySpecJsonOptionsSlice(des, initial []ZoneDiscoverySpecJsonOptions, opts ...dcl.ApplyOption) []ZoneDiscoverySpecJsonOptions { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ZoneDiscoverySpecJsonOptions, 0, len(des)) + for _, d := range des { + cd := canonicalizeZoneDiscoverySpecJsonOptions(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ZoneDiscoverySpecJsonOptions, 0, len(des)) + for i, d := range des { + cd := canonicalizeZoneDiscoverySpecJsonOptions(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewZoneDiscoverySpecJsonOptions(c *Client, des, nw *ZoneDiscoverySpecJsonOptions) *ZoneDiscoverySpecJsonOptions { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ZoneDiscoverySpecJsonOptions while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Encoding, nw.Encoding) { + nw.Encoding = des.Encoding + } + if dcl.BoolCanonicalize(des.DisableTypeInference, nw.DisableTypeInference) { + nw.DisableTypeInference = des.DisableTypeInference + } + + return nw +} + +func canonicalizeNewZoneDiscoverySpecJsonOptionsSet(c *Client, des, nw []ZoneDiscoverySpecJsonOptions) []ZoneDiscoverySpecJsonOptions { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ZoneDiscoverySpecJsonOptions + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareZoneDiscoverySpecJsonOptionsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewZoneDiscoverySpecJsonOptions(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewZoneDiscoverySpecJsonOptionsSlice(c *Client, des, nw []ZoneDiscoverySpecJsonOptions) []ZoneDiscoverySpecJsonOptions { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ZoneDiscoverySpecJsonOptions + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewZoneDiscoverySpecJsonOptions(c, &d, &n)) + } + + return items +} + +func canonicalizeZoneResourceSpec(des, initial *ZoneResourceSpec, opts ...dcl.ApplyOption) *ZoneResourceSpec { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ZoneResourceSpec{} + + if dcl.IsZeroValue(des.LocationType) || (dcl.IsEmptyValueIndirect(des.LocationType) && dcl.IsEmptyValueIndirect(initial.LocationType)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.LocationType = initial.LocationType + } else { + cDes.LocationType = des.LocationType + } + + return cDes +} + +func canonicalizeZoneResourceSpecSlice(des, initial []ZoneResourceSpec, opts ...dcl.ApplyOption) []ZoneResourceSpec { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ZoneResourceSpec, 0, len(des)) + for _, d := range des { + cd := canonicalizeZoneResourceSpec(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ZoneResourceSpec, 0, len(des)) + for i, d := range des { + cd := canonicalizeZoneResourceSpec(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewZoneResourceSpec(c *Client, des, nw *ZoneResourceSpec) *ZoneResourceSpec { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ZoneResourceSpec while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewZoneResourceSpecSet(c *Client, des, nw []ZoneResourceSpec) []ZoneResourceSpec { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ZoneResourceSpec + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareZoneResourceSpecNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewZoneResourceSpec(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewZoneResourceSpecSlice(c *Client, des, nw []ZoneResourceSpec) []ZoneResourceSpec { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []ZoneResourceSpec + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewZoneResourceSpec(c, &d, &n)) + } + + return items +} + +func canonicalizeZoneAssetStatus(des, initial *ZoneAssetStatus, opts ...dcl.ApplyOption) *ZoneAssetStatus { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ZoneAssetStatus{} + + if dcl.IsZeroValue(des.UpdateTime) || (dcl.IsEmptyValueIndirect(des.UpdateTime) && dcl.IsEmptyValueIndirect(initial.UpdateTime)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.UpdateTime = initial.UpdateTime + } else { + cDes.UpdateTime = des.UpdateTime + } + if dcl.IsZeroValue(des.ActiveAssets) || (dcl.IsEmptyValueIndirect(des.ActiveAssets) && dcl.IsEmptyValueIndirect(initial.ActiveAssets)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.ActiveAssets = initial.ActiveAssets + } else { + cDes.ActiveAssets = des.ActiveAssets + } + if dcl.IsZeroValue(des.SecurityPolicyApplyingAssets) || (dcl.IsEmptyValueIndirect(des.SecurityPolicyApplyingAssets) && dcl.IsEmptyValueIndirect(initial.SecurityPolicyApplyingAssets)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.SecurityPolicyApplyingAssets = initial.SecurityPolicyApplyingAssets + } else { + cDes.SecurityPolicyApplyingAssets = des.SecurityPolicyApplyingAssets + } + + return cDes +} + +func canonicalizeZoneAssetStatusSlice(des, initial []ZoneAssetStatus, opts ...dcl.ApplyOption) []ZoneAssetStatus { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ZoneAssetStatus, 0, len(des)) + for _, d := range des { + cd := canonicalizeZoneAssetStatus(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ZoneAssetStatus, 0, len(des)) + for i, d := range des { + cd := canonicalizeZoneAssetStatus(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewZoneAssetStatus(c *Client, des, nw *ZoneAssetStatus) *ZoneAssetStatus { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ZoneAssetStatus while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewZoneAssetStatusSet(c *Client, des, nw []ZoneAssetStatus) []ZoneAssetStatus { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ZoneAssetStatus + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareZoneAssetStatusNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewZoneAssetStatus(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewZoneAssetStatusSlice(c *Client, des, nw []ZoneAssetStatus) []ZoneAssetStatus { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []ZoneAssetStatus + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewZoneAssetStatus(c, &d, &n)) + } + + return items +} + +// The differ returns a list of diffs, along with a list of operations that should be taken +// to remedy them. Right now, it does not attempt to consolidate operations - if several +// fields can be fixed with a patch update, it will perform the patch several times. +// Diffs on some fields will be ignored if the `desired` state has an empty (nil) +// value. This empty value indicates that the user does not care about the state for +// the field. Empty fields on the actual object will cause diffs. +// TODO(magic-modules-eng): for efficiency in some resources, add batching. 
+func diffZone(c *Client, desired, actual *Zone, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) { + if desired == nil || actual == nil { + return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual) + } + + c.Config.Logger.Infof("Diff function called with desired state: %v", desired) + c.Config.Logger.Infof("Diff function called with actual state: %v", actual) + + var fn dcl.FieldName + var newDiffs []*dcl.FieldDiff + // New style diffs. + if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.TriggersOperation("updateZoneUpdateZoneOperation")}, fn.AddNest("Name")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.DisplayName, actual.DisplayName, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateZoneUpdateZoneOperation")}, fn.AddNest("DisplayName")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Uid, actual.Uid, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Uid")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.CreateTime, actual.CreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Labels, actual.Labels, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateZoneUpdateZoneOperation")}, fn.AddNest("Labels")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Description, actual.Description, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateZoneUpdateZoneOperation")}, fn.AddNest("Description")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.State, actual.State, dcl.DiffInfo{OutputOnly: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("State")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Type, actual.Type, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Type")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.DiscoverySpec, actual.DiscoverySpec, dcl.DiffInfo{ObjectFunction: compareZoneDiscoverySpecNewStyle, EmptyObject: EmptyZoneDiscoverySpec, OperationSelector: dcl.TriggersOperation("updateZoneUpdateZoneOperation")}, fn.AddNest("DiscoverySpec")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.ResourceSpec, actual.ResourceSpec, dcl.DiffInfo{ObjectFunction: compareZoneResourceSpecNewStyle, EmptyObject: EmptyZoneResourceSpec, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ResourceSpec")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.AssetStatus, actual.AssetStatus, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareZoneAssetStatusNewStyle, EmptyObject: EmptyZoneAssetStatus, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AssetStatus")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Location, actual.Location, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Location")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Lake, actual.Lake, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Lake")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if len(newDiffs) > 0 { + c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs) + } + return newDiffs, nil +} +func compareZoneDiscoverySpecNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ZoneDiscoverySpec) + if !ok { + desiredNotPointer, ok := d.(ZoneDiscoverySpec) + if !ok { + return nil, fmt.Errorf("obj %v is not a ZoneDiscoverySpec or *ZoneDiscoverySpec", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ZoneDiscoverySpec) + if !ok { + actualNotPointer, ok := a.(ZoneDiscoverySpec) + if !ok { + return nil, fmt.Errorf("obj %v is not a ZoneDiscoverySpec", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Enabled, actual.Enabled, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateZoneUpdateZoneOperation")}, fn.AddNest("Enabled")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.IncludePatterns, actual.IncludePatterns, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateZoneUpdateZoneOperation")}, fn.AddNest("IncludePatterns")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ExcludePatterns, actual.ExcludePatterns, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateZoneUpdateZoneOperation")}, fn.AddNest("ExcludePatterns")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.CsvOptions, actual.CsvOptions, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareZoneDiscoverySpecCsvOptionsNewStyle, EmptyObject: EmptyZoneDiscoverySpecCsvOptions, OperationSelector: dcl.TriggersOperation("updateZoneUpdateZoneOperation")}, fn.AddNest("CsvOptions")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.JsonOptions, actual.JsonOptions, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareZoneDiscoverySpecJsonOptionsNewStyle, EmptyObject: EmptyZoneDiscoverySpecJsonOptions, OperationSelector: dcl.TriggersOperation("updateZoneUpdateZoneOperation")}, fn.AddNest("JsonOptions")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Schedule, actual.Schedule, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.TriggersOperation("updateZoneUpdateZoneOperation")}, fn.AddNest("Schedule")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareZoneDiscoverySpecCsvOptionsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ZoneDiscoverySpecCsvOptions) + if !ok { + desiredNotPointer, ok := d.(ZoneDiscoverySpecCsvOptions) + if !ok { + return nil, fmt.Errorf("obj %v is not a ZoneDiscoverySpecCsvOptions or *ZoneDiscoverySpecCsvOptions", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ZoneDiscoverySpecCsvOptions) + if !ok { + actualNotPointer, ok := a.(ZoneDiscoverySpecCsvOptions) + if !ok { + return nil, fmt.Errorf("obj %v is not a ZoneDiscoverySpecCsvOptions", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.HeaderRows, actual.HeaderRows, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateZoneUpdateZoneOperation")}, fn.AddNest("HeaderRows")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Delimiter, actual.Delimiter, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateZoneUpdateZoneOperation")}, fn.AddNest("Delimiter")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Encoding, actual.Encoding, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateZoneUpdateZoneOperation")}, fn.AddNest("Encoding")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.DisableTypeInference, actual.DisableTypeInference, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateZoneUpdateZoneOperation")}, fn.AddNest("DisableTypeInference")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareZoneDiscoverySpecJsonOptionsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ZoneDiscoverySpecJsonOptions) + if !ok { + desiredNotPointer, ok := d.(ZoneDiscoverySpecJsonOptions) + if !ok { + return nil, fmt.Errorf("obj %v is not a ZoneDiscoverySpecJsonOptions or *ZoneDiscoverySpecJsonOptions", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ZoneDiscoverySpecJsonOptions) + if !ok { + actualNotPointer, ok := a.(ZoneDiscoverySpecJsonOptions) + if !ok { + return nil, fmt.Errorf("obj %v is not a ZoneDiscoverySpecJsonOptions", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Encoding, actual.Encoding, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateZoneUpdateZoneOperation")}, fn.AddNest("Encoding")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.DisableTypeInference, actual.DisableTypeInference, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateZoneUpdateZoneOperation")}, fn.AddNest("DisableTypeInference")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareZoneResourceSpecNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ZoneResourceSpec) + if !ok { + desiredNotPointer, ok := d.(ZoneResourceSpec) + if !ok { + return nil, fmt.Errorf("obj %v is not a ZoneResourceSpec or *ZoneResourceSpec", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ZoneResourceSpec) + if !ok { + actualNotPointer, ok := a.(ZoneResourceSpec) + if !ok { + return nil, fmt.Errorf("obj %v is not a ZoneResourceSpec", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.LocationType, actual.LocationType, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("LocationType")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareZoneAssetStatusNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ZoneAssetStatus) + if !ok { + desiredNotPointer, ok := d.(ZoneAssetStatus) + if !ok { + return nil, fmt.Errorf("obj %v is not a ZoneAssetStatus or *ZoneAssetStatus", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ZoneAssetStatus) + if !ok { + actualNotPointer, ok := a.(ZoneAssetStatus) + if !ok { + return nil, fmt.Errorf("obj %v is not a ZoneAssetStatus", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateZoneUpdateZoneOperation")}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.ActiveAssets, actual.ActiveAssets, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateZoneUpdateZoneOperation")}, fn.AddNest("ActiveAssets")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.SecurityPolicyApplyingAssets, actual.SecurityPolicyApplyingAssets, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateZoneUpdateZoneOperation")}, fn.AddNest("SecurityPolicyApplyingAssets")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +// urlNormalized returns a copy of the resource struct with values normalized +// for URL substitutions. For instance, it converts long-form self-links to +// short-form so they can be substituted in. +func (r *Zone) urlNormalized() *Zone { + normalized := dcl.Copy(*r).(Zone) + normalized.Name = dcl.SelfLinkToName(r.Name) + normalized.DisplayName = dcl.SelfLinkToName(r.DisplayName) + normalized.Uid = dcl.SelfLinkToName(r.Uid) + normalized.Description = dcl.SelfLinkToName(r.Description) + normalized.Project = dcl.SelfLinkToName(r.Project) + normalized.Location = dcl.SelfLinkToName(r.Location) + normalized.Lake = dcl.SelfLinkToName(r.Lake) + return &normalized +} + +func (r *Zone) updateURL(userBasePath, updateName string) (string, error) { + nr := r.urlNormalized() + if updateName == "UpdateZone" { + fields := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "lake": dcl.ValueOrEmptyString(nr.Lake), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/lakes/{{ "{{" }}lake{{ "}}" }}/zones/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, fields), nil + + } + + return "", fmt.Errorf("unknown update name: %s", updateName) +} + +// marshal encodes the 
Zone resource into JSON for a Create request, and +// performs transformations from the resource schema to the API schema if +// necessary. +func (r *Zone) marshal(c *Client) ([]byte, error) { + m, err := expandZone(c, r) + if err != nil { + return nil, fmt.Errorf("error marshalling Zone: %w", err) + } + + return json.Marshal(m) +} + +// unmarshalZone decodes JSON responses into the Zone resource schema. +func unmarshalZone(b []byte, c *Client, res *Zone) (*Zone, error) { + var m map[string]interface{} + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + return unmarshalMapZone(m, c, res) +} + +func unmarshalMapZone(m map[string]interface{}, c *Client, res *Zone) (*Zone, error) { + + flattened := flattenZone(c, m, res) + if flattened == nil { + return nil, fmt.Errorf("attempted to flatten empty json object") + } + return flattened, nil +} + +// expandZone expands Zone into a JSON request object. +func expandZone(c *Client, f *Zone) (map[string]interface{}, error) { + m := make(map[string]interface{}) + res := f + _ = res + if v, err := dcl.DeriveField("projects/%s/locations/%s/lakes/%s/zones/%s", f.Name, dcl.SelfLinkToName(f.Project), dcl.SelfLinkToName(f.Location), dcl.SelfLinkToName(f.Lake), dcl.SelfLinkToName(f.Name)); err != nil { + return nil, fmt.Errorf("error expanding Name into name: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["name"] = v + } + if v := f.DisplayName; dcl.ValueShouldBeSent(v) { + m["displayName"] = v + } + if v := f.Labels; dcl.ValueShouldBeSent(v) { + m["labels"] = v + } + if v := f.Description; dcl.ValueShouldBeSent(v) { + m["description"] = v + } + if v := f.Type; dcl.ValueShouldBeSent(v) { + m["type"] = v + } + if v, err := expandZoneDiscoverySpec(c, f.DiscoverySpec, res); err != nil { + return nil, fmt.Errorf("error expanding DiscoverySpec into discoverySpec: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["discoverySpec"] = v + } + if v, err := expandZoneResourceSpec(c, f.ResourceSpec, res); err 
!= nil { + return nil, fmt.Errorf("error expanding ResourceSpec into resourceSpec: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["resourceSpec"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Project into project: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["project"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Location into location: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["location"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Lake into lake: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["lake"] = v + } + + return m, nil +} + +// flattenZone flattens Zone from a JSON request object into the +// Zone type. +func flattenZone(c *Client, i interface{}, res *Zone) *Zone { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + if len(m) == 0 { + return nil + } + + resultRes := &Zone{} + resultRes.Name = dcl.FlattenString(m["name"]) + resultRes.DisplayName = dcl.FlattenString(m["displayName"]) + resultRes.Uid = dcl.FlattenString(m["uid"]) + resultRes.CreateTime = dcl.FlattenString(m["createTime"]) + resultRes.UpdateTime = dcl.FlattenString(m["updateTime"]) + resultRes.Labels = dcl.FlattenKeyValuePairs(m["labels"]) + resultRes.Description = dcl.FlattenString(m["description"]) + resultRes.State = flattenZoneStateEnum(m["state"]) + resultRes.Type = flattenZoneTypeEnum(m["type"]) + resultRes.DiscoverySpec = flattenZoneDiscoverySpec(c, m["discoverySpec"], res) + resultRes.ResourceSpec = flattenZoneResourceSpec(c, m["resourceSpec"], res) + resultRes.AssetStatus = flattenZoneAssetStatus(c, m["assetStatus"], res) + resultRes.Project = dcl.FlattenString(m["project"]) + resultRes.Location = dcl.FlattenString(m["location"]) + resultRes.Lake = dcl.FlattenString(m["lake"]) + + return resultRes +} + +// expandZoneDiscoverySpecMap expands the contents of 
ZoneDiscoverySpec into a JSON +// request object. +func expandZoneDiscoverySpecMap(c *Client, f map[string]ZoneDiscoverySpec, res *Zone) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandZoneDiscoverySpec(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandZoneDiscoverySpecSlice expands the contents of ZoneDiscoverySpec into a JSON +// request object. +func expandZoneDiscoverySpecSlice(c *Client, f []ZoneDiscoverySpec, res *Zone) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandZoneDiscoverySpec(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenZoneDiscoverySpecMap flattens the contents of ZoneDiscoverySpec from a JSON +// response object. +func flattenZoneDiscoverySpecMap(c *Client, i interface{}, res *Zone) map[string]ZoneDiscoverySpec { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ZoneDiscoverySpec{} + } + + if len(a) == 0 { + return map[string]ZoneDiscoverySpec{} + } + + items := make(map[string]ZoneDiscoverySpec) + for k, item := range a { + items[k] = *flattenZoneDiscoverySpec(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenZoneDiscoverySpecSlice flattens the contents of ZoneDiscoverySpec from a JSON +// response object. 
func flattenZoneDiscoverySpecSlice(c *Client, i interface{}, res *Zone) []ZoneDiscoverySpec {
	a, ok := i.([]interface{})
	if !ok {
		// Non-slice input flattens to an empty (non-nil) slice, not nil.
		return []ZoneDiscoverySpec{}
	}

	if len(a) == 0 {
		return []ZoneDiscoverySpec{}
	}

	items := make([]ZoneDiscoverySpec, 0, len(a))
	for _, item := range a {
		// NOTE(review): a non-object element panics on this type assertion;
		// generated DCL code assumes well-formed API responses — confirm upstream.
		items = append(items, *flattenZoneDiscoverySpec(c, item.(map[string]interface{}), res))
	}

	return items
}

// expandZoneDiscoverySpec expands an instance of ZoneDiscoverySpec into a JSON
// request object.
func expandZoneDiscoverySpec(c *Client, f *ZoneDiscoverySpec, res *Zone) (map[string]interface{}, error) {
	// An empty (or nil) spec expands to no request payload at all.
	if dcl.IsEmptyValueIndirect(f) {
		return nil, nil
	}

	m := make(map[string]interface{})
	if v := f.Enabled; !dcl.IsEmptyValueIndirect(v) {
		m["enabled"] = v
	}
	// Slice fields are sent whenever non-nil, even when empty.
	if v := f.IncludePatterns; v != nil {
		m["includePatterns"] = v
	}
	if v := f.ExcludePatterns; v != nil {
		m["excludePatterns"] = v
	}
	if v, err := expandZoneDiscoverySpecCsvOptions(c, f.CsvOptions, res); err != nil {
		return nil, fmt.Errorf("error expanding CsvOptions into csvOptions: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["csvOptions"] = v
	}
	if v, err := expandZoneDiscoverySpecJsonOptions(c, f.JsonOptions, res); err != nil {
		return nil, fmt.Errorf("error expanding JsonOptions into jsonOptions: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["jsonOptions"] = v
	}
	if v := f.Schedule; !dcl.IsEmptyValueIndirect(v) {
		m["schedule"] = v
	}

	return m, nil
}

// flattenZoneDiscoverySpec flattens an instance of ZoneDiscoverySpec from a JSON
// response object.
func flattenZoneDiscoverySpec(c *Client, i interface{}, res *Zone) *ZoneDiscoverySpec {
	m, ok := i.(map[string]interface{})
	if !ok {
		return nil
	}

	r := &ZoneDiscoverySpec{}

	// An explicitly-empty object flattens to the package-level sentinel
	// (declared elsewhere in this package) rather than a fresh zero value.
	if dcl.IsEmptyValueIndirect(i) {
		return EmptyZoneDiscoverySpec
	}
	r.Enabled = flattenZoneDiscoverySpecEnable(c, m["enabled"], res)
	r.IncludePatterns = dcl.FlattenStringSlice(m["includePatterns"])
	r.ExcludePatterns = dcl.FlattenStringSlice(m["excludePatterns"])
	r.CsvOptions = flattenZoneDiscoverySpecCsvOptions(c, m["csvOptions"], res)
	r.JsonOptions = flattenZoneDiscoverySpecJsonOptions(c, m["jsonOptions"], res)
	r.Schedule = dcl.FlattenString(m["schedule"])

	return r
}

// expandZoneDiscoverySpecCsvOptionsMap expands the contents of ZoneDiscoverySpecCsvOptions into a JSON
// request object.
func expandZoneDiscoverySpecCsvOptionsMap(c *Client, f map[string]ZoneDiscoverySpecCsvOptions, res *Zone) (map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := make(map[string]interface{})
	for k, item := range f {
		i, err := expandZoneDiscoverySpecCsvOptions(c, &item, res)
		if err != nil {
			return nil, err
		}
		// Entries that expand to nil (empty options) are omitted from the map.
		if i != nil {
			items[k] = i
		}
	}

	return items, nil
}

// expandZoneDiscoverySpecCsvOptionsSlice expands the contents of ZoneDiscoverySpecCsvOptions into a JSON
// request object.
func expandZoneDiscoverySpecCsvOptionsSlice(c *Client, f []ZoneDiscoverySpecCsvOptions, res *Zone) ([]map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	// Non-nil empty input yields a non-nil empty result (marshals as [], not null).
	items := []map[string]interface{}{}
	for _, item := range f {
		i, err := expandZoneDiscoverySpecCsvOptions(c, &item, res)
		if err != nil {
			return nil, err
		}

		items = append(items, i)
	}

	return items, nil
}

// flattenZoneDiscoverySpecCsvOptionsMap flattens the contents of ZoneDiscoverySpecCsvOptions from a JSON
// response object.
func flattenZoneDiscoverySpecCsvOptionsMap(c *Client, i interface{}, res *Zone) map[string]ZoneDiscoverySpecCsvOptions {
	a, ok := i.(map[string]interface{})
	if !ok {
		// Non-object input flattens to an empty (non-nil) map.
		return map[string]ZoneDiscoverySpecCsvOptions{}
	}

	if len(a) == 0 {
		return map[string]ZoneDiscoverySpecCsvOptions{}
	}

	items := make(map[string]ZoneDiscoverySpecCsvOptions)
	for k, item := range a {
		// NOTE(review): non-object values panic on this assertion; generated DCL
		// code relies on well-formed API responses.
		items[k] = *flattenZoneDiscoverySpecCsvOptions(c, item.(map[string]interface{}), res)
	}

	return items
}

// flattenZoneDiscoverySpecCsvOptionsSlice flattens the contents of ZoneDiscoverySpecCsvOptions from a JSON
// response object.
func flattenZoneDiscoverySpecCsvOptionsSlice(c *Client, i interface{}, res *Zone) []ZoneDiscoverySpecCsvOptions {
	a, ok := i.([]interface{})
	if !ok {
		return []ZoneDiscoverySpecCsvOptions{}
	}

	if len(a) == 0 {
		return []ZoneDiscoverySpecCsvOptions{}
	}

	items := make([]ZoneDiscoverySpecCsvOptions, 0, len(a))
	for _, item := range a {
		items = append(items, *flattenZoneDiscoverySpecCsvOptions(c, item.(map[string]interface{}), res))
	}

	return items
}

// expandZoneDiscoverySpecCsvOptions expands an instance of ZoneDiscoverySpecCsvOptions into a JSON
// request object.
func expandZoneDiscoverySpecCsvOptions(c *Client, f *ZoneDiscoverySpecCsvOptions, res *Zone) (map[string]interface{}, error) {
	if dcl.IsEmptyValueIndirect(f) {
		return nil, nil
	}

	m := make(map[string]interface{})
	if v := f.HeaderRows; !dcl.IsEmptyValueIndirect(v) {
		m["headerRows"] = v
	}
	if v := f.Delimiter; !dcl.IsEmptyValueIndirect(v) {
		m["delimiter"] = v
	}
	if v := f.Encoding; !dcl.IsEmptyValueIndirect(v) {
		m["encoding"] = v
	}
	if v := f.DisableTypeInference; !dcl.IsEmptyValueIndirect(v) {
		m["disableTypeInference"] = v
	}

	return m, nil
}

// flattenZoneDiscoverySpecCsvOptions flattens an instance of ZoneDiscoverySpecCsvOptions from a JSON
// response object.
func flattenZoneDiscoverySpecCsvOptions(c *Client, i interface{}, res *Zone) *ZoneDiscoverySpecCsvOptions {
	m, ok := i.(map[string]interface{})
	if !ok {
		return nil
	}

	r := &ZoneDiscoverySpecCsvOptions{}

	// Explicitly-empty objects map to the shared package-level sentinel.
	if dcl.IsEmptyValueIndirect(i) {
		return EmptyZoneDiscoverySpecCsvOptions
	}
	r.HeaderRows = dcl.FlattenInteger(m["headerRows"])
	r.Delimiter = dcl.FlattenString(m["delimiter"])
	r.Encoding = dcl.FlattenString(m["encoding"])
	r.DisableTypeInference = dcl.FlattenBool(m["disableTypeInference"])

	return r
}

// expandZoneDiscoverySpecJsonOptionsMap expands the contents of ZoneDiscoverySpecJsonOptions into a JSON
// request object.
func expandZoneDiscoverySpecJsonOptionsMap(c *Client, f map[string]ZoneDiscoverySpecJsonOptions, res *Zone) (map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := make(map[string]interface{})
	for k, item := range f {
		i, err := expandZoneDiscoverySpecJsonOptions(c, &item, res)
		if err != nil {
			return nil, err
		}
		// Entries expanding to nil (empty options) are dropped.
		if i != nil {
			items[k] = i
		}
	}

	return items, nil
}

// expandZoneDiscoverySpecJsonOptionsSlice expands the contents of ZoneDiscoverySpecJsonOptions into a JSON
// request object.
func expandZoneDiscoverySpecJsonOptionsSlice(c *Client, f []ZoneDiscoverySpecJsonOptions, res *Zone) ([]map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	// Non-nil empty input yields a non-nil empty result (marshals as []).
	items := []map[string]interface{}{}
	for _, item := range f {
		i, err := expandZoneDiscoverySpecJsonOptions(c, &item, res)
		if err != nil {
			return nil, err
		}

		items = append(items, i)
	}

	return items, nil
}

// flattenZoneDiscoverySpecJsonOptionsMap flattens the contents of ZoneDiscoverySpecJsonOptions from a JSON
// response object.
func flattenZoneDiscoverySpecJsonOptionsMap(c *Client, i interface{}, res *Zone) map[string]ZoneDiscoverySpecJsonOptions {
	a, ok := i.(map[string]interface{})
	if !ok {
		// Non-object input flattens to an empty (non-nil) map.
		return map[string]ZoneDiscoverySpecJsonOptions{}
	}

	if len(a) == 0 {
		return map[string]ZoneDiscoverySpecJsonOptions{}
	}

	items := make(map[string]ZoneDiscoverySpecJsonOptions)
	for k, item := range a {
		items[k] = *flattenZoneDiscoverySpecJsonOptions(c, item.(map[string]interface{}), res)
	}

	return items
}

// flattenZoneDiscoverySpecJsonOptionsSlice flattens the contents of ZoneDiscoverySpecJsonOptions from a JSON
// response object.
func flattenZoneDiscoverySpecJsonOptionsSlice(c *Client, i interface{}, res *Zone) []ZoneDiscoverySpecJsonOptions {
	a, ok := i.([]interface{})
	if !ok {
		return []ZoneDiscoverySpecJsonOptions{}
	}

	if len(a) == 0 {
		return []ZoneDiscoverySpecJsonOptions{}
	}

	items := make([]ZoneDiscoverySpecJsonOptions, 0, len(a))
	for _, item := range a {
		items = append(items, *flattenZoneDiscoverySpecJsonOptions(c, item.(map[string]interface{}), res))
	}

	return items
}

// expandZoneDiscoverySpecJsonOptions expands an instance of ZoneDiscoverySpecJsonOptions into a JSON
// request object.
func expandZoneDiscoverySpecJsonOptions(c *Client, f *ZoneDiscoverySpecJsonOptions, res *Zone) (map[string]interface{}, error) {
	// An empty (or nil) options struct expands to no payload.
	if dcl.IsEmptyValueIndirect(f) {
		return nil, nil
	}

	m := make(map[string]interface{})
	if v := f.Encoding; !dcl.IsEmptyValueIndirect(v) {
		m["encoding"] = v
	}
	if v := f.DisableTypeInference; !dcl.IsEmptyValueIndirect(v) {
		m["disableTypeInference"] = v
	}

	return m, nil
}

// flattenZoneDiscoverySpecJsonOptions flattens an instance of ZoneDiscoverySpecJsonOptions from a JSON
// response object.
func flattenZoneDiscoverySpecJsonOptions(c *Client, i interface{}, res *Zone) *ZoneDiscoverySpecJsonOptions {
	m, ok := i.(map[string]interface{})
	if !ok {
		return nil
	}

	r := &ZoneDiscoverySpecJsonOptions{}

	// Explicitly-empty objects map to the shared package-level sentinel.
	if dcl.IsEmptyValueIndirect(i) {
		return EmptyZoneDiscoverySpecJsonOptions
	}
	r.Encoding = dcl.FlattenString(m["encoding"])
	r.DisableTypeInference = dcl.FlattenBool(m["disableTypeInference"])

	return r
}

// expandZoneResourceSpecMap expands the contents of ZoneResourceSpec into a JSON
// request object.
func expandZoneResourceSpecMap(c *Client, f map[string]ZoneResourceSpec, res *Zone) (map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := make(map[string]interface{})
	for k, item := range f {
		i, err := expandZoneResourceSpec(c, &item, res)
		if err != nil {
			return nil, err
		}
		// Entries expanding to nil (empty specs) are dropped.
		if i != nil {
			items[k] = i
		}
	}

	return items, nil
}

// expandZoneResourceSpecSlice expands the contents of ZoneResourceSpec into a JSON
// request object.
func expandZoneResourceSpecSlice(c *Client, f []ZoneResourceSpec, res *Zone) ([]map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	// Non-nil empty input yields a non-nil empty result (marshals as []).
	items := []map[string]interface{}{}
	for _, item := range f {
		i, err := expandZoneResourceSpec(c, &item, res)
		if err != nil {
			return nil, err
		}

		items = append(items, i)
	}

	return items, nil
}

// flattenZoneResourceSpecMap flattens the contents of ZoneResourceSpec from a JSON
// response object.
func flattenZoneResourceSpecMap(c *Client, i interface{}, res *Zone) map[string]ZoneResourceSpec {
	a, ok := i.(map[string]interface{})
	if !ok {
		// Non-object input flattens to an empty (non-nil) map.
		return map[string]ZoneResourceSpec{}
	}

	if len(a) == 0 {
		return map[string]ZoneResourceSpec{}
	}

	items := make(map[string]ZoneResourceSpec)
	for k, item := range a {
		items[k] = *flattenZoneResourceSpec(c, item.(map[string]interface{}), res)
	}

	return items
}

// flattenZoneResourceSpecSlice flattens the contents of ZoneResourceSpec from a JSON
// response object.
func flattenZoneResourceSpecSlice(c *Client, i interface{}, res *Zone) []ZoneResourceSpec {
	a, ok := i.([]interface{})
	if !ok {
		return []ZoneResourceSpec{}
	}

	if len(a) == 0 {
		return []ZoneResourceSpec{}
	}

	items := make([]ZoneResourceSpec, 0, len(a))
	for _, item := range a {
		items = append(items, *flattenZoneResourceSpec(c, item.(map[string]interface{}), res))
	}

	return items
}

// expandZoneResourceSpec expands an instance of ZoneResourceSpec into a JSON
// request object.
func expandZoneResourceSpec(c *Client, f *ZoneResourceSpec, res *Zone) (map[string]interface{}, error) {
	if dcl.IsEmptyValueIndirect(f) {
		return nil, nil
	}

	m := make(map[string]interface{})
	if v := f.LocationType; !dcl.IsEmptyValueIndirect(v) {
		m["locationType"] = v
	}

	return m, nil
}

// flattenZoneResourceSpec flattens an instance of ZoneResourceSpec from a JSON
// response object.
func flattenZoneResourceSpec(c *Client, i interface{}, res *Zone) *ZoneResourceSpec {
	m, ok := i.(map[string]interface{})
	if !ok {
		return nil
	}

	r := &ZoneResourceSpec{}

	// Explicitly-empty objects map to the shared package-level sentinel.
	if dcl.IsEmptyValueIndirect(i) {
		return EmptyZoneResourceSpec
	}
	r.LocationType = flattenZoneResourceSpecLocationTypeEnum(m["locationType"])

	return r
}

// expandZoneAssetStatusMap expands the contents of ZoneAssetStatus into a JSON
// request object.
func expandZoneAssetStatusMap(c *Client, f map[string]ZoneAssetStatus, res *Zone) (map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := make(map[string]interface{})
	for k, item := range f {
		i, err := expandZoneAssetStatus(c, &item, res)
		if err != nil {
			return nil, err
		}
		// Entries expanding to nil (empty statuses) are dropped.
		if i != nil {
			items[k] = i
		}
	}

	return items, nil
}

// expandZoneAssetStatusSlice expands the contents of ZoneAssetStatus into a JSON
// request object.
func expandZoneAssetStatusSlice(c *Client, f []ZoneAssetStatus, res *Zone) ([]map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	// Non-nil empty input yields a non-nil empty result (marshals as []).
	items := []map[string]interface{}{}
	for _, item := range f {
		i, err := expandZoneAssetStatus(c, &item, res)
		if err != nil {
			return nil, err
		}

		items = append(items, i)
	}

	return items, nil
}

// flattenZoneAssetStatusMap flattens the contents of ZoneAssetStatus from a JSON
// response object.
func flattenZoneAssetStatusMap(c *Client, i interface{}, res *Zone) map[string]ZoneAssetStatus {
	a, ok := i.(map[string]interface{})
	if !ok {
		// Non-object input flattens to an empty (non-nil) map.
		return map[string]ZoneAssetStatus{}
	}

	if len(a) == 0 {
		return map[string]ZoneAssetStatus{}
	}

	items := make(map[string]ZoneAssetStatus)
	for k, item := range a {
		items[k] = *flattenZoneAssetStatus(c, item.(map[string]interface{}), res)
	}

	return items
}

// flattenZoneAssetStatusSlice flattens the contents of ZoneAssetStatus from a JSON
// response object.
func flattenZoneAssetStatusSlice(c *Client, i interface{}, res *Zone) []ZoneAssetStatus {
	a, ok := i.([]interface{})
	if !ok {
		return []ZoneAssetStatus{}
	}

	if len(a) == 0 {
		return []ZoneAssetStatus{}
	}

	items := make([]ZoneAssetStatus, 0, len(a))
	for _, item := range a {
		items = append(items, *flattenZoneAssetStatus(c, item.(map[string]interface{}), res))
	}

	return items
}

// expandZoneAssetStatus expands an instance of ZoneAssetStatus into a JSON
// request object.
func expandZoneAssetStatus(c *Client, f *ZoneAssetStatus, res *Zone) (map[string]interface{}, error) {
	// An empty (or nil) status expands to no payload.
	if dcl.IsEmptyValueIndirect(f) {
		return nil, nil
	}

	m := make(map[string]interface{})
	if v := f.UpdateTime; !dcl.IsEmptyValueIndirect(v) {
		m["updateTime"] = v
	}
	if v := f.ActiveAssets; !dcl.IsEmptyValueIndirect(v) {
		m["activeAssets"] = v
	}
	if v := f.SecurityPolicyApplyingAssets; !dcl.IsEmptyValueIndirect(v) {
		m["securityPolicyApplyingAssets"] = v
	}

	return m, nil
}

// flattenZoneAssetStatus flattens an instance of ZoneAssetStatus from a JSON
// response object.
func flattenZoneAssetStatus(c *Client, i interface{}, res *Zone) *ZoneAssetStatus {
	m, ok := i.(map[string]interface{})
	if !ok {
		return nil
	}

	r := &ZoneAssetStatus{}

	// Explicitly-empty objects map to the shared package-level sentinel.
	if dcl.IsEmptyValueIndirect(i) {
		return EmptyZoneAssetStatus
	}
	r.UpdateTime = dcl.FlattenString(m["updateTime"])
	r.ActiveAssets = dcl.FlattenInteger(m["activeAssets"])
	r.SecurityPolicyApplyingAssets = dcl.FlattenInteger(m["securityPolicyApplyingAssets"])

	return r
}

// flattenZoneStateEnumMap flattens the contents of ZoneStateEnum from a JSON
// response object.
func flattenZoneStateEnumMap(c *Client, i interface{}, res *Zone) map[string]ZoneStateEnum {
	a, ok := i.(map[string]interface{})
	if !ok {
		// Non-object input flattens to an empty (non-nil) map.
		return map[string]ZoneStateEnum{}
	}

	if len(a) == 0 {
		return map[string]ZoneStateEnum{}
	}

	items := make(map[string]ZoneStateEnum)
	for k, item := range a {
		// The item.(interface{}) assertion is a generated no-op; every value
		// satisfies interface{}.
		items[k] = *flattenZoneStateEnum(item.(interface{}))
	}

	return items
}

// flattenZoneStateEnumSlice flattens the contents of ZoneStateEnum from a JSON
// response object.
func flattenZoneStateEnumSlice(c *Client, i interface{}, res *Zone) []ZoneStateEnum {
	a, ok := i.([]interface{})
	if !ok {
		// Non-slice input flattens to an empty (non-nil) slice.
		return []ZoneStateEnum{}
	}

	if len(a) == 0 {
		return []ZoneStateEnum{}
	}

	items := make([]ZoneStateEnum, 0, len(a))
	for _, item := range a {
		// NOTE(review): flattenZoneStateEnum returns nil for non-string items,
		// which would make this dereference panic — generated DCL code assumes
		// well-formed responses.
		items = append(items, *flattenZoneStateEnum(item.(interface{})))
	}

	return items
}

// flattenZoneStateEnum asserts that an interface is a string, and returns a
// pointer to a *ZoneStateEnum with the same value as that string.
func flattenZoneStateEnum(i interface{}) *ZoneStateEnum {
	s, ok := i.(string)
	if !ok {
		return nil
	}

	return ZoneStateEnumRef(s)
}

// flattenZoneTypeEnumMap flattens the contents of ZoneTypeEnum from a JSON
// response object.
func flattenZoneTypeEnumMap(c *Client, i interface{}, res *Zone) map[string]ZoneTypeEnum {
	a, ok := i.(map[string]interface{})
	if !ok {
		return map[string]ZoneTypeEnum{}
	}

	if len(a) == 0 {
		return map[string]ZoneTypeEnum{}
	}

	items := make(map[string]ZoneTypeEnum)
	for k, item := range a {
		items[k] = *flattenZoneTypeEnum(item.(interface{}))
	}

	return items
}

// flattenZoneTypeEnumSlice flattens the contents of ZoneTypeEnum from a JSON
// response object.
func flattenZoneTypeEnumSlice(c *Client, i interface{}, res *Zone) []ZoneTypeEnum {
	a, ok := i.([]interface{})
	if !ok {
		return []ZoneTypeEnum{}
	}

	if len(a) == 0 {
		return []ZoneTypeEnum{}
	}

	items := make([]ZoneTypeEnum, 0, len(a))
	for _, item := range a {
		items = append(items, *flattenZoneTypeEnum(item.(interface{})))
	}

	return items
}

// flattenZoneTypeEnum asserts that an interface is a string, and returns a
// pointer to a *ZoneTypeEnum with the same value as that string.
func flattenZoneTypeEnum(i interface{}) *ZoneTypeEnum {
	s, ok := i.(string)
	if !ok {
		// Non-string input (including nil) flattens to a nil enum pointer.
		return nil
	}

	return ZoneTypeEnumRef(s)
}

// flattenZoneResourceSpecLocationTypeEnumMap flattens the contents of ZoneResourceSpecLocationTypeEnum from a JSON
// response object.
func flattenZoneResourceSpecLocationTypeEnumMap(c *Client, i interface{}, res *Zone) map[string]ZoneResourceSpecLocationTypeEnum {
	a, ok := i.(map[string]interface{})
	if !ok {
		// Non-object input flattens to an empty (non-nil) map.
		return map[string]ZoneResourceSpecLocationTypeEnum{}
	}

	if len(a) == 0 {
		return map[string]ZoneResourceSpecLocationTypeEnum{}
	}

	items := make(map[string]ZoneResourceSpecLocationTypeEnum)
	for k, item := range a {
		// The item.(interface{}) assertion is a generated no-op; every value
		// satisfies interface{}.
		items[k] = *flattenZoneResourceSpecLocationTypeEnum(item.(interface{}))
	}

	return items
}

// flattenZoneResourceSpecLocationTypeEnumSlice flattens the contents of ZoneResourceSpecLocationTypeEnum from a JSON
// response object.
func flattenZoneResourceSpecLocationTypeEnumSlice(c *Client, i interface{}, res *Zone) []ZoneResourceSpecLocationTypeEnum {
	a, ok := i.([]interface{})
	if !ok {
		return []ZoneResourceSpecLocationTypeEnum{}
	}

	if len(a) == 0 {
		return []ZoneResourceSpecLocationTypeEnum{}
	}

	items := make([]ZoneResourceSpecLocationTypeEnum, 0, len(a))
	for _, item := range a {
		items = append(items, *flattenZoneResourceSpecLocationTypeEnum(item.(interface{})))
	}

	return items
}

// flattenZoneResourceSpecLocationTypeEnum asserts that an interface is a string, and returns a
// pointer to a *ZoneResourceSpecLocationTypeEnum with the same value as that string.
+func flattenZoneResourceSpecLocationTypeEnum(i interface{}) *ZoneResourceSpecLocationTypeEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return ZoneResourceSpecLocationTypeEnumRef(s) +} + +// This function returns a matcher that checks whether a serialized resource matches this resource +// in its parameters (as defined by the fields in a Get, which definitionally define resource +// identity). This is useful in extracting the element from a List call. +func (r *Zone) matcher(c *Client) func([]byte) bool { + return func(b []byte) bool { + cr, err := unmarshalZone(b, c, r) + if err != nil { + c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.") + return false + } + nr := r.urlNormalized() + ncr := cr.urlNormalized() + c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr) + + if nr.Project == nil && ncr.Project == nil { + c.Config.Logger.Info("Both Project fields null - considering equal.") + } else if nr.Project == nil || ncr.Project == nil { + c.Config.Logger.Info("Only one Project field is null - considering unequal.") + return false + } else if *nr.Project != *ncr.Project { + return false + } + if nr.Location == nil && ncr.Location == nil { + c.Config.Logger.Info("Both Location fields null - considering equal.") + } else if nr.Location == nil || ncr.Location == nil { + c.Config.Logger.Info("Only one Location field is null - considering unequal.") + return false + } else if *nr.Location != *ncr.Location { + return false + } + if nr.Lake == nil && ncr.Lake == nil { + c.Config.Logger.Info("Both Lake fields null - considering equal.") + } else if nr.Lake == nil || ncr.Lake == nil { + c.Config.Logger.Info("Only one Lake field is null - considering unequal.") + return false + } else if *nr.Lake != *ncr.Lake { + return false + } + if nr.Name == nil && ncr.Name == nil { + c.Config.Logger.Info("Both Name fields null - considering equal.") + } else if nr.Name == nil || ncr.Name == nil { + c.Config.Logger.Info("Only one Name field is 
null - considering unequal.") + return false + } else if *nr.Name != *ncr.Name { + return false + } + return true + } +} + +type zoneDiff struct { + // The diff should include one or the other of RequiresRecreate or UpdateOp. + RequiresRecreate bool + UpdateOp zoneApiOperation + FieldName string // used for error logging +} + +func convertFieldDiffsToZoneDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]zoneDiff, error) { + opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff) + // Map each operation name to the field diffs associated with it. + for _, fd := range fds { + for _, ro := range fd.ResultingOperation { + if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok { + fieldDiffs = append(fieldDiffs, fd) + opNamesToFieldDiffs[ro] = fieldDiffs + } else { + config.Logger.Infof("%s required due to diff: %v", ro, fd) + opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd} + } + } + } + var diffs []zoneDiff + // For each operation name, create a zoneDiff which contains the operation. + for opName, fieldDiffs := range opNamesToFieldDiffs { + // Use the first field diff's field name for logging required recreate error. + diff := zoneDiff{FieldName: fieldDiffs[0].FieldName} + if opName == "Recreate" { + diff.RequiresRecreate = true + } else { + apiOp, err := convertOpNameToZoneApiOperation(opName, fieldDiffs, opts...) + if err != nil { + return diffs, err + } + diff.UpdateOp = apiOp + } + diffs = append(diffs, diff) + } + return diffs, nil +} + +func convertOpNameToZoneApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (zoneApiOperation, error) { + switch opName { + + case "updateZoneUpdateZoneOperation": + return &updateZoneUpdateZoneOperation{FieldDiffs: fieldDiffs}, nil + + default: + return nil, fmt.Errorf("no such operation with name: %v", opName) + } +} + +func extractZoneFields(r *Zone) error { + vDiscoverySpec := r.DiscoverySpec + if vDiscoverySpec == nil { + // note: explicitly not the empty object. 
+ vDiscoverySpec = &ZoneDiscoverySpec{} + } + if err := extractZoneDiscoverySpecFields(r, vDiscoverySpec); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vDiscoverySpec) { + r.DiscoverySpec = vDiscoverySpec + } + vResourceSpec := r.ResourceSpec + if vResourceSpec == nil { + // note: explicitly not the empty object. + vResourceSpec = &ZoneResourceSpec{} + } + if err := extractZoneResourceSpecFields(r, vResourceSpec); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vResourceSpec) { + r.ResourceSpec = vResourceSpec + } + vAssetStatus := r.AssetStatus + if vAssetStatus == nil { + // note: explicitly not the empty object. + vAssetStatus = &ZoneAssetStatus{} + } + if err := extractZoneAssetStatusFields(r, vAssetStatus); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAssetStatus) { + r.AssetStatus = vAssetStatus + } + return nil +} +func extractZoneDiscoverySpecFields(r *Zone, o *ZoneDiscoverySpec) error { + vCsvOptions := o.CsvOptions + if vCsvOptions == nil { + // note: explicitly not the empty object. + vCsvOptions = &ZoneDiscoverySpecCsvOptions{} + } + if err := extractZoneDiscoverySpecCsvOptionsFields(r, vCsvOptions); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vCsvOptions) { + o.CsvOptions = vCsvOptions + } + vJsonOptions := o.JsonOptions + if vJsonOptions == nil { + // note: explicitly not the empty object. 
+ vJsonOptions = &ZoneDiscoverySpecJsonOptions{} + } + if err := extractZoneDiscoverySpecJsonOptionsFields(r, vJsonOptions); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vJsonOptions) { + o.JsonOptions = vJsonOptions + } + return nil +} +func extractZoneDiscoverySpecCsvOptionsFields(r *Zone, o *ZoneDiscoverySpecCsvOptions) error { + return nil +} +func extractZoneDiscoverySpecJsonOptionsFields(r *Zone, o *ZoneDiscoverySpecJsonOptions) error { + return nil +} +func extractZoneResourceSpecFields(r *Zone, o *ZoneResourceSpec) error { + return nil +} +func extractZoneAssetStatusFields(r *Zone, o *ZoneAssetStatus) error { + return nil +} + +func postReadExtractZoneFields(r *Zone) error { + vDiscoverySpec := r.DiscoverySpec + if vDiscoverySpec == nil { + // note: explicitly not the empty object. + vDiscoverySpec = &ZoneDiscoverySpec{} + } + if err := postReadExtractZoneDiscoverySpecFields(r, vDiscoverySpec); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vDiscoverySpec) { + r.DiscoverySpec = vDiscoverySpec + } + vResourceSpec := r.ResourceSpec + if vResourceSpec == nil { + // note: explicitly not the empty object. + vResourceSpec = &ZoneResourceSpec{} + } + if err := postReadExtractZoneResourceSpecFields(r, vResourceSpec); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vResourceSpec) { + r.ResourceSpec = vResourceSpec + } + vAssetStatus := r.AssetStatus + if vAssetStatus == nil { + // note: explicitly not the empty object. + vAssetStatus = &ZoneAssetStatus{} + } + if err := postReadExtractZoneAssetStatusFields(r, vAssetStatus); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAssetStatus) { + r.AssetStatus = vAssetStatus + } + return nil +} +func postReadExtractZoneDiscoverySpecFields(r *Zone, o *ZoneDiscoverySpec) error { + vCsvOptions := o.CsvOptions + if vCsvOptions == nil { + // note: explicitly not the empty object. 
+ vCsvOptions = &ZoneDiscoverySpecCsvOptions{} + } + if err := extractZoneDiscoverySpecCsvOptionsFields(r, vCsvOptions); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vCsvOptions) { + o.CsvOptions = vCsvOptions + } + vJsonOptions := o.JsonOptions + if vJsonOptions == nil { + // note: explicitly not the empty object. + vJsonOptions = &ZoneDiscoverySpecJsonOptions{} + } + if err := extractZoneDiscoverySpecJsonOptionsFields(r, vJsonOptions); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vJsonOptions) { + o.JsonOptions = vJsonOptions + } + return nil +} +func postReadExtractZoneDiscoverySpecCsvOptionsFields(r *Zone, o *ZoneDiscoverySpecCsvOptions) error { + return nil +} +func postReadExtractZoneDiscoverySpecJsonOptionsFields(r *Zone, o *ZoneDiscoverySpecJsonOptions) error { + return nil +} +func postReadExtractZoneResourceSpecFields(r *Zone, o *ZoneResourceSpec) error { + return nil +} +func postReadExtractZoneAssetStatusFields(r *Zone, o *ZoneAssetStatus) error { + return nil +} diff --git a/mmv1/third_party/terraform/services/dataplex/zone_schema.go.tmpl b/mmv1/third_party/terraform/services/dataplex/zone_schema.go.tmpl new file mode 100644 index 000000000000..7216afa27c4a --- /dev/null +++ b/mmv1/third_party/terraform/services/dataplex/zone_schema.go.tmpl @@ -0,0 +1,376 @@ +package dataplex + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +func DCLZoneSchema() *dcl.Schema { + return &dcl.Schema{ + Info: &dcl.Info{ + Title: "Dataplex/Zone", + Description: "The Dataplex Zone resource", + StructName: "Zone", + }, + Paths: &dcl.Paths{ + Get: &dcl.Path{ + Description: "The function used to get information about a Zone", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "zone", + Required: true, + Description: "A full instance of a Zone", + }, + }, + }, + Apply: &dcl.Path{ + Description: "The function used to apply information about a Zone", + Parameters: []dcl.PathParameters{ + 
dcl.PathParameters{ + Name: "zone", + Required: true, + Description: "A full instance of a Zone", + }, + }, + }, + Delete: &dcl.Path{ + Description: "The function used to delete a Zone", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "zone", + Required: true, + Description: "A full instance of a Zone", + }, + }, + }, + DeleteAll: &dcl.Path{ + Description: "The function used to delete all Zone", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "project", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + dcl.PathParameters{ + Name: "location", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + dcl.PathParameters{ + Name: "lake", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + }, + }, + List: &dcl.Path{ + Description: "The function used to list information about many Zone", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "project", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + dcl.PathParameters{ + Name: "location", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + dcl.PathParameters{ + Name: "lake", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + }, + }, + }, + Components: &dcl.Components{ + Schemas: map[string]*dcl.Component{ + "Zone": &dcl.Component{ + Title: "Zone", + ID: "projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/lakes/{{ "{{" }}lake{{ "}}" }}/zones/{{ "{{" }}name{{ "}}" }}", + ParentContainer: "project", + HasCreate: true, + SchemaProperty: dcl.Property{ + Type: "object", + Required: []string{ + "name", + "type", + "discoverySpec", + "resourceSpec", + "project", + "location", + "lake", + }, + Properties: map[string]*dcl.Property{ + "assetStatus": &dcl.Property{ + Type: "object", + GoName: "AssetStatus", + GoType: "ZoneAssetStatus", + ReadOnly: true, + 
Description: "Output only. Aggregated status of the underlying assets of the zone.", + Properties: map[string]*dcl.Property{ + "activeAssets": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "ActiveAssets", + Description: "Number of active assets.", + }, + "securityPolicyApplyingAssets": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "SecurityPolicyApplyingAssets", + Description: "Number of assets that are in process of updating the security policy on attached resources.", + }, + "updateTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "UpdateTime", + Description: "Last update time of the status.", + }, + }, + }, + "createTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "CreateTime", + ReadOnly: true, + Description: "Output only. The time when the zone was created.", + Immutable: true, + }, + "description": &dcl.Property{ + Type: "string", + GoName: "Description", + Description: "Optional. Description of the zone.", + }, + "discoverySpec": &dcl.Property{ + Type: "object", + GoName: "DiscoverySpec", + GoType: "ZoneDiscoverySpec", + Description: "Required. Specification of the discovery feature applied to data in this zone.", + Required: []string{ + "enabled", + }, + Properties: map[string]*dcl.Property{ + "csvOptions": &dcl.Property{ + Type: "object", + GoName: "CsvOptions", + GoType: "ZoneDiscoverySpecCsvOptions", + Description: "Optional. Configuration for CSV data.", + ServerDefault: true, + Properties: map[string]*dcl.Property{ + "delimiter": &dcl.Property{ + Type: "string", + GoName: "Delimiter", + Description: "Optional. The delimiter being used to separate values. This defaults to ','.", + }, + "disableTypeInference": &dcl.Property{ + Type: "boolean", + GoName: "DisableTypeInference", + Description: "Optional. Whether to disable the inference of data type for CSV data. 
If true, all columns will be registered as strings.", + }, + "encoding": &dcl.Property{ + Type: "string", + GoName: "Encoding", + Description: "Optional. The character encoding of the data. The default is UTF-8.", + }, + "headerRows": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "HeaderRows", + Description: "Optional. The number of rows to interpret as header rows that should be skipped when reading data rows.", + }, + }, + }, + "enabled": &dcl.Property{ + Type: "boolean", + GoName: "Enabled", + Description: "Required. Whether discovery is enabled.", + }, + "excludePatterns": &dcl.Property{ + Type: "array", + GoName: "ExcludePatterns", + Description: "Optional. The list of patterns to apply for selecting data to exclude during discovery. For Cloud Storage bucket assets, these are interpreted as glob patterns used to match object names. For BigQuery dataset assets, these are interpreted as patterns to match table names.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + "includePatterns": &dcl.Property{ + Type: "array", + GoName: "IncludePatterns", + Description: "Optional. The list of patterns to apply for selecting data to include during discovery if only a subset of the data should considered. For Cloud Storage bucket assets, these are interpreted as glob patterns used to match object names. For BigQuery dataset assets, these are interpreted as patterns to match table names.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + "jsonOptions": &dcl.Property{ + Type: "object", + GoName: "JsonOptions", + GoType: "ZoneDiscoverySpecJsonOptions", + Description: "Optional. Configuration for Json data.", + ServerDefault: true, + Properties: map[string]*dcl.Property{ + "disableTypeInference": &dcl.Property{ + Type: "boolean", + GoName: "DisableTypeInference", + Description: "Optional. 
Whether to disable the inference of data type for Json data. If true, all columns will be registered as their primitive types (strings, number or boolean).", + }, + "encoding": &dcl.Property{ + Type: "string", + GoName: "Encoding", + Description: "Optional. The character encoding of the data. The default is UTF-8.", + }, + }, + }, + "schedule": &dcl.Property{ + Type: "string", + GoName: "Schedule", + Description: "Optional. Cron schedule (https://en.wikipedia.org/wiki/Cron) for running discovery periodically. Successive discovery runs must be scheduled at least 60 minutes apart. The default value is to run discovery every 60 minutes. To explicitly set a timezone to the cron tab, apply a prefix in the cron tab: \"CRON_TZ=${IANA_TIME_ZONE}\" or TZ=${IANA_TIME_ZONE}\". The ${IANA_TIME_ZONE} may only be a valid string from IANA time zone database. For example, \"CRON_TZ=America/New_York 1 * * * *\", or \"TZ=America/New_York 1 * * * *\".", + ServerDefault: true, + }, + }, + }, + "displayName": &dcl.Property{ + Type: "string", + GoName: "DisplayName", + Description: "Optional. User friendly display name.", + }, + "labels": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "Labels", + Description: "Optional. 
User defined labels for the zone.", + }, + "lake": &dcl.Property{ + Type: "string", + GoName: "Lake", + Description: "The lake for the resource", + Immutable: true, + Parameter: true, + }, + "location": &dcl.Property{ + Type: "string", + GoName: "Location", + Description: "The location for the resource", + Immutable: true, + Parameter: true, + }, + "name": &dcl.Property{ + Type: "string", + GoName: "Name", + Description: "The name of the zone.", + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Dataplex/Zone", + Field: "selfLink", + Parent: true, + }, + }, + HasLongForm: true, + }, + "project": &dcl.Property{ + Type: "string", + GoName: "Project", + Description: "The project for the resource", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Cloudresourcemanager/Project", + Field: "name", + Parent: true, + }, + }, + Parameter: true, + }, + "resourceSpec": &dcl.Property{ + Type: "object", + GoName: "ResourceSpec", + GoType: "ZoneResourceSpec", + Description: "Required. Immutable. Specification of the resources that are referenced by the assets within this zone.", + Immutable: true, + Required: []string{ + "locationType", + }, + Properties: map[string]*dcl.Property{ + "locationType": &dcl.Property{ + Type: "string", + GoName: "LocationType", + GoType: "ZoneResourceSpecLocationTypeEnum", + Description: "Required. Immutable. The location type of the resources that are allowed to be attached to the assets within this zone. Possible values: LOCATION_TYPE_UNSPECIFIED, SINGLE_REGION, MULTI_REGION", + Immutable: true, + Enum: []string{ + "LOCATION_TYPE_UNSPECIFIED", + "SINGLE_REGION", + "MULTI_REGION", + }, + }, + }, + }, + "state": &dcl.Property{ + Type: "string", + GoName: "State", + GoType: "ZoneStateEnum", + ReadOnly: true, + Description: "Output only. Current state of the zone. 
Possible values: STATE_UNSPECIFIED, ACTIVE, CREATING, DELETING, ACTION_REQUIRED", + Immutable: true, + Enum: []string{ + "STATE_UNSPECIFIED", + "ACTIVE", + "CREATING", + "DELETING", + "ACTION_REQUIRED", + }, + }, + "type": &dcl.Property{ + Type: "string", + GoName: "Type", + GoType: "ZoneTypeEnum", + Description: "Required. Immutable. The type of the zone. Possible values: TYPE_UNSPECIFIED, RAW, CURATED", + Immutable: true, + Enum: []string{ + "TYPE_UNSPECIFIED", + "RAW", + "CURATED", + }, + }, + "uid": &dcl.Property{ + Type: "string", + GoName: "Uid", + ReadOnly: true, + Description: "Output only. System generated globally unique ID for the zone. This ID will be different if the zone is deleted and re-created with the same name.", + Immutable: true, + }, + "updateTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "UpdateTime", + ReadOnly: true, + Description: "Output only. The time when the zone was last updated.", + Immutable: true, + }, + }, + }, + }, + }, + }, + } +} diff --git a/mmv1/third_party/terraform/services/dataproc/autoscaling_policy.go.tmpl b/mmv1/third_party/terraform/services/dataproc/autoscaling_policy.go.tmpl new file mode 100644 index 000000000000..311864119df1 --- /dev/null +++ b/mmv1/third_party/terraform/services/dataproc/autoscaling_policy.go.tmpl @@ -0,0 +1,571 @@ +package dataproc + +import ( + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "time" + + "google.golang.org/api/googleapi" + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +type AutoscalingPolicy struct { + Name *string `json:"name"` + BasicAlgorithm *AutoscalingPolicyBasicAlgorithm `json:"basicAlgorithm"` + WorkerConfig *AutoscalingPolicyWorkerConfig `json:"workerConfig"` + SecondaryWorkerConfig *AutoscalingPolicySecondaryWorkerConfig `json:"secondaryWorkerConfig"` + Project *string `json:"project"` + Location *string `json:"location"` +} + +func (r *AutoscalingPolicy) String() string { + return dcl.SprintResource(r) 
+} + +type AutoscalingPolicyBasicAlgorithm struct { + empty bool `json:"-"` + YarnConfig *AutoscalingPolicyBasicAlgorithmYarnConfig `json:"yarnConfig"` + CooldownPeriod *string `json:"cooldownPeriod"` +} + +type jsonAutoscalingPolicyBasicAlgorithm AutoscalingPolicyBasicAlgorithm + +func (r *AutoscalingPolicyBasicAlgorithm) UnmarshalJSON(data []byte) error { + var res jsonAutoscalingPolicyBasicAlgorithm + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyAutoscalingPolicyBasicAlgorithm + } else { + + r.YarnConfig = res.YarnConfig + + r.CooldownPeriod = res.CooldownPeriod + + } + return nil +} + +// This object is used to assert a desired state where this AutoscalingPolicyBasicAlgorithm is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyAutoscalingPolicyBasicAlgorithm *AutoscalingPolicyBasicAlgorithm = &AutoscalingPolicyBasicAlgorithm{empty: true} + +func (r *AutoscalingPolicyBasicAlgorithm) Empty() bool { + return r.empty +} + +func (r *AutoscalingPolicyBasicAlgorithm) String() string { + return dcl.SprintResource(r) +} + +func (r *AutoscalingPolicyBasicAlgorithm) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type AutoscalingPolicyBasicAlgorithmYarnConfig struct { + empty bool `json:"-"` + GracefulDecommissionTimeout *string `json:"gracefulDecommissionTimeout"` + ScaleUpFactor *float64 `json:"scaleUpFactor"` + ScaleDownFactor *float64 `json:"scaleDownFactor"` + ScaleUpMinWorkerFraction *float64 `json:"scaleUpMinWorkerFraction"` + ScaleDownMinWorkerFraction *float64 `json:"scaleDownMinWorkerFraction"` +} + +type jsonAutoscalingPolicyBasicAlgorithmYarnConfig 
AutoscalingPolicyBasicAlgorithmYarnConfig + +func (r *AutoscalingPolicyBasicAlgorithmYarnConfig) UnmarshalJSON(data []byte) error { + var res jsonAutoscalingPolicyBasicAlgorithmYarnConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyAutoscalingPolicyBasicAlgorithmYarnConfig + } else { + + r.GracefulDecommissionTimeout = res.GracefulDecommissionTimeout + + r.ScaleUpFactor = res.ScaleUpFactor + + r.ScaleDownFactor = res.ScaleDownFactor + + r.ScaleUpMinWorkerFraction = res.ScaleUpMinWorkerFraction + + r.ScaleDownMinWorkerFraction = res.ScaleDownMinWorkerFraction + + } + return nil +} + +// This object is used to assert a desired state where this AutoscalingPolicyBasicAlgorithmYarnConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyAutoscalingPolicyBasicAlgorithmYarnConfig *AutoscalingPolicyBasicAlgorithmYarnConfig = &AutoscalingPolicyBasicAlgorithmYarnConfig{empty: true} + +func (r *AutoscalingPolicyBasicAlgorithmYarnConfig) Empty() bool { + return r.empty +} + +func (r *AutoscalingPolicyBasicAlgorithmYarnConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *AutoscalingPolicyBasicAlgorithmYarnConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type AutoscalingPolicyWorkerConfig struct { + empty bool `json:"-"` + MinInstances *int64 `json:"minInstances"` + MaxInstances *int64 `json:"maxInstances"` + Weight *int64 `json:"weight"` +} + +type jsonAutoscalingPolicyWorkerConfig AutoscalingPolicyWorkerConfig + +func (r *AutoscalingPolicyWorkerConfig) UnmarshalJSON(data []byte) error { + var res jsonAutoscalingPolicyWorkerConfig + if err := 
json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyAutoscalingPolicyWorkerConfig + } else { + + r.MinInstances = res.MinInstances + + r.MaxInstances = res.MaxInstances + + r.Weight = res.Weight + + } + return nil +} + +// This object is used to assert a desired state where this AutoscalingPolicyWorkerConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyAutoscalingPolicyWorkerConfig *AutoscalingPolicyWorkerConfig = &AutoscalingPolicyWorkerConfig{empty: true} + +func (r *AutoscalingPolicyWorkerConfig) Empty() bool { + return r.empty +} + +func (r *AutoscalingPolicyWorkerConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *AutoscalingPolicyWorkerConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type AutoscalingPolicySecondaryWorkerConfig struct { + empty bool `json:"-"` + MinInstances *int64 `json:"minInstances"` + MaxInstances *int64 `json:"maxInstances"` + Weight *int64 `json:"weight"` +} + +type jsonAutoscalingPolicySecondaryWorkerConfig AutoscalingPolicySecondaryWorkerConfig + +func (r *AutoscalingPolicySecondaryWorkerConfig) UnmarshalJSON(data []byte) error { + var res jsonAutoscalingPolicySecondaryWorkerConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyAutoscalingPolicySecondaryWorkerConfig + } else { + + r.MinInstances = res.MinInstances + + r.MaxInstances = res.MaxInstances + + r.Weight = res.Weight + + } + return nil +} + +// This object is used to assert a desired state where this AutoscalingPolicySecondaryWorkerConfig is +// empty. 
Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyAutoscalingPolicySecondaryWorkerConfig *AutoscalingPolicySecondaryWorkerConfig = &AutoscalingPolicySecondaryWorkerConfig{empty: true} + +func (r *AutoscalingPolicySecondaryWorkerConfig) Empty() bool { + return r.empty +} + +func (r *AutoscalingPolicySecondaryWorkerConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *AutoscalingPolicySecondaryWorkerConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +// Describe returns a simple description of this resource to ensure that automated tools +// can identify it. +func (r *AutoscalingPolicy) Describe() dcl.ServiceTypeVersion { + return dcl.ServiceTypeVersion{ + Service: "dataproc", + Type: "AutoscalingPolicy", + Version: "beta", + } +} + +func (r *AutoscalingPolicy) ID() (string, error) { + if err := extractAutoscalingPolicyFields(r); err != nil { + return "", err + } + nr := r.urlNormalized() + params := map[string]interface{}{ + "name": dcl.ValueOrEmptyString(nr.Name), + "basic_algorithm": dcl.ValueOrEmptyString(nr.BasicAlgorithm), + "worker_config": dcl.ValueOrEmptyString(nr.WorkerConfig), + "secondary_worker_config": dcl.ValueOrEmptyString(nr.SecondaryWorkerConfig), + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + } + return dcl.Nprintf("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/autoscalingPolicies/{{ "{{" }}name{{ "}}" }}", params), nil +} + +const AutoscalingPolicyMaxPage = -1 + +type AutoscalingPolicyList struct { + Items []*AutoscalingPolicy + + nextToken string + + pageSize int32 + + resource *AutoscalingPolicy +} + +func (l *AutoscalingPolicyList) HasNext() bool { + return l.nextToken != "" 
+} + +func (l *AutoscalingPolicyList) Next(ctx context.Context, c *Client) error { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if !l.HasNext() { + return fmt.Errorf("no next page") + } + items, token, err := c.listAutoscalingPolicy(ctx, l.resource, l.nextToken, l.pageSize) + if err != nil { + return err + } + l.Items = items + l.nextToken = token + return err +} + +func (c *Client) ListAutoscalingPolicy(ctx context.Context, project, location string) (*AutoscalingPolicyList, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + return c.ListAutoscalingPolicyWithMaxResults(ctx, project, location, AutoscalingPolicyMaxPage) + +} + +func (c *Client) ListAutoscalingPolicyWithMaxResults(ctx context.Context, project, location string, pageSize int32) (*AutoscalingPolicyList, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // Create a resource object so that we can use proper url normalization methods. + r := &AutoscalingPolicy{ + Project: &project, + Location: &location, + } + items, token, err := c.listAutoscalingPolicy(ctx, r, "", pageSize) + if err != nil { + return nil, err + } + return &AutoscalingPolicyList{ + Items: items, + nextToken: token, + pageSize: pageSize, + resource: r, + }, nil +} + +func (c *Client) GetAutoscalingPolicy(ctx context.Context, r *AutoscalingPolicy) (*AutoscalingPolicy, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // This is *purposefully* supressing errors. + // This function is used with url-normalized values + not URL normalized values. + // URL Normalized values will throw unintentional errors, since those values are not of the proper parent form. 
+ extractAutoscalingPolicyFields(r) + + b, err := c.getAutoscalingPolicyRaw(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + return nil, &googleapi.Error{ + Code: 404, + Message: err.Error(), + } + } + return nil, err + } + result, err := unmarshalAutoscalingPolicy(b, c, r) + if err != nil { + return nil, err + } + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name + + c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) + result, err = canonicalizeAutoscalingPolicyNewState(c, result, r) + if err != nil { + return nil, err + } + if err := postReadExtractAutoscalingPolicyFields(result); err != nil { + return result, err + } + c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result) + + return result, nil +} + +func (c *Client) DeleteAutoscalingPolicy(ctx context.Context, r *AutoscalingPolicy) error { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if r == nil { + return fmt.Errorf("AutoscalingPolicy resource is nil") + } + c.Config.Logger.InfoWithContext(ctx, "Deleting AutoscalingPolicy...") + deleteOp := deleteAutoscalingPolicyOperation{} + return deleteOp.do(ctx, r, c) +} + +// DeleteAllAutoscalingPolicy deletes all resources that the filter functions returns true on. 
+func (c *Client) DeleteAllAutoscalingPolicy(ctx context.Context, project, location string, filter func(*AutoscalingPolicy) bool) error { + listObj, err := c.ListAutoscalingPolicy(ctx, project, location) + if err != nil { + return err + } + + err = c.deleteAllAutoscalingPolicy(ctx, filter, listObj.Items) + if err != nil { + return err + } + for listObj.HasNext() { + err = listObj.Next(ctx, c) + if err != nil { + return nil + } + err = c.deleteAllAutoscalingPolicy(ctx, filter, listObj.Items) + if err != nil { + return err + } + } + return nil +} + +func (c *Client) ApplyAutoscalingPolicy(ctx context.Context, rawDesired *AutoscalingPolicy, opts ...dcl.ApplyOption) (*AutoscalingPolicy, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + ctx = dcl.ContextWithRequestID(ctx) + var resultNewState *AutoscalingPolicy + err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + newState, err := applyAutoscalingPolicyHelper(c, ctx, rawDesired, opts...) + resultNewState = newState + if err != nil { + // If the error is 409, there is conflict in resource update. + // Here we want to apply changes based on latest state. + if dcl.IsConflictError(err) { + return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} + } + return nil, err + } + return nil, nil + }, c.Config.RetryProvider) + return resultNewState, err +} + +func applyAutoscalingPolicyHelper(c *Client, ctx context.Context, rawDesired *AutoscalingPolicy, opts ...dcl.ApplyOption) (*AutoscalingPolicy, error) { + c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyAutoscalingPolicy...") + c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) + + // 1.1: Validation of user-specified fields in desired state. 
+ if err := rawDesired.validate(); err != nil { + return nil, err + } + + if err := extractAutoscalingPolicyFields(rawDesired); err != nil { + return nil, err + } + + initial, desired, fieldDiffs, err := c.autoscalingPolicyDiffsForRawDesired(ctx, rawDesired, opts...) + if err != nil { + return nil, fmt.Errorf("failed to create a diff: %w", err) + } + + diffs, err := convertFieldDiffsToAutoscalingPolicyDiffs(c.Config, fieldDiffs, opts) + if err != nil { + return nil, err + } + + // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). + + // 2.3: Lifecycle Directive Check + var create bool + lp := dcl.FetchLifecycleParams(opts) + if initial == nil { + if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} + } + create = true + } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), + } + } else { + for _, d := range diffs { + if d.RequiresRecreate { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), + } + } + if dcl.HasLifecycleParam(lp, dcl.BlockModification) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} + } + } + } + + // 2.4 Imperative Request Planning + var ops []autoscalingPolicyApiOperation + if create { + ops = append(ops, &createAutoscalingPolicyOperation{}) + } else { + for _, d := range diffs { + ops = append(ops, d.UpdateOp) + } + } + c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) + + // 2.5 Request Actuation + for _, op := range ops { + c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) + if err := op.do(ctx, desired, c); err != nil { + c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: 
%v", op, op, err) + return nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) + } + return applyAutoscalingPolicyDiff(c, ctx, desired, rawDesired, ops, opts...) +} + +func applyAutoscalingPolicyDiff(c *Client, ctx context.Context, desired *AutoscalingPolicy, rawDesired *AutoscalingPolicy, ops []autoscalingPolicyApiOperation, opts ...dcl.ApplyOption) (*AutoscalingPolicy, error) { + // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") + rawNew, err := c.GetAutoscalingPolicy(ctx, desired) + if err != nil { + return nil, err + } + // Get additional values from the first response. + // These values should be merged into the newState above. + if len(ops) > 0 { + lastOp := ops[len(ops)-1] + if o, ok := lastOp.(*createAutoscalingPolicyOperation); ok { + if r, hasR := o.FirstResponse(); hasR { + + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") + + fullResp, err := unmarshalMapAutoscalingPolicy(r, c, rawDesired) + if err != nil { + return nil, err + } + + rawNew, err = canonicalizeAutoscalingPolicyNewState(c, rawNew, fullResp) + if err != nil { + return nil, err + } + } + } + } + + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) + // 3.2b Canonicalization of raw new state using raw desired state + newState, err := canonicalizeAutoscalingPolicyNewState(c, rawNew, rawDesired) + if err != nil { + return rawNew, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) + // 3.3 Comparison of the new state and raw desired state. 
+ // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE + newDesired, err := canonicalizeAutoscalingPolicyDesiredState(rawDesired, newState) + if err != nil { + return newState, err + } + + if err := postReadExtractAutoscalingPolicyFields(newState); err != nil { + return newState, err + } + + // Need to ensure any transformations made here match acceptably in differ. + if err := postReadExtractAutoscalingPolicyFields(newDesired); err != nil { + return newState, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) + newDiffs, err := diffAutoscalingPolicy(c, newDesired, newState) + if err != nil { + return newState, err + } + + if len(newDiffs) == 0 { + c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") + } else { + c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) + diffMessages := make([]string, len(newDiffs)) + for i, d := range newDiffs { + diffMessages[i] = fmt.Sprintf("%v", d) + } + return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} + } + c.Config.Logger.InfoWithContext(ctx, "Done Apply.") + return newState, nil +} diff --git a/mmv1/third_party/terraform/services/dataproc/autoscaling_policy_internal.go.tmpl b/mmv1/third_party/terraform/services/dataproc/autoscaling_policy_internal.go.tmpl new file mode 100644 index 000000000000..9d3c4c9197a8 --- /dev/null +++ b/mmv1/third_party/terraform/services/dataproc/autoscaling_policy_internal.go.tmpl @@ -0,0 +1,2102 @@ +package dataproc + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "strings" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +func (r *AutoscalingPolicy) validate() error { + + if err := dcl.Required(r, "name"); err != nil { + return err + } + if err := dcl.Required(r, "basicAlgorithm"); err != nil { + return err + } + if err := dcl.Required(r, "workerConfig"); err != nil { + return err + } + if err := 
dcl.RequiredParameter(r.Project, "Project"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Location, "Location"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.BasicAlgorithm) { + if err := r.BasicAlgorithm.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.WorkerConfig) { + if err := r.WorkerConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.SecondaryWorkerConfig) { + if err := r.SecondaryWorkerConfig.validate(); err != nil { + return err + } + } + return nil +} +func (r *AutoscalingPolicyBasicAlgorithm) validate() error { + if err := dcl.Required(r, "yarnConfig"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.YarnConfig) { + if err := r.YarnConfig.validate(); err != nil { + return err + } + } + return nil +} +func (r *AutoscalingPolicyBasicAlgorithmYarnConfig) validate() error { + if err := dcl.Required(r, "gracefulDecommissionTimeout"); err != nil { + return err + } + if err := dcl.Required(r, "scaleUpFactor"); err != nil { + return err + } + if err := dcl.Required(r, "scaleDownFactor"); err != nil { + return err + } + return nil +} +func (r *AutoscalingPolicyWorkerConfig) validate() error { + if err := dcl.Required(r, "maxInstances"); err != nil { + return err + } + return nil +} +func (r *AutoscalingPolicySecondaryWorkerConfig) validate() error { + return nil +} +func (r *AutoscalingPolicy) basePath() string { + params := map[string]interface{}{} + return dcl.Nprintf("https://dataproc.googleapis.com/v1beta2/", params) +} + +func (r *AutoscalingPolicy) getURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/autoscalingPolicies/{{ "{{" }}name{{ "}}" }}", 
nr.basePath(), userBasePath, params), nil +} + +func (r *AutoscalingPolicy) listURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/autoscalingPolicies", nr.basePath(), userBasePath, params), nil + +} + +func (r *AutoscalingPolicy) createURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/autoscalingPolicies", nr.basePath(), userBasePath, params), nil + +} + +func (r *AutoscalingPolicy) deleteURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/autoscalingPolicies/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +// autoscalingPolicyApiOperation represents a mutable operation in the underlying REST +// API such as Create, Update, or Delete. +type autoscalingPolicyApiOperation interface { + do(context.Context, *AutoscalingPolicy, *Client) error +} + +// newUpdateAutoscalingPolicyUpdateAutoscalingPolicyRequest creates a request for an +// AutoscalingPolicy resource's UpdateAutoscalingPolicy update type by filling in the update +// fields based on the intended state of the resource. 
+func newUpdateAutoscalingPolicyUpdateAutoscalingPolicyRequest(ctx context.Context, f *AutoscalingPolicy, c *Client) (map[string]interface{}, error) { + req := map[string]interface{}{} + res := f + _ = res + + if v, err := expandAutoscalingPolicyBasicAlgorithm(c, f.BasicAlgorithm, res); err != nil { + return nil, fmt.Errorf("error expanding BasicAlgorithm into basicAlgorithm: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["basicAlgorithm"] = v + } + if v, err := expandAutoscalingPolicyWorkerConfig(c, f.WorkerConfig, res); err != nil { + return nil, fmt.Errorf("error expanding WorkerConfig into workerConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["workerConfig"] = v + } + if v, err := expandAutoscalingPolicySecondaryWorkerConfig(c, f.SecondaryWorkerConfig, res); err != nil { + return nil, fmt.Errorf("error expanding SecondaryWorkerConfig into secondaryWorkerConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["secondaryWorkerConfig"] = v + } + if v, err := dcl.DeriveField("%s", f.Name); err != nil { + return nil, err + } else { + req["id"] = v + } + + return req, nil +} + +// marshalUpdateAutoscalingPolicyUpdateAutoscalingPolicyRequest converts the update into +// the final JSON request body. +func marshalUpdateAutoscalingPolicyUpdateAutoscalingPolicyRequest(c *Client, m map[string]interface{}) ([]byte, error) { + + return json.Marshal(m) +} + +type updateAutoscalingPolicyUpdateAutoscalingPolicyOperation struct { + // If the update operation has the REQUIRES_APPLY_OPTIONS trait, this will be populated. + // Usually it will be nil - this is to prevent us from accidentally depending on apply + // options, which should usually be unnecessary. + ApplyOptions []dcl.ApplyOption + FieldDiffs []*dcl.FieldDiff +} + +// do creates a request and sends it to the appropriate URL. In most operations, +// do will transcribe a subset of the resource into a request object and send a +// PUT request to a single URL. 
+ +func (op *updateAutoscalingPolicyUpdateAutoscalingPolicyOperation) do(ctx context.Context, r *AutoscalingPolicy, c *Client) error { + _, err := c.GetAutoscalingPolicy(ctx, r) + if err != nil { + return err + } + + u, err := r.updateURL(c.Config.BasePath, "UpdateAutoscalingPolicy") + if err != nil { + return err + } + + req, err := newUpdateAutoscalingPolicyUpdateAutoscalingPolicyRequest(ctx, r, c) + if err != nil { + return err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created update: %#v", req) + body, err := marshalUpdateAutoscalingPolicyUpdateAutoscalingPolicyRequest(c, req) + if err != nil { + return err + } + _, err = dcl.SendRequest(ctx, c.Config, "PUT", u, bytes.NewBuffer(body), c.Config.RetryProvider) + if err != nil { + return err + } + + return nil +} + +func (c *Client) listAutoscalingPolicyRaw(ctx context.Context, r *AutoscalingPolicy, pageToken string, pageSize int32) ([]byte, error) { + u, err := r.urlNormalized().listURL(c.Config.BasePath) + if err != nil { + return nil, err + } + + m := make(map[string]string) + if pageToken != "" { + m["pageToken"] = pageToken + } + + if pageSize != AutoscalingPolicyMaxPage { + m["pageSize"] = fmt.Sprintf("%v", pageSize) + } + + u, err = dcl.AddQueryParams(u, m) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + return ioutil.ReadAll(resp.Response.Body) +} + +type listAutoscalingPolicyOperation struct { + Policies []map[string]interface{} `json:"policies"` + Token string `json:"nextPageToken"` +} + +func (c *Client) listAutoscalingPolicy(ctx context.Context, r *AutoscalingPolicy, pageToken string, pageSize int32) ([]*AutoscalingPolicy, string, error) { + b, err := c.listAutoscalingPolicyRaw(ctx, r, pageToken, pageSize) + if err != nil { + return nil, "", err + } + + var m listAutoscalingPolicyOperation + if err := json.Unmarshal(b, &m); err != nil 
{ + return nil, "", err + } + + var l []*AutoscalingPolicy + for _, v := range m.Policies { + res, err := unmarshalMapAutoscalingPolicy(v, c, r) + if err != nil { + return nil, m.Token, err + } + res.Project = r.Project + res.Location = r.Location + l = append(l, res) + } + + return l, m.Token, nil +} + +func (c *Client) deleteAllAutoscalingPolicy(ctx context.Context, f func(*AutoscalingPolicy) bool, resources []*AutoscalingPolicy) error { + var errors []string + for _, res := range resources { + if f(res) { + // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. + err := c.DeleteAutoscalingPolicy(ctx, res) + if err != nil { + errors = append(errors, err.Error()) + } + } + } + if len(errors) > 0 { + return fmt.Errorf("%v", strings.Join(errors, "\n")) + } else { + return nil + } +} + +type deleteAutoscalingPolicyOperation struct{} + +func (op *deleteAutoscalingPolicyOperation) do(ctx context.Context, r *AutoscalingPolicy, c *Client) error { + r, err := c.GetAutoscalingPolicy(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + c.Config.Logger.InfoWithContextf(ctx, "AutoscalingPolicy not found, returning. Original error: %v", err) + return nil + } + c.Config.Logger.WarningWithContextf(ctx, "GetAutoscalingPolicy checking for existence. error: %v", err) + return err + } + + u, err := r.deleteURL(c.Config.BasePath) + if err != nil { + return err + } + + // Delete should never have a body + body := &bytes.Buffer{} + _, err = dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider) + if err != nil { + return fmt.Errorf("failed to delete AutoscalingPolicy: %w", err) + } + + // We saw a race condition where for some successful delete operation, the Get calls returned resources for a short duration. + // This is the reason we are adding retry to handle that case. 
+ retriesRemaining := 10 + dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + _, err := c.GetAutoscalingPolicy(ctx, r) + if dcl.IsNotFound(err) { + return nil, nil + } + if retriesRemaining > 0 { + retriesRemaining-- + return &dcl.RetryDetails{}, dcl.OperationNotDone{} + } + return nil, dcl.NotDeletedError{ExistingResource: r} + }, c.Config.RetryProvider) + return nil +} + +// Create operations are similar to Update operations, although they do not have +// specific request objects. The Create request object is the json encoding of +// the resource, which is modified by res.marshal to form the base request body. +type createAutoscalingPolicyOperation struct { + response map[string]interface{} +} + +func (op *createAutoscalingPolicyOperation) FirstResponse() (map[string]interface{}, bool) { + return op.response, len(op.response) > 0 +} + +func (op *createAutoscalingPolicyOperation) do(ctx context.Context, r *AutoscalingPolicy, c *Client) error { + c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r) + u, err := r.createURL(c.Config.BasePath) + if err != nil { + return err + } + + req, err := r.marshal(c) + if err != nil { + return err + } + resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider) + if err != nil { + return err + } + + o, err := dcl.ResponseBodyAsJSON(resp) + if err != nil { + return fmt.Errorf("error decoding response body into JSON: %w", err) + } + op.response = o + + if _, err := c.GetAutoscalingPolicy(ctx, r); err != nil { + c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) + return err + } + + return nil +} + +func (c *Client) getAutoscalingPolicyRaw(ctx context.Context, r *AutoscalingPolicy) ([]byte, error) { + + u, err := r.getURL(c.Config.BasePath) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer 
resp.Response.Body.Close() + b, err := ioutil.ReadAll(resp.Response.Body) + if err != nil { + return nil, err + } + + return b, nil +} + +func (c *Client) autoscalingPolicyDiffsForRawDesired(ctx context.Context, rawDesired *AutoscalingPolicy, opts ...dcl.ApplyOption) (initial, desired *AutoscalingPolicy, diffs []*dcl.FieldDiff, err error) { + c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") + // First, let us see if the user provided a state hint. If they did, we will start fetching based on that. + var fetchState *AutoscalingPolicy + if sh := dcl.FetchStateHint(opts); sh != nil { + if r, ok := sh.(*AutoscalingPolicy); !ok { + c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected AutoscalingPolicy, got %T", sh) + } else { + fetchState = r + } + } + if fetchState == nil { + fetchState = rawDesired + } + + // 1.2: Retrieval of raw initial state from API + rawInitial, err := c.GetAutoscalingPolicy(ctx, fetchState) + if rawInitial == nil { + if !dcl.IsNotFound(err) { + c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a AutoscalingPolicy resource already exists: %s", err) + return nil, nil, nil, fmt.Errorf("failed to retrieve AutoscalingPolicy resource: %v", err) + } + c.Config.Logger.InfoWithContext(ctx, "Found that AutoscalingPolicy resource did not exist.") + // Perform canonicalization to pick up defaults. + desired, err = canonicalizeAutoscalingPolicyDesiredState(rawDesired, rawInitial) + return nil, desired, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Found initial state for AutoscalingPolicy: %v", rawInitial) + c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for AutoscalingPolicy: %v", rawDesired) + + // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. 
+ if err := extractAutoscalingPolicyFields(rawInitial); err != nil { + return nil, nil, nil, err + } + + // 1.3: Canonicalize raw initial state into initial state. + initial, err = canonicalizeAutoscalingPolicyInitialState(rawInitial, rawDesired) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for AutoscalingPolicy: %v", initial) + + // 1.4: Canonicalize raw desired state into desired state. + desired, err = canonicalizeAutoscalingPolicyDesiredState(rawDesired, rawInitial, opts...) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for AutoscalingPolicy: %v", desired) + + // 2.1: Comparison of initial and desired state. + diffs, err = diffAutoscalingPolicy(c, desired, initial, opts...) + return initial, desired, diffs, err +} + +func canonicalizeAutoscalingPolicyInitialState(rawInitial, rawDesired *AutoscalingPolicy) (*AutoscalingPolicy, error) { + // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. + return rawInitial, nil +} + +/* +* Canonicalizers +* +* These are responsible for converting either a user-specified config or a +* GCP API response to a standard format that can be used for difference checking. +* */ + +func canonicalizeAutoscalingPolicyDesiredState(rawDesired, rawInitial *AutoscalingPolicy, opts ...dcl.ApplyOption) (*AutoscalingPolicy, error) { + + if rawInitial == nil { + // Since the initial state is empty, the desired state is all we have. + // We canonicalize the remaining nested objects with nil to pick up defaults. + rawDesired.BasicAlgorithm = canonicalizeAutoscalingPolicyBasicAlgorithm(rawDesired.BasicAlgorithm, nil, opts...) + rawDesired.WorkerConfig = canonicalizeAutoscalingPolicyWorkerConfig(rawDesired.WorkerConfig, nil, opts...) + rawDesired.SecondaryWorkerConfig = canonicalizeAutoscalingPolicySecondaryWorkerConfig(rawDesired.SecondaryWorkerConfig, nil, opts...) 
+ + return rawDesired, nil + } + canonicalDesired := &AutoscalingPolicy{} + if dcl.StringCanonicalize(rawDesired.Name, rawInitial.Name) { + canonicalDesired.Name = rawInitial.Name + } else { + canonicalDesired.Name = rawDesired.Name + } + canonicalDesired.BasicAlgorithm = canonicalizeAutoscalingPolicyBasicAlgorithm(rawDesired.BasicAlgorithm, rawInitial.BasicAlgorithm, opts...) + canonicalDesired.WorkerConfig = canonicalizeAutoscalingPolicyWorkerConfig(rawDesired.WorkerConfig, rawInitial.WorkerConfig, opts...) + canonicalDesired.SecondaryWorkerConfig = canonicalizeAutoscalingPolicySecondaryWorkerConfig(rawDesired.SecondaryWorkerConfig, rawInitial.SecondaryWorkerConfig, opts...) + if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) { + canonicalDesired.Project = rawInitial.Project + } else { + canonicalDesired.Project = rawDesired.Project + } + if dcl.NameToSelfLink(rawDesired.Location, rawInitial.Location) { + canonicalDesired.Location = rawInitial.Location + } else { + canonicalDesired.Location = rawDesired.Location + } + return canonicalDesired, nil +} + +func canonicalizeAutoscalingPolicyNewState(c *Client, rawNew, rawDesired *AutoscalingPolicy) (*AutoscalingPolicy, error) { + + if dcl.IsEmptyValueIndirect(rawNew.Name) && dcl.IsEmptyValueIndirect(rawDesired.Name) { + rawNew.Name = rawDesired.Name + } else { + if dcl.StringCanonicalize(rawDesired.Name, rawNew.Name) { + rawNew.Name = rawDesired.Name + } + } + + if dcl.IsEmptyValueIndirect(rawNew.BasicAlgorithm) && dcl.IsEmptyValueIndirect(rawDesired.BasicAlgorithm) { + rawNew.BasicAlgorithm = rawDesired.BasicAlgorithm + } else { + rawNew.BasicAlgorithm = canonicalizeNewAutoscalingPolicyBasicAlgorithm(c, rawDesired.BasicAlgorithm, rawNew.BasicAlgorithm) + } + + if dcl.IsEmptyValueIndirect(rawNew.WorkerConfig) && dcl.IsEmptyValueIndirect(rawDesired.WorkerConfig) { + rawNew.WorkerConfig = rawDesired.WorkerConfig + } else { + rawNew.WorkerConfig = canonicalizeNewAutoscalingPolicyWorkerConfig(c, 
rawDesired.WorkerConfig, rawNew.WorkerConfig) + } + + if dcl.IsEmptyValueIndirect(rawNew.SecondaryWorkerConfig) && dcl.IsEmptyValueIndirect(rawDesired.SecondaryWorkerConfig) { + rawNew.SecondaryWorkerConfig = rawDesired.SecondaryWorkerConfig + } else { + rawNew.SecondaryWorkerConfig = canonicalizeNewAutoscalingPolicySecondaryWorkerConfig(c, rawDesired.SecondaryWorkerConfig, rawNew.SecondaryWorkerConfig) + } + + rawNew.Project = rawDesired.Project + + rawNew.Location = rawDesired.Location + + return rawNew, nil +} + +func canonicalizeAutoscalingPolicyBasicAlgorithm(des, initial *AutoscalingPolicyBasicAlgorithm, opts ...dcl.ApplyOption) *AutoscalingPolicyBasicAlgorithm { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &AutoscalingPolicyBasicAlgorithm{} + + cDes.YarnConfig = canonicalizeAutoscalingPolicyBasicAlgorithmYarnConfig(des.YarnConfig, initial.YarnConfig, opts...) + if dcl.StringCanonicalize(des.CooldownPeriod, initial.CooldownPeriod) || dcl.IsZeroValue(des.CooldownPeriod) { + cDes.CooldownPeriod = initial.CooldownPeriod + } else { + cDes.CooldownPeriod = des.CooldownPeriod + } + + return cDes +} + +func canonicalizeAutoscalingPolicyBasicAlgorithmSlice(des, initial []AutoscalingPolicyBasicAlgorithm, opts ...dcl.ApplyOption) []AutoscalingPolicyBasicAlgorithm { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]AutoscalingPolicyBasicAlgorithm, 0, len(des)) + for _, d := range des { + cd := canonicalizeAutoscalingPolicyBasicAlgorithm(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]AutoscalingPolicyBasicAlgorithm, 0, len(des)) + for i, d := range des { + cd := canonicalizeAutoscalingPolicyBasicAlgorithm(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewAutoscalingPolicyBasicAlgorithm(c *Client, des, nw *AutoscalingPolicyBasicAlgorithm) *AutoscalingPolicyBasicAlgorithm { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for AutoscalingPolicyBasicAlgorithm while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + nw.YarnConfig = canonicalizeNewAutoscalingPolicyBasicAlgorithmYarnConfig(c, des.YarnConfig, nw.YarnConfig) + if dcl.StringCanonicalize(des.CooldownPeriod, nw.CooldownPeriod) { + nw.CooldownPeriod = des.CooldownPeriod + } + + return nw +} + +func canonicalizeNewAutoscalingPolicyBasicAlgorithmSet(c *Client, des, nw []AutoscalingPolicyBasicAlgorithm) []AutoscalingPolicyBasicAlgorithm { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []AutoscalingPolicyBasicAlgorithm + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareAutoscalingPolicyBasicAlgorithmNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewAutoscalingPolicyBasicAlgorithm(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewAutoscalingPolicyBasicAlgorithmSlice(c *Client, des, nw []AutoscalingPolicyBasicAlgorithm) []AutoscalingPolicyBasicAlgorithm { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []AutoscalingPolicyBasicAlgorithm + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewAutoscalingPolicyBasicAlgorithm(c, &d, &n)) + } + + return items +} + +func canonicalizeAutoscalingPolicyBasicAlgorithmYarnConfig(des, initial *AutoscalingPolicyBasicAlgorithmYarnConfig, opts ...dcl.ApplyOption) *AutoscalingPolicyBasicAlgorithmYarnConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &AutoscalingPolicyBasicAlgorithmYarnConfig{} + + if dcl.StringCanonicalize(des.GracefulDecommissionTimeout, initial.GracefulDecommissionTimeout) || dcl.IsZeroValue(des.GracefulDecommissionTimeout) { + cDes.GracefulDecommissionTimeout = initial.GracefulDecommissionTimeout + } else { + cDes.GracefulDecommissionTimeout = des.GracefulDecommissionTimeout + } + if dcl.IsZeroValue(des.ScaleUpFactor) || (dcl.IsEmptyValueIndirect(des.ScaleUpFactor) && dcl.IsEmptyValueIndirect(initial.ScaleUpFactor)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.ScaleUpFactor = initial.ScaleUpFactor + } else { + cDes.ScaleUpFactor = des.ScaleUpFactor + } + if dcl.IsZeroValue(des.ScaleDownFactor) || (dcl.IsEmptyValueIndirect(des.ScaleDownFactor) && dcl.IsEmptyValueIndirect(initial.ScaleDownFactor)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.ScaleDownFactor = initial.ScaleDownFactor + } else { + cDes.ScaleDownFactor = des.ScaleDownFactor + } + if dcl.IsZeroValue(des.ScaleUpMinWorkerFraction) || (dcl.IsEmptyValueIndirect(des.ScaleUpMinWorkerFraction) && dcl.IsEmptyValueIndirect(initial.ScaleUpMinWorkerFraction)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.ScaleUpMinWorkerFraction = initial.ScaleUpMinWorkerFraction + } else { + cDes.ScaleUpMinWorkerFraction = des.ScaleUpMinWorkerFraction + } + if dcl.IsZeroValue(des.ScaleDownMinWorkerFraction) || (dcl.IsEmptyValueIndirect(des.ScaleDownMinWorkerFraction) && dcl.IsEmptyValueIndirect(initial.ScaleDownMinWorkerFraction)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.ScaleDownMinWorkerFraction = initial.ScaleDownMinWorkerFraction + } else { + cDes.ScaleDownMinWorkerFraction = des.ScaleDownMinWorkerFraction + } + + return cDes +} + +func canonicalizeAutoscalingPolicyBasicAlgorithmYarnConfigSlice(des, initial []AutoscalingPolicyBasicAlgorithmYarnConfig, opts ...dcl.ApplyOption) []AutoscalingPolicyBasicAlgorithmYarnConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]AutoscalingPolicyBasicAlgorithmYarnConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeAutoscalingPolicyBasicAlgorithmYarnConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]AutoscalingPolicyBasicAlgorithmYarnConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeAutoscalingPolicyBasicAlgorithmYarnConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewAutoscalingPolicyBasicAlgorithmYarnConfig(c *Client, des, nw *AutoscalingPolicyBasicAlgorithmYarnConfig) *AutoscalingPolicyBasicAlgorithmYarnConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for AutoscalingPolicyBasicAlgorithmYarnConfig while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.GracefulDecommissionTimeout, nw.GracefulDecommissionTimeout) { + nw.GracefulDecommissionTimeout = des.GracefulDecommissionTimeout + } + + return nw +} + +func canonicalizeNewAutoscalingPolicyBasicAlgorithmYarnConfigSet(c *Client, des, nw []AutoscalingPolicyBasicAlgorithmYarnConfig) []AutoscalingPolicyBasicAlgorithmYarnConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []AutoscalingPolicyBasicAlgorithmYarnConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareAutoscalingPolicyBasicAlgorithmYarnConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewAutoscalingPolicyBasicAlgorithmYarnConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewAutoscalingPolicyBasicAlgorithmYarnConfigSlice(c *Client, des, nw []AutoscalingPolicyBasicAlgorithmYarnConfig) []AutoscalingPolicyBasicAlgorithmYarnConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []AutoscalingPolicyBasicAlgorithmYarnConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewAutoscalingPolicyBasicAlgorithmYarnConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeAutoscalingPolicyWorkerConfig(des, initial *AutoscalingPolicyWorkerConfig, opts ...dcl.ApplyOption) *AutoscalingPolicyWorkerConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &AutoscalingPolicyWorkerConfig{} + + if dcl.IsZeroValue(des.MinInstances) || (dcl.IsEmptyValueIndirect(des.MinInstances) && dcl.IsEmptyValueIndirect(initial.MinInstances)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.MinInstances = initial.MinInstances + } else { + cDes.MinInstances = des.MinInstances + } + if dcl.IsZeroValue(des.MaxInstances) || (dcl.IsEmptyValueIndirect(des.MaxInstances) && dcl.IsEmptyValueIndirect(initial.MaxInstances)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.MaxInstances = initial.MaxInstances + } else { + cDes.MaxInstances = des.MaxInstances + } + if dcl.IsZeroValue(des.Weight) || (dcl.IsEmptyValueIndirect(des.Weight) && dcl.IsEmptyValueIndirect(initial.Weight)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Weight = initial.Weight + } else { + cDes.Weight = des.Weight + } + + return cDes +} + +func canonicalizeAutoscalingPolicyWorkerConfigSlice(des, initial []AutoscalingPolicyWorkerConfig, opts ...dcl.ApplyOption) []AutoscalingPolicyWorkerConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]AutoscalingPolicyWorkerConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeAutoscalingPolicyWorkerConfig(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]AutoscalingPolicyWorkerConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeAutoscalingPolicyWorkerConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewAutoscalingPolicyWorkerConfig(c *Client, des, nw *AutoscalingPolicyWorkerConfig) *AutoscalingPolicyWorkerConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for AutoscalingPolicyWorkerConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewAutoscalingPolicyWorkerConfigSet(c *Client, des, nw []AutoscalingPolicyWorkerConfig) []AutoscalingPolicyWorkerConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []AutoscalingPolicyWorkerConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareAutoscalingPolicyWorkerConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewAutoscalingPolicyWorkerConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewAutoscalingPolicyWorkerConfigSlice(c *Client, des, nw []AutoscalingPolicyWorkerConfig) []AutoscalingPolicyWorkerConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []AutoscalingPolicyWorkerConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewAutoscalingPolicyWorkerConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeAutoscalingPolicySecondaryWorkerConfig(des, initial *AutoscalingPolicySecondaryWorkerConfig, opts ...dcl.ApplyOption) *AutoscalingPolicySecondaryWorkerConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &AutoscalingPolicySecondaryWorkerConfig{} + + if dcl.IsZeroValue(des.MinInstances) || (dcl.IsEmptyValueIndirect(des.MinInstances) && dcl.IsEmptyValueIndirect(initial.MinInstances)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.MinInstances = initial.MinInstances + } else { + cDes.MinInstances = des.MinInstances + } + if dcl.IsZeroValue(des.MaxInstances) || (dcl.IsEmptyValueIndirect(des.MaxInstances) && dcl.IsEmptyValueIndirect(initial.MaxInstances)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.MaxInstances = initial.MaxInstances + } else { + cDes.MaxInstances = des.MaxInstances + } + if dcl.IsZeroValue(des.Weight) || (dcl.IsEmptyValueIndirect(des.Weight) && dcl.IsEmptyValueIndirect(initial.Weight)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.Weight = initial.Weight + } else { + cDes.Weight = des.Weight + } + + return cDes +} + +func canonicalizeAutoscalingPolicySecondaryWorkerConfigSlice(des, initial []AutoscalingPolicySecondaryWorkerConfig, opts ...dcl.ApplyOption) []AutoscalingPolicySecondaryWorkerConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]AutoscalingPolicySecondaryWorkerConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeAutoscalingPolicySecondaryWorkerConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]AutoscalingPolicySecondaryWorkerConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeAutoscalingPolicySecondaryWorkerConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewAutoscalingPolicySecondaryWorkerConfig(c *Client, des, nw *AutoscalingPolicySecondaryWorkerConfig) *AutoscalingPolicySecondaryWorkerConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for AutoscalingPolicySecondaryWorkerConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewAutoscalingPolicySecondaryWorkerConfigSet(c *Client, des, nw []AutoscalingPolicySecondaryWorkerConfig) []AutoscalingPolicySecondaryWorkerConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []AutoscalingPolicySecondaryWorkerConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareAutoscalingPolicySecondaryWorkerConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewAutoscalingPolicySecondaryWorkerConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewAutoscalingPolicySecondaryWorkerConfigSlice(c *Client, des, nw []AutoscalingPolicySecondaryWorkerConfig) []AutoscalingPolicySecondaryWorkerConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []AutoscalingPolicySecondaryWorkerConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewAutoscalingPolicySecondaryWorkerConfig(c, &d, &n)) + } + + return items +} + +// The differ returns a list of diffs, along with a list of operations that should be taken +// to remedy them. Right now, it does not attempt to consolidate operations - if several +// fields can be fixed with a patch update, it will perform the patch several times. +// Diffs on some fields will be ignored if the `desired` state has an empty (nil) +// value. This empty value indicates that the user does not care about the state for +// the field. Empty fields on the actual object will cause diffs. +// TODO(magic-modules-eng): for efficiency in some resources, add batching. 
// diffAutoscalingPolicy computes the field-level diffs between the desired and actual
// resource. Each field's DiffInfo encodes the remediation: RequiresRecreate for
// immutable fields (Name, Project, Location) and the UpdateAutoscalingPolicy patch
// operation for mutable ones.
func diffAutoscalingPolicy(c *Client, desired, actual *AutoscalingPolicy, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) {
	if desired == nil || actual == nil {
		return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual)
	}

	c.Config.Logger.Infof("Diff function called with desired state: %v", desired)
	c.Config.Logger.Infof("Diff function called with actual state: %v", actual)

	var fn dcl.FieldName
	var newDiffs []*dcl.FieldDiff
	// New style diffs.
	// Note: the resource's Name field maps to the API field "id" (see
	// expandAutoscalingPolicy), hence AddNest("Id") rather than "Name".
	if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Id")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.BasicAlgorithm, actual.BasicAlgorithm, dcl.DiffInfo{ObjectFunction: compareAutoscalingPolicyBasicAlgorithmNewStyle, EmptyObject: EmptyAutoscalingPolicyBasicAlgorithm, OperationSelector: dcl.TriggersOperation("updateAutoscalingPolicyUpdateAutoscalingPolicyOperation")}, fn.AddNest("BasicAlgorithm")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.WorkerConfig, actual.WorkerConfig, dcl.DiffInfo{ObjectFunction: compareAutoscalingPolicyWorkerConfigNewStyle, EmptyObject: EmptyAutoscalingPolicyWorkerConfig, OperationSelector: dcl.TriggersOperation("updateAutoscalingPolicyUpdateAutoscalingPolicyOperation")}, fn.AddNest("WorkerConfig")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.SecondaryWorkerConfig, actual.SecondaryWorkerConfig, dcl.DiffInfo{ObjectFunction: compareAutoscalingPolicySecondaryWorkerConfigNewStyle, EmptyObject: EmptyAutoscalingPolicySecondaryWorkerConfig, OperationSelector: dcl.TriggersOperation("updateAutoscalingPolicyUpdateAutoscalingPolicyOperation")}, fn.AddNest("SecondaryWorkerConfig")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Location, actual.Location, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Location")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if len(newDiffs) > 0 {
		c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs)
	}
	return newDiffs, nil
}

// compareAutoscalingPolicyBasicAlgorithmNewStyle diffs two BasicAlgorithm objects.
// Accepts either pointer or value operands (the dcl differ passes both forms).
func compareAutoscalingPolicyBasicAlgorithmNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*AutoscalingPolicyBasicAlgorithm)
	if !ok {
		desiredNotPointer, ok := d.(AutoscalingPolicyBasicAlgorithm)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a AutoscalingPolicyBasicAlgorithm or *AutoscalingPolicyBasicAlgorithm", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*AutoscalingPolicyBasicAlgorithm)
	if !ok {
		actualNotPointer, ok := a.(AutoscalingPolicyBasicAlgorithm)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a AutoscalingPolicyBasicAlgorithm", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.YarnConfig, actual.YarnConfig, dcl.DiffInfo{ObjectFunction: compareAutoscalingPolicyBasicAlgorithmYarnConfigNewStyle, EmptyObject: EmptyAutoscalingPolicyBasicAlgorithmYarnConfig, OperationSelector: dcl.TriggersOperation("updateAutoscalingPolicyUpdateAutoscalingPolicyOperation")}, fn.AddNest("YarnConfig")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	// ServerDefault: the API may fill this in, so a server-supplied value does not
	// count as a diff when the user left it unset.
	if ds, err := dcl.Diff(desired.CooldownPeriod, actual.CooldownPeriod, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.TriggersOperation("updateAutoscalingPolicyUpdateAutoscalingPolicyOperation")}, fn.AddNest("CooldownPeriod")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareAutoscalingPolicyBasicAlgorithmYarnConfigNewStyle diffs two YarnConfig objects
// field by field; every field triggers the same patch-update operation.
func compareAutoscalingPolicyBasicAlgorithmYarnConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*AutoscalingPolicyBasicAlgorithmYarnConfig)
	if !ok {
		desiredNotPointer, ok := d.(AutoscalingPolicyBasicAlgorithmYarnConfig)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a AutoscalingPolicyBasicAlgorithmYarnConfig or *AutoscalingPolicyBasicAlgorithmYarnConfig", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*AutoscalingPolicyBasicAlgorithmYarnConfig)
	if !ok {
		actualNotPointer, ok := a.(AutoscalingPolicyBasicAlgorithmYarnConfig)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a AutoscalingPolicyBasicAlgorithmYarnConfig", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.GracefulDecommissionTimeout, actual.GracefulDecommissionTimeout, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAutoscalingPolicyUpdateAutoscalingPolicyOperation")}, fn.AddNest("GracefulDecommissionTimeout")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.ScaleUpFactor, actual.ScaleUpFactor, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAutoscalingPolicyUpdateAutoscalingPolicyOperation")}, fn.AddNest("ScaleUpFactor")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.ScaleDownFactor, actual.ScaleDownFactor, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAutoscalingPolicyUpdateAutoscalingPolicyOperation")}, fn.AddNest("ScaleDownFactor")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.ScaleUpMinWorkerFraction, actual.ScaleUpMinWorkerFraction, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAutoscalingPolicyUpdateAutoscalingPolicyOperation")}, fn.AddNest("ScaleUpMinWorkerFraction")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.ScaleDownMinWorkerFraction, actual.ScaleDownMinWorkerFraction, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAutoscalingPolicyUpdateAutoscalingPolicyOperation")}, fn.AddNest("ScaleDownMinWorkerFraction")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareAutoscalingPolicyWorkerConfigNewStyle diffs two WorkerConfig objects.
// MinInstances and Weight are ServerDefault (server may supply values).
func compareAutoscalingPolicyWorkerConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*AutoscalingPolicyWorkerConfig)
	if !ok {
		desiredNotPointer, ok := d.(AutoscalingPolicyWorkerConfig)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a AutoscalingPolicyWorkerConfig or *AutoscalingPolicyWorkerConfig", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*AutoscalingPolicyWorkerConfig)
	if !ok {
		actualNotPointer, ok := a.(AutoscalingPolicyWorkerConfig)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a AutoscalingPolicyWorkerConfig", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.MinInstances, actual.MinInstances, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.TriggersOperation("updateAutoscalingPolicyUpdateAutoscalingPolicyOperation")}, fn.AddNest("MinInstances")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.MaxInstances, actual.MaxInstances, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAutoscalingPolicyUpdateAutoscalingPolicyOperation")}, fn.AddNest("MaxInstances")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Weight, actual.Weight, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.TriggersOperation("updateAutoscalingPolicyUpdateAutoscalingPolicyOperation")}, fn.AddNest("Weight")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareAutoscalingPolicySecondaryWorkerConfigNewStyle diffs two SecondaryWorkerConfig
// objects. Unlike the primary WorkerConfig, only Weight is ServerDefault here.
func compareAutoscalingPolicySecondaryWorkerConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*AutoscalingPolicySecondaryWorkerConfig)
	if !ok {
		desiredNotPointer, ok := d.(AutoscalingPolicySecondaryWorkerConfig)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a AutoscalingPolicySecondaryWorkerConfig or *AutoscalingPolicySecondaryWorkerConfig", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*AutoscalingPolicySecondaryWorkerConfig)
	if !ok {
		actualNotPointer, ok := a.(AutoscalingPolicySecondaryWorkerConfig)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a AutoscalingPolicySecondaryWorkerConfig", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.MinInstances, actual.MinInstances, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAutoscalingPolicyUpdateAutoscalingPolicyOperation")}, fn.AddNest("MinInstances")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.MaxInstances, actual.MaxInstances, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAutoscalingPolicyUpdateAutoscalingPolicyOperation")}, fn.AddNest("MaxInstances")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Weight, actual.Weight, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.TriggersOperation("updateAutoscalingPolicyUpdateAutoscalingPolicyOperation")}, fn.AddNest("Weight")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// urlNormalized returns a copy of the resource struct with values normalized
// for URL substitutions. For instance, it converts long-form self-links to
// short-form so they can be substituted in.
func (r *AutoscalingPolicy) urlNormalized() *AutoscalingPolicy {
	normalized := dcl.Copy(*r).(AutoscalingPolicy)
	normalized.Name = dcl.SelfLinkToName(r.Name)
	normalized.Project = dcl.SelfLinkToName(r.Project)
	normalized.Location = dcl.SelfLinkToName(r.Location)
	return &normalized
}

// updateURL builds the request URL for the named update operation.
// (This is a Go template source: the {{ "{{" }} / {{ "}}" }} escapes emit literal
// braces for dcl.URL's placeholder syntax in the rendered file.)
func (r *AutoscalingPolicy) updateURL(userBasePath, updateName string) (string, error) {
	nr := r.urlNormalized()
	if updateName == "UpdateAutoscalingPolicy" {
		fields := map[string]interface{}{
			"project":  dcl.ValueOrEmptyString(nr.Project),
			"location": dcl.ValueOrEmptyString(nr.Location),
			"name":     dcl.ValueOrEmptyString(nr.Name),
		}
		return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/autoscalingPolicies/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, fields), nil

	}

	return "", fmt.Errorf("unknown update name: %s", updateName)
}

// marshal encodes the AutoscalingPolicy resource into JSON for a Create request, and
// performs transformations from the resource schema to the API schema if
// necessary.
// marshal serializes the resource via expandAutoscalingPolicy for use in request bodies.
func (r *AutoscalingPolicy) marshal(c *Client) ([]byte, error) {
	m, err := expandAutoscalingPolicy(c, r)
	if err != nil {
		return nil, fmt.Errorf("error marshalling AutoscalingPolicy: %w", err)
	}

	return json.Marshal(m)
}

// unmarshalAutoscalingPolicy decodes JSON responses into the AutoscalingPolicy resource schema.
func unmarshalAutoscalingPolicy(b []byte, c *Client, res *AutoscalingPolicy) (*AutoscalingPolicy, error) {
	var m map[string]interface{}
	if err := json.Unmarshal(b, &m); err != nil {
		return nil, err
	}
	return unmarshalMapAutoscalingPolicy(m, c, res)
}

// unmarshalMapAutoscalingPolicy flattens an already-decoded JSON map into the resource
// schema, erroring on an empty object.
func unmarshalMapAutoscalingPolicy(m map[string]interface{}, c *Client, res *AutoscalingPolicy) (*AutoscalingPolicy, error) {

	flattened := flattenAutoscalingPolicy(c, m, res)
	if flattened == nil {
		return nil, fmt.Errorf("attempted to flatten empty json object")
	}
	return flattened, nil
}

// expandAutoscalingPolicy expands AutoscalingPolicy into a JSON request object.
// Note: the resource's Name is sent as the API field "id"; Project and Location are
// URL parameters only, hence dcl.EmptyValue() (they are never placed in the body).
func expandAutoscalingPolicy(c *Client, f *AutoscalingPolicy) (map[string]interface{}, error) {
	m := make(map[string]interface{})
	res := f
	_ = res
	if v := f.Name; dcl.ValueShouldBeSent(v) {
		m["id"] = v
	}
	if v, err := expandAutoscalingPolicyBasicAlgorithm(c, f.BasicAlgorithm, res); err != nil {
		return nil, fmt.Errorf("error expanding BasicAlgorithm into basicAlgorithm: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["basicAlgorithm"] = v
	}
	if v, err := expandAutoscalingPolicyWorkerConfig(c, f.WorkerConfig, res); err != nil {
		return nil, fmt.Errorf("error expanding WorkerConfig into workerConfig: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["workerConfig"] = v
	}
	if v, err := expandAutoscalingPolicySecondaryWorkerConfig(c, f.SecondaryWorkerConfig, res); err != nil {
		return nil, fmt.Errorf("error expanding SecondaryWorkerConfig into secondaryWorkerConfig: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["secondaryWorkerConfig"] = v
	}
	if v, err := dcl.EmptyValue(); err != nil {
		return nil, fmt.Errorf("error expanding Project into project: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["project"] = v
	}
	if v, err := dcl.EmptyValue(); err != nil {
		return nil, fmt.Errorf("error expanding Location into location: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["location"] = v
	}

	return m, nil
}

// flattenAutoscalingPolicy flattens AutoscalingPolicy from a JSON request object into the
// AutoscalingPolicy type.
func flattenAutoscalingPolicy(c *Client, i interface{}, res *AutoscalingPolicy) *AutoscalingPolicy {
	m, ok := i.(map[string]interface{})
	if !ok {
		return nil
	}
	if len(m) == 0 {
		return nil
	}

	resultRes := &AutoscalingPolicy{}
	// API field "id" maps back to the resource's Name.
	resultRes.Name = dcl.FlattenString(m["id"])
	resultRes.BasicAlgorithm = flattenAutoscalingPolicyBasicAlgorithm(c, m["basicAlgorithm"], res)
	resultRes.WorkerConfig = flattenAutoscalingPolicyWorkerConfig(c, m["workerConfig"], res)
	resultRes.SecondaryWorkerConfig = flattenAutoscalingPolicySecondaryWorkerConfig(c, m["secondaryWorkerConfig"], res)
	resultRes.Project = dcl.FlattenString(m["project"])
	resultRes.Location = dcl.FlattenString(m["location"])

	return resultRes
}

// expandAutoscalingPolicyBasicAlgorithmMap expands the contents of AutoscalingPolicyBasicAlgorithm into a JSON
// request object.
func expandAutoscalingPolicyBasicAlgorithmMap(c *Client, f map[string]AutoscalingPolicyBasicAlgorithm, res *AutoscalingPolicy) (map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := make(map[string]interface{})
	for k, item := range f {
		i, err := expandAutoscalingPolicyBasicAlgorithm(c, &item, res)
		if err != nil {
			return nil, err
		}
		if i != nil {
			items[k] = i
		}
	}

	return items, nil
}

// expandAutoscalingPolicyBasicAlgorithmSlice expands the contents of AutoscalingPolicyBasicAlgorithm into a JSON
// request object.
func expandAutoscalingPolicyBasicAlgorithmSlice(c *Client, f []AutoscalingPolicyBasicAlgorithm, res *AutoscalingPolicy) ([]map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := []map[string]interface{}{}
	for _, item := range f {
		i, err := expandAutoscalingPolicyBasicAlgorithm(c, &item, res)
		if err != nil {
			return nil, err
		}

		items = append(items, i)
	}

	return items, nil
}

// flattenAutoscalingPolicyBasicAlgorithmMap flattens the contents of AutoscalingPolicyBasicAlgorithm from a JSON
// response object.
func flattenAutoscalingPolicyBasicAlgorithmMap(c *Client, i interface{}, res *AutoscalingPolicy) map[string]AutoscalingPolicyBasicAlgorithm {
	a, ok := i.(map[string]interface{})
	if !ok {
		return map[string]AutoscalingPolicyBasicAlgorithm{}
	}

	if len(a) == 0 {
		return map[string]AutoscalingPolicyBasicAlgorithm{}
	}

	items := make(map[string]AutoscalingPolicyBasicAlgorithm)
	for k, item := range a {
		// NOTE(review): the type assertion panics on non-object elements; generated
		// code assumes well-formed API responses.
		items[k] = *flattenAutoscalingPolicyBasicAlgorithm(c, item.(map[string]interface{}), res)
	}

	return items
}

// flattenAutoscalingPolicyBasicAlgorithmSlice flattens the contents of AutoscalingPolicyBasicAlgorithm from a JSON
// response object.
func flattenAutoscalingPolicyBasicAlgorithmSlice(c *Client, i interface{}, res *AutoscalingPolicy) []AutoscalingPolicyBasicAlgorithm {
	a, ok := i.([]interface{})
	if !ok {
		return []AutoscalingPolicyBasicAlgorithm{}
	}

	if len(a) == 0 {
		return []AutoscalingPolicyBasicAlgorithm{}
	}

	items := make([]AutoscalingPolicyBasicAlgorithm, 0, len(a))
	for _, item := range a {
		items = append(items, *flattenAutoscalingPolicyBasicAlgorithm(c, item.(map[string]interface{}), res))
	}

	return items
}

// expandAutoscalingPolicyBasicAlgorithm expands an instance of AutoscalingPolicyBasicAlgorithm into a JSON
// request object.
func expandAutoscalingPolicyBasicAlgorithm(c *Client, f *AutoscalingPolicyBasicAlgorithm, res *AutoscalingPolicy) (map[string]interface{}, error) {
	if dcl.IsEmptyValueIndirect(f) {
		return nil, nil
	}

	m := make(map[string]interface{})
	if v, err := expandAutoscalingPolicyBasicAlgorithmYarnConfig(c, f.YarnConfig, res); err != nil {
		return nil, fmt.Errorf("error expanding YarnConfig into yarnConfig: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["yarnConfig"] = v
	}
	if v := f.CooldownPeriod; !dcl.IsEmptyValueIndirect(v) {
		m["cooldownPeriod"] = v
	}

	return m, nil
}

// flattenAutoscalingPolicyBasicAlgorithm flattens an instance of AutoscalingPolicyBasicAlgorithm from a JSON
// response object. Returns the Empty sentinel (not nil) for an explicitly-empty value.
func flattenAutoscalingPolicyBasicAlgorithm(c *Client, i interface{}, res *AutoscalingPolicy) *AutoscalingPolicyBasicAlgorithm {
	m, ok := i.(map[string]interface{})
	if !ok {
		return nil
	}

	r := &AutoscalingPolicyBasicAlgorithm{}

	if dcl.IsEmptyValueIndirect(i) {
		return EmptyAutoscalingPolicyBasicAlgorithm
	}
	r.YarnConfig = flattenAutoscalingPolicyBasicAlgorithmYarnConfig(c, m["yarnConfig"], res)
	r.CooldownPeriod = dcl.FlattenString(m["cooldownPeriod"])

	return r
}

// expandAutoscalingPolicyBasicAlgorithmYarnConfigMap expands the contents of AutoscalingPolicyBasicAlgorithmYarnConfig into a JSON
// request object.
func expandAutoscalingPolicyBasicAlgorithmYarnConfigMap(c *Client, f map[string]AutoscalingPolicyBasicAlgorithmYarnConfig, res *AutoscalingPolicy) (map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := make(map[string]interface{})
	for k, item := range f {
		i, err := expandAutoscalingPolicyBasicAlgorithmYarnConfig(c, &item, res)
		if err != nil {
			return nil, err
		}
		if i != nil {
			items[k] = i
		}
	}

	return items, nil
}

// expandAutoscalingPolicyBasicAlgorithmYarnConfigSlice expands the contents of AutoscalingPolicyBasicAlgorithmYarnConfig into a JSON
// request object.
func expandAutoscalingPolicyBasicAlgorithmYarnConfigSlice(c *Client, f []AutoscalingPolicyBasicAlgorithmYarnConfig, res *AutoscalingPolicy) ([]map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := []map[string]interface{}{}
	for _, item := range f {
		i, err := expandAutoscalingPolicyBasicAlgorithmYarnConfig(c, &item, res)
		if err != nil {
			return nil, err
		}

		items = append(items, i)
	}

	return items, nil
}

// flattenAutoscalingPolicyBasicAlgorithmYarnConfigMap flattens the contents of AutoscalingPolicyBasicAlgorithmYarnConfig from a JSON
// response object.
func flattenAutoscalingPolicyBasicAlgorithmYarnConfigMap(c *Client, i interface{}, res *AutoscalingPolicy) map[string]AutoscalingPolicyBasicAlgorithmYarnConfig {
	a, ok := i.(map[string]interface{})
	if !ok {
		return map[string]AutoscalingPolicyBasicAlgorithmYarnConfig{}
	}

	if len(a) == 0 {
		return map[string]AutoscalingPolicyBasicAlgorithmYarnConfig{}
	}

	items := make(map[string]AutoscalingPolicyBasicAlgorithmYarnConfig)
	for k, item := range a {
		items[k] = *flattenAutoscalingPolicyBasicAlgorithmYarnConfig(c, item.(map[string]interface{}), res)
	}

	return items
}

// flattenAutoscalingPolicyBasicAlgorithmYarnConfigSlice flattens the contents of AutoscalingPolicyBasicAlgorithmYarnConfig from a JSON
// response object.
func flattenAutoscalingPolicyBasicAlgorithmYarnConfigSlice(c *Client, i interface{}, res *AutoscalingPolicy) []AutoscalingPolicyBasicAlgorithmYarnConfig {
	a, ok := i.([]interface{})
	if !ok {
		return []AutoscalingPolicyBasicAlgorithmYarnConfig{}
	}

	if len(a) == 0 {
		return []AutoscalingPolicyBasicAlgorithmYarnConfig{}
	}

	items := make([]AutoscalingPolicyBasicAlgorithmYarnConfig, 0, len(a))
	for _, item := range a {
		items = append(items, *flattenAutoscalingPolicyBasicAlgorithmYarnConfig(c, item.(map[string]interface{}), res))
	}

	return items
}

// expandAutoscalingPolicyBasicAlgorithmYarnConfig expands an instance of AutoscalingPolicyBasicAlgorithmYarnConfig into a JSON
// request object. Empty fields are omitted from the request body.
func expandAutoscalingPolicyBasicAlgorithmYarnConfig(c *Client, f *AutoscalingPolicyBasicAlgorithmYarnConfig, res *AutoscalingPolicy) (map[string]interface{}, error) {
	if dcl.IsEmptyValueIndirect(f) {
		return nil, nil
	}

	m := make(map[string]interface{})
	if v := f.GracefulDecommissionTimeout; !dcl.IsEmptyValueIndirect(v) {
		m["gracefulDecommissionTimeout"] = v
	}
	if v := f.ScaleUpFactor; !dcl.IsEmptyValueIndirect(v) {
		m["scaleUpFactor"] = v
	}
	if v := f.ScaleDownFactor; !dcl.IsEmptyValueIndirect(v) {
		m["scaleDownFactor"] = v
	}
	if v := f.ScaleUpMinWorkerFraction; !dcl.IsEmptyValueIndirect(v) {
		m["scaleUpMinWorkerFraction"] = v
	}
	if v := f.ScaleDownMinWorkerFraction; !dcl.IsEmptyValueIndirect(v) {
		m["scaleDownMinWorkerFraction"] = v
	}

	return m, nil
}

// flattenAutoscalingPolicyBasicAlgorithmYarnConfig flattens an instance of AutoscalingPolicyBasicAlgorithmYarnConfig from a JSON
// response object.
func flattenAutoscalingPolicyBasicAlgorithmYarnConfig(c *Client, i interface{}, res *AutoscalingPolicy) *AutoscalingPolicyBasicAlgorithmYarnConfig {
	m, ok := i.(map[string]interface{})
	if !ok {
		return nil
	}

	r := &AutoscalingPolicyBasicAlgorithmYarnConfig{}

	// An explicitly-empty object flattens to the shared Empty sentinel, which the
	// canonicalizers treat differently from nil (unset).
	if dcl.IsEmptyValueIndirect(i) {
		return EmptyAutoscalingPolicyBasicAlgorithmYarnConfig
	}
	r.GracefulDecommissionTimeout = dcl.FlattenString(m["gracefulDecommissionTimeout"])
	r.ScaleUpFactor = dcl.FlattenDouble(m["scaleUpFactor"])
	r.ScaleDownFactor = dcl.FlattenDouble(m["scaleDownFactor"])
	r.ScaleUpMinWorkerFraction = dcl.FlattenDouble(m["scaleUpMinWorkerFraction"])
	r.ScaleDownMinWorkerFraction = dcl.FlattenDouble(m["scaleDownMinWorkerFraction"])

	return r
}

// expandAutoscalingPolicyWorkerConfigMap expands the contents of AutoscalingPolicyWorkerConfig into a JSON
// request object.
func expandAutoscalingPolicyWorkerConfigMap(c *Client, f map[string]AutoscalingPolicyWorkerConfig, res *AutoscalingPolicy) (map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := make(map[string]interface{})
	for k, item := range f {
		i, err := expandAutoscalingPolicyWorkerConfig(c, &item, res)
		if err != nil {
			return nil, err
		}
		if i != nil {
			items[k] = i
		}
	}

	return items, nil
}

// expandAutoscalingPolicyWorkerConfigSlice expands the contents of AutoscalingPolicyWorkerConfig into a JSON
// request object.
func expandAutoscalingPolicyWorkerConfigSlice(c *Client, f []AutoscalingPolicyWorkerConfig, res *AutoscalingPolicy) ([]map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := []map[string]interface{}{}
	for _, item := range f {
		i, err := expandAutoscalingPolicyWorkerConfig(c, &item, res)
		if err != nil {
			return nil, err
		}

		items = append(items, i)
	}

	return items, nil
}

// flattenAutoscalingPolicyWorkerConfigMap flattens the contents of AutoscalingPolicyWorkerConfig from a JSON
// response object.
func flattenAutoscalingPolicyWorkerConfigMap(c *Client, i interface{}, res *AutoscalingPolicy) map[string]AutoscalingPolicyWorkerConfig {
	a, ok := i.(map[string]interface{})
	if !ok {
		return map[string]AutoscalingPolicyWorkerConfig{}
	}

	if len(a) == 0 {
		return map[string]AutoscalingPolicyWorkerConfig{}
	}

	items := make(map[string]AutoscalingPolicyWorkerConfig)
	for k, item := range a {
		items[k] = *flattenAutoscalingPolicyWorkerConfig(c, item.(map[string]interface{}), res)
	}

	return items
}

// flattenAutoscalingPolicyWorkerConfigSlice flattens the contents of AutoscalingPolicyWorkerConfig from a JSON
// response object.
func flattenAutoscalingPolicyWorkerConfigSlice(c *Client, i interface{}, res *AutoscalingPolicy) []AutoscalingPolicyWorkerConfig {
	a, ok := i.([]interface{})
	if !ok {
		return []AutoscalingPolicyWorkerConfig{}
	}

	if len(a) == 0 {
		return []AutoscalingPolicyWorkerConfig{}
	}

	items := make([]AutoscalingPolicyWorkerConfig, 0, len(a))
	for _, item := range a {
		items = append(items, *flattenAutoscalingPolicyWorkerConfig(c, item.(map[string]interface{}), res))
	}

	return items
}

// expandAutoscalingPolicyWorkerConfig expands an instance of AutoscalingPolicyWorkerConfig into a JSON
// request object.
+func expandAutoscalingPolicyWorkerConfig(c *Client, f *AutoscalingPolicyWorkerConfig, res *AutoscalingPolicy) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.MinInstances; !dcl.IsEmptyValueIndirect(v) { + m["minInstances"] = v + } + if v := f.MaxInstances; !dcl.IsEmptyValueIndirect(v) { + m["maxInstances"] = v + } + if v := f.Weight; !dcl.IsEmptyValueIndirect(v) { + m["weight"] = v + } + + return m, nil +} + +// flattenAutoscalingPolicyWorkerConfig flattens an instance of AutoscalingPolicyWorkerConfig from a JSON +// response object. +func flattenAutoscalingPolicyWorkerConfig(c *Client, i interface{}, res *AutoscalingPolicy) *AutoscalingPolicyWorkerConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &AutoscalingPolicyWorkerConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyAutoscalingPolicyWorkerConfig + } + r.MinInstances = dcl.FlattenInteger(m["minInstances"]) + r.MaxInstances = dcl.FlattenInteger(m["maxInstances"]) + r.Weight = dcl.FlattenInteger(m["weight"]) + + return r +} + +// expandAutoscalingPolicySecondaryWorkerConfigMap expands the contents of AutoscalingPolicySecondaryWorkerConfig into a JSON +// request object. +func expandAutoscalingPolicySecondaryWorkerConfigMap(c *Client, f map[string]AutoscalingPolicySecondaryWorkerConfig, res *AutoscalingPolicy) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandAutoscalingPolicySecondaryWorkerConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandAutoscalingPolicySecondaryWorkerConfigSlice expands the contents of AutoscalingPolicySecondaryWorkerConfig into a JSON +// request object. 
func expandAutoscalingPolicySecondaryWorkerConfigSlice(c *Client, f []AutoscalingPolicySecondaryWorkerConfig, res *AutoscalingPolicy) ([]map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := []map[string]interface{}{}
	for _, item := range f {
		i, err := expandAutoscalingPolicySecondaryWorkerConfig(c, &item, res)
		if err != nil {
			return nil, err
		}

		items = append(items, i)
	}

	return items, nil
}

// flattenAutoscalingPolicySecondaryWorkerConfigMap flattens the contents of AutoscalingPolicySecondaryWorkerConfig from a JSON
// response object.
func flattenAutoscalingPolicySecondaryWorkerConfigMap(c *Client, i interface{}, res *AutoscalingPolicy) map[string]AutoscalingPolicySecondaryWorkerConfig {
	a, ok := i.(map[string]interface{})
	if !ok {
		return map[string]AutoscalingPolicySecondaryWorkerConfig{}
	}

	if len(a) == 0 {
		return map[string]AutoscalingPolicySecondaryWorkerConfig{}
	}

	items := make(map[string]AutoscalingPolicySecondaryWorkerConfig)
	for k, item := range a {
		items[k] = *flattenAutoscalingPolicySecondaryWorkerConfig(c, item.(map[string]interface{}), res)
	}

	return items
}

// flattenAutoscalingPolicySecondaryWorkerConfigSlice flattens the contents of AutoscalingPolicySecondaryWorkerConfig from a JSON
// response object.
func flattenAutoscalingPolicySecondaryWorkerConfigSlice(c *Client, i interface{}, res *AutoscalingPolicy) []AutoscalingPolicySecondaryWorkerConfig {
	a, ok := i.([]interface{})
	if !ok {
		return []AutoscalingPolicySecondaryWorkerConfig{}
	}

	if len(a) == 0 {
		return []AutoscalingPolicySecondaryWorkerConfig{}
	}

	items := make([]AutoscalingPolicySecondaryWorkerConfig, 0, len(a))
	for _, item := range a {
		items = append(items, *flattenAutoscalingPolicySecondaryWorkerConfig(c, item.(map[string]interface{}), res))
	}

	return items
}

// expandAutoscalingPolicySecondaryWorkerConfig expands an instance of AutoscalingPolicySecondaryWorkerConfig into a JSON
// request object. Empty fields are omitted from the request body.
func expandAutoscalingPolicySecondaryWorkerConfig(c *Client, f *AutoscalingPolicySecondaryWorkerConfig, res *AutoscalingPolicy) (map[string]interface{}, error) {
	if dcl.IsEmptyValueIndirect(f) {
		return nil, nil
	}

	m := make(map[string]interface{})
	if v := f.MinInstances; !dcl.IsEmptyValueIndirect(v) {
		m["minInstances"] = v
	}
	if v := f.MaxInstances; !dcl.IsEmptyValueIndirect(v) {
		m["maxInstances"] = v
	}
	if v := f.Weight; !dcl.IsEmptyValueIndirect(v) {
		m["weight"] = v
	}

	return m, nil
}

// flattenAutoscalingPolicySecondaryWorkerConfig flattens an instance of AutoscalingPolicySecondaryWorkerConfig from a JSON
// response object.
func flattenAutoscalingPolicySecondaryWorkerConfig(c *Client, i interface{}, res *AutoscalingPolicy) *AutoscalingPolicySecondaryWorkerConfig {
	m, ok := i.(map[string]interface{})
	if !ok {
		return nil
	}

	r := &AutoscalingPolicySecondaryWorkerConfig{}

	if dcl.IsEmptyValueIndirect(i) {
		return EmptyAutoscalingPolicySecondaryWorkerConfig
	}
	r.MinInstances = dcl.FlattenInteger(m["minInstances"])
	r.MaxInstances = dcl.FlattenInteger(m["maxInstances"])
	r.Weight = dcl.FlattenInteger(m["weight"])

	return r
}

// This function returns a matcher that checks whether a serialized resource matches this resource
// in its parameters (as defined by the fields in a Get, which definitionally define resource
// identity). This is useful in extracting the element from a List call.
// Identity fields compared: Project, Location, Name (nil on both sides counts as equal;
// nil on exactly one side counts as unequal).
func (r *AutoscalingPolicy) matcher(c *Client) func([]byte) bool {
	return func(b []byte) bool {
		cr, err := unmarshalAutoscalingPolicy(b, c, r)
		if err != nil {
			c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.")
			return false
		}
		nr := r.urlNormalized()
		ncr := cr.urlNormalized()
		c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr)

		if nr.Project == nil && ncr.Project == nil {
			c.Config.Logger.Info("Both Project fields null - considering equal.")
		} else if nr.Project == nil || ncr.Project == nil {
			c.Config.Logger.Info("Only one Project field is null - considering unequal.")
			return false
		} else if *nr.Project != *ncr.Project {
			return false
		}
		if nr.Location == nil && ncr.Location == nil {
			c.Config.Logger.Info("Both Location fields null - considering equal.")
		} else if nr.Location == nil || ncr.Location == nil {
			c.Config.Logger.Info("Only one Location field is null - considering unequal.")
			return false
		} else if *nr.Location != *ncr.Location {
			return false
		}
		if nr.Name == nil && ncr.Name == nil {
			c.Config.Logger.Info("Both Name fields null - considering equal.")
		} else if nr.Name == nil || ncr.Name == nil {
			c.Config.Logger.Info("Only one Name field is null - considering unequal.")
			return false
		} else if *nr.Name != *ncr.Name {
			return false
		}
		return true
	}
}

// autoscalingPolicyDiff pairs a detected diff with its remediation.
type autoscalingPolicyDiff struct {
	// The diff should include one or the other of RequiresRecreate or UpdateOp.
	RequiresRecreate bool
	UpdateOp         autoscalingPolicyApiOperation
	FieldName        string // used for error logging
}

// convertFieldDiffsToAutoscalingPolicyDiffs groups field diffs by the operation that
// remedies them, producing one autoscalingPolicyDiff per distinct operation name
// (the special name "Recreate" maps to RequiresRecreate).
func convertFieldDiffsToAutoscalingPolicyDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]autoscalingPolicyDiff, error) {
	opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff)
	// Map each operation name to the field diffs associated with it.
	for _, fd := range fds {
		for _, ro := range fd.ResultingOperation {
			if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok {
				fieldDiffs = append(fieldDiffs, fd)
				opNamesToFieldDiffs[ro] = fieldDiffs
			} else {
				config.Logger.Infof("%s required due to diff: %v", ro, fd)
				opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd}
			}
		}
	}
	var diffs []autoscalingPolicyDiff
	// For each operation name, create a autoscalingPolicyDiff which contains the operation.
	for opName, fieldDiffs := range opNamesToFieldDiffs {
		// Use the first field diff's field name for logging required recreate error.
		diff := autoscalingPolicyDiff{FieldName: fieldDiffs[0].FieldName}
		if opName == "Recreate" {
			diff.RequiresRecreate = true
		} else {
			apiOp, err := convertOpNameToAutoscalingPolicyApiOperation(opName, fieldDiffs, opts...)
			if err != nil {
				return diffs, err
			}
			diff.UpdateOp = apiOp
		}
		diffs = append(diffs, diff)
	}
	return diffs, nil
}

// convertOpNameToAutoscalingPolicyApiOperation maps an operation name produced by the
// differ to its concrete operation type; only the update-patch operation exists for
// this resource.
func convertOpNameToAutoscalingPolicyApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (autoscalingPolicyApiOperation, error) {
	switch opName {

	case "updateAutoscalingPolicyUpdateAutoscalingPolicyOperation":
		return &updateAutoscalingPolicyUpdateAutoscalingPolicyOperation{FieldDiffs: fieldDiffs}, nil

	default:
		return nil, fmt.Errorf("no such operation with name: %v", opName)
	}
}

// extractAutoscalingPolicyFields runs per-field extraction hooks on nested objects.
// NOTE(review): this function continues past the end of this chunk; the visible head
// is reproduced unchanged.
func extractAutoscalingPolicyFields(r *AutoscalingPolicy) error {
	vBasicAlgorithm := r.BasicAlgorithm
	if vBasicAlgorithm == nil {
		// note: explicitly not the empty object.
		vBasicAlgorithm = &AutoscalingPolicyBasicAlgorithm{}
	}
	if err := extractAutoscalingPolicyBasicAlgorithmFields(r, vBasicAlgorithm); err != nil {
		return err
	}
	if !dcl.IsEmptyValueIndirect(vBasicAlgorithm) {
		r.BasicAlgorithm = vBasicAlgorithm
	}
	vWorkerConfig := r.WorkerConfig
	if vWorkerConfig == nil {
		// note: explicitly not the empty object.
		vWorkerConfig = &AutoscalingPolicyWorkerConfig{}
	}
	if err := extractAutoscalingPolicyWorkerConfigFields(r, vWorkerConfig); err != nil {
		return err
	}
	if !dcl.IsEmptyValueIndirect(vWorkerConfig) {
		r.WorkerConfig = vWorkerConfig
	}
	vSecondaryWorkerConfig := r.SecondaryWorkerConfig
	if vSecondaryWorkerConfig == nil {
		// note: explicitly not the empty object.
+ vSecondaryWorkerConfig = &AutoscalingPolicySecondaryWorkerConfig{} + } + if err := extractAutoscalingPolicySecondaryWorkerConfigFields(r, vSecondaryWorkerConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSecondaryWorkerConfig) { + r.SecondaryWorkerConfig = vSecondaryWorkerConfig + } + return nil +} +func extractAutoscalingPolicyBasicAlgorithmFields(r *AutoscalingPolicy, o *AutoscalingPolicyBasicAlgorithm) error { + vYarnConfig := o.YarnConfig + if vYarnConfig == nil { + // note: explicitly not the empty object. + vYarnConfig = &AutoscalingPolicyBasicAlgorithmYarnConfig{} + } + if err := extractAutoscalingPolicyBasicAlgorithmYarnConfigFields(r, vYarnConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vYarnConfig) { + o.YarnConfig = vYarnConfig + } + return nil +} +func extractAutoscalingPolicyBasicAlgorithmYarnConfigFields(r *AutoscalingPolicy, o *AutoscalingPolicyBasicAlgorithmYarnConfig) error { + return nil +} +func extractAutoscalingPolicyWorkerConfigFields(r *AutoscalingPolicy, o *AutoscalingPolicyWorkerConfig) error { + return nil +} +func extractAutoscalingPolicySecondaryWorkerConfigFields(r *AutoscalingPolicy, o *AutoscalingPolicySecondaryWorkerConfig) error { + return nil +} + +func postReadExtractAutoscalingPolicyFields(r *AutoscalingPolicy) error { + vBasicAlgorithm := r.BasicAlgorithm + if vBasicAlgorithm == nil { + // note: explicitly not the empty object. + vBasicAlgorithm = &AutoscalingPolicyBasicAlgorithm{} + } + if err := postReadExtractAutoscalingPolicyBasicAlgorithmFields(r, vBasicAlgorithm); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vBasicAlgorithm) { + r.BasicAlgorithm = vBasicAlgorithm + } + vWorkerConfig := r.WorkerConfig + if vWorkerConfig == nil { + // note: explicitly not the empty object. 
+ vWorkerConfig = &AutoscalingPolicyWorkerConfig{} + } + if err := postReadExtractAutoscalingPolicyWorkerConfigFields(r, vWorkerConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vWorkerConfig) { + r.WorkerConfig = vWorkerConfig + } + vSecondaryWorkerConfig := r.SecondaryWorkerConfig + if vSecondaryWorkerConfig == nil { + // note: explicitly not the empty object. + vSecondaryWorkerConfig = &AutoscalingPolicySecondaryWorkerConfig{} + } + if err := postReadExtractAutoscalingPolicySecondaryWorkerConfigFields(r, vSecondaryWorkerConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSecondaryWorkerConfig) { + r.SecondaryWorkerConfig = vSecondaryWorkerConfig + } + return nil +} +func postReadExtractAutoscalingPolicyBasicAlgorithmFields(r *AutoscalingPolicy, o *AutoscalingPolicyBasicAlgorithm) error { + vYarnConfig := o.YarnConfig + if vYarnConfig == nil { + // note: explicitly not the empty object. + vYarnConfig = &AutoscalingPolicyBasicAlgorithmYarnConfig{} + } + if err := extractAutoscalingPolicyBasicAlgorithmYarnConfigFields(r, vYarnConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vYarnConfig) { + o.YarnConfig = vYarnConfig + } + return nil +} +func postReadExtractAutoscalingPolicyBasicAlgorithmYarnConfigFields(r *AutoscalingPolicy, o *AutoscalingPolicyBasicAlgorithmYarnConfig) error { + return nil +} +func postReadExtractAutoscalingPolicyWorkerConfigFields(r *AutoscalingPolicy, o *AutoscalingPolicyWorkerConfig) error { + return nil +} +func postReadExtractAutoscalingPolicySecondaryWorkerConfigFields(r *AutoscalingPolicy, o *AutoscalingPolicySecondaryWorkerConfig) error { + return nil +} diff --git a/mmv1/third_party/terraform/services/dataproc/autoscaling_policy_schema.go.tmpl b/mmv1/third_party/terraform/services/dataproc/autoscaling_policy_schema.go.tmpl new file mode 100644 index 000000000000..a1b03be133d0 --- /dev/null +++ b/mmv1/third_party/terraform/services/dataproc/autoscaling_policy_schema.go.tmpl @@ 
-0,0 +1,250 @@ +package dataproc + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +func DCLAutoscalingPolicySchema() *dcl.Schema { + return &dcl.Schema{ + Info: &dcl.Info{ + Title: "Dataproc/AutoscalingPolicy", + Description: "The Dataproc AutoscalingPolicy resource", + StructName: "AutoscalingPolicy", + }, + Paths: &dcl.Paths{ + Get: &dcl.Path{ + Description: "The function used to get information about a AutoscalingPolicy", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "autoscalingPolicy", + Required: true, + Description: "A full instance of a AutoscalingPolicy", + }, + }, + }, + Apply: &dcl.Path{ + Description: "The function used to apply information about a AutoscalingPolicy", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "autoscalingPolicy", + Required: true, + Description: "A full instance of a AutoscalingPolicy", + }, + }, + }, + Delete: &dcl.Path{ + Description: "The function used to delete a AutoscalingPolicy", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "autoscalingPolicy", + Required: true, + Description: "A full instance of a AutoscalingPolicy", + }, + }, + }, + DeleteAll: &dcl.Path{ + Description: "The function used to delete all AutoscalingPolicy", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "project", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + dcl.PathParameters{ + Name: "location", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + }, + }, + List: &dcl.Path{ + Description: "The function used to list information about many AutoscalingPolicy", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "project", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + dcl.PathParameters{ + Name: "location", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + }, + }, + }, + Components: 
&dcl.Components{ + Schemas: map[string]*dcl.Component{ + "AutoscalingPolicy": &dcl.Component{ + Title: "AutoscalingPolicy", + ID: "projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/autoscalingPolicies/{{ "{{" }}name{{ "}}" }}", + ParentContainer: "project", + HasCreate: true, + SchemaProperty: dcl.Property{ + Type: "object", + Required: []string{ + "name", + "basicAlgorithm", + "workerConfig", + "project", + "location", + }, + Properties: map[string]*dcl.Property{ + "basicAlgorithm": &dcl.Property{ + Type: "object", + GoName: "BasicAlgorithm", + GoType: "AutoscalingPolicyBasicAlgorithm", + Required: []string{ + "yarnConfig", + }, + Properties: map[string]*dcl.Property{ + "cooldownPeriod": &dcl.Property{ + Type: "string", + GoName: "CooldownPeriod", + Description: "Optional. Duration between scaling events. A scaling period starts after the update operation from the previous event has completed. Bounds: . Default: 2m.", + ServerDefault: true, + }, + "yarnConfig": &dcl.Property{ + Type: "object", + GoName: "YarnConfig", + GoType: "AutoscalingPolicyBasicAlgorithmYarnConfig", + Description: "Required. YARN autoscaling configuration.", + Required: []string{ + "gracefulDecommissionTimeout", + "scaleUpFactor", + "scaleDownFactor", + }, + Properties: map[string]*dcl.Property{ + "gracefulDecommissionTimeout": &dcl.Property{ + Type: "string", + GoName: "GracefulDecommissionTimeout", + Description: "Required. Timeout for YARN graceful decommissioning of Node Managers. Specifies the duration to wait for jobs to complete before forcefully removing workers (and potentially interrupting jobs). Only applicable to downscaling operations.", + }, + "scaleDownFactor": &dcl.Property{ + Type: "number", + Format: "double", + GoName: "ScaleDownFactor", + Description: "Required. Fraction of average YARN pending memory in the last cooldown period for which to remove workers. 
A scale-down factor of 1 will result in scaling down so that there is no available memory remaining after the update (more aggressive scaling). A scale-down factor of 0 disables removing workers, which can be beneficial for autoscaling a single job. See .", + }, + "scaleDownMinWorkerFraction": &dcl.Property{ + Type: "number", + Format: "double", + GoName: "ScaleDownMinWorkerFraction", + Description: "Optional. Minimum scale-down threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2 worker scale-down for the cluster to scale. A threshold of 0 means the autoscaler will scale down on any recommended change. Bounds: . Default: 0.0.", + }, + "scaleUpFactor": &dcl.Property{ + Type: "number", + Format: "double", + GoName: "ScaleUpFactor", + Description: "Required. Fraction of average YARN pending memory in the last cooldown period for which to add workers. A scale-up factor of 1.0 will result in scaling up so that there is no pending memory remaining after the update (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling). See .", + }, + "scaleUpMinWorkerFraction": &dcl.Property{ + Type: "number", + Format: "double", + GoName: "ScaleUpMinWorkerFraction", + Description: "Optional. Minimum scale-up threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will scale up on any recommended change. Bounds: . 
Default: 0.0.", + }, + }, + }, + }, + }, + "location": &dcl.Property{ + Type: "string", + GoName: "Location", + Description: "The location for the resource", + Immutable: true, + Parameter: true, + }, + "name": &dcl.Property{ + Type: "string", + GoName: "Name", + Description: "The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names. * For `projects.regions.autoscalingPolicies`, the resource name of the policy has the following format: `projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}` * For `projects.locations.autoscalingPolicies`, the resource name of the policy has the following format: `projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`", + Immutable: true, + }, + "project": &dcl.Property{ + Type: "string", + GoName: "Project", + Description: "The project for the resource", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Cloudresourcemanager/Project", + Field: "name", + Parent: true, + }, + }, + Parameter: true, + }, + "secondaryWorkerConfig": &dcl.Property{ + Type: "object", + GoName: "SecondaryWorkerConfig", + GoType: "AutoscalingPolicySecondaryWorkerConfig", + Description: "Optional. Describes how the autoscaler will operate for secondary workers.", + Properties: map[string]*dcl.Property{ + "maxInstances": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "MaxInstances", + Description: "Optional. Maximum number of instances for this group. Note that by default, clusters will not use secondary workers. Required for secondary workers if the minimum secondary instances is set. Primary workers - Bounds: [min_instances, ). Secondary workers - Bounds: [min_instances, ). Default: 0.", + }, + "minInstances": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "MinInstances", + Description: "Optional. Minimum number of instances for this group. 
Primary workers - Bounds: . Default: 0.", + }, + "weight": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "Weight", + Description: "Optional. Weight for the instance group, which is used to determine the fraction of total workers in the cluster from this instance group. For example, if primary workers have weight 2, and secondary workers have weight 1, the cluster will have approximately 2 primary workers for each secondary worker. The cluster may not reach the specified balance if constrained by min/max bounds or other autoscaling settings. For example, if `max_instances` for secondary workers is 0, then only primary workers will be added. The cluster can also be out of balance when created. If weight is not set on any instance group, the cluster will default to equal weight for all groups: the cluster will attempt to maintain an equal number of workers in each group within the configured size bounds for each group. If weight is set for one group only, the cluster will default to zero weight on the unset group. For example if weight is set only on primary workers, the cluster will use primary workers only and no secondary workers.", + ServerDefault: true, + }, + }, + }, + "workerConfig": &dcl.Property{ + Type: "object", + GoName: "WorkerConfig", + GoType: "AutoscalingPolicyWorkerConfig", + Description: "Required. Describes how the autoscaler will operate for primary workers.", + Required: []string{ + "maxInstances", + }, + Properties: map[string]*dcl.Property{ + "maxInstances": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "MaxInstances", + Description: "Required. Maximum number of instances for this group. Required for primary workers. Note that by default, clusters will not use secondary workers. Required for secondary workers if the minimum secondary instances is set. Primary workers - Bounds: [min_instances, ). Secondary workers - Bounds: [min_instances, ). 
Default: 0.", + }, + "minInstances": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "MinInstances", + Description: "Optional. Minimum number of instances for this group. Primary workers - Bounds: . Default: 0.", + ServerDefault: true, + }, + "weight": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "Weight", + Description: "Optional. Weight for the instance group, which is used to determine the fraction of total workers in the cluster from this instance group. For example, if primary workers have weight 2, and secondary workers have weight 1, the cluster will have approximately 2 primary workers for each secondary worker. The cluster may not reach the specified balance if constrained by min/max bounds or other autoscaling settings. For example, if `max_instances` for secondary workers is 0, then only primary workers will be added. The cluster can also be out of balance when created. If weight is not set on any instance group, the cluster will default to equal weight for all groups: the cluster will attempt to maintain an equal number of workers in each group within the configured size bounds for each group. If weight is set for one group only, the cluster will default to zero weight on the unset group. For example if weight is set only on primary workers, the cluster will use primary workers only and no secondary workers.", + ServerDefault: true, + }, + }, + }, + }, + }, + }, + }, + }, + } +} diff --git a/mmv1/third_party/terraform/services/dataproc/client.go.tmpl b/mmv1/third_party/terraform/services/dataproc/client.go.tmpl new file mode 100644 index 000000000000..006c4fa2094c --- /dev/null +++ b/mmv1/third_party/terraform/services/dataproc/client.go.tmpl @@ -0,0 +1,18 @@ +package dataproc + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +// The Client is the base struct of all operations. This will receive the +// Get, Delete, List, and Apply operations on all resources. 
+type Client struct { + Config *dcl.Config +} + +// NewClient creates a client that retries all operations a few times each. +func NewClient(c *dcl.Config) *Client { + return &Client{ + Config: c, + } +} diff --git a/mmv1/third_party/terraform/services/dataproc/cluster.go.tmpl b/mmv1/third_party/terraform/services/dataproc/cluster.go.tmpl new file mode 100644 index 000000000000..e695420be558 --- /dev/null +++ b/mmv1/third_party/terraform/services/dataproc/cluster.go.tmpl @@ -0,0 +1,3457 @@ +package dataproc + +import ( + "bytes" + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "time" + + "google.golang.org/api/googleapi" + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +type Cluster struct { + Project *string `json:"project"` + Name *string `json:"name"` + Config *ClusterConfig `json:"config"` + Labels map[string]string `json:"labels"` + Status *ClusterStatus `json:"status"` + StatusHistory []ClusterStatusHistory `json:"statusHistory"` + ClusterUuid *string `json:"clusterUuid"` + Metrics *ClusterMetrics `json:"metrics"` + Location *string `json:"location"` + VirtualClusterConfig *ClusterVirtualClusterConfig `json:"virtualClusterConfig"` +} + +func (r *Cluster) String() string { + return dcl.SprintResource(r) +} + +// The enum ClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum. +type ClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum string + +// ClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnumRef returns a *ClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum with the value of string s +// If the empty string is provided, nil is returned. +func ClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnumRef(s string) *ClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum { + v := ClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum(s) + return &v +} + +func (v ClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. 
+ return nil + } + for _, s := range []string{"PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED", "INHERIT_FROM_SUBNETWORK", "OUTBOUND", "BIDIRECTIONAL"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "ClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum ClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum. +type ClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum string + +// ClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnumRef returns a *ClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum with the value of string s +// If the empty string is provided, nil is returned. +func ClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnumRef(s string) *ClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum { + v := ClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum(s) + return &v +} + +func (v ClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"TYPE_UNSPECIFIED", "NO_RESERVATION", "ANY_RESERVATION", "SPECIFIC_RESERVATION"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "ClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum ClusterConfigMasterConfigPreemptibilityEnum. +type ClusterConfigMasterConfigPreemptibilityEnum string + +// ClusterConfigMasterConfigPreemptibilityEnumRef returns a *ClusterConfigMasterConfigPreemptibilityEnum with the value of string s +// If the empty string is provided, nil is returned. 
+func ClusterConfigMasterConfigPreemptibilityEnumRef(s string) *ClusterConfigMasterConfigPreemptibilityEnum { + v := ClusterConfigMasterConfigPreemptibilityEnum(s) + return &v +} + +func (v ClusterConfigMasterConfigPreemptibilityEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"PREEMPTIBILITY_UNSPECIFIED", "NON_PREEMPTIBLE", "PREEMPTIBLE"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "ClusterConfigMasterConfigPreemptibilityEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum ClusterConfigWorkerConfigPreemptibilityEnum. +type ClusterConfigWorkerConfigPreemptibilityEnum string + +// ClusterConfigWorkerConfigPreemptibilityEnumRef returns a *ClusterConfigWorkerConfigPreemptibilityEnum with the value of string s +// If the empty string is provided, nil is returned. +func ClusterConfigWorkerConfigPreemptibilityEnumRef(s string) *ClusterConfigWorkerConfigPreemptibilityEnum { + v := ClusterConfigWorkerConfigPreemptibilityEnum(s) + return &v +} + +func (v ClusterConfigWorkerConfigPreemptibilityEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"PREEMPTIBILITY_UNSPECIFIED", "NON_PREEMPTIBLE", "PREEMPTIBLE"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "ClusterConfigWorkerConfigPreemptibilityEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum ClusterConfigSecondaryWorkerConfigPreemptibilityEnum. +type ClusterConfigSecondaryWorkerConfigPreemptibilityEnum string + +// ClusterConfigSecondaryWorkerConfigPreemptibilityEnumRef returns a *ClusterConfigSecondaryWorkerConfigPreemptibilityEnum with the value of string s +// If the empty string is provided, nil is returned. 
+func ClusterConfigSecondaryWorkerConfigPreemptibilityEnumRef(s string) *ClusterConfigSecondaryWorkerConfigPreemptibilityEnum { + v := ClusterConfigSecondaryWorkerConfigPreemptibilityEnum(s) + return &v +} + +func (v ClusterConfigSecondaryWorkerConfigPreemptibilityEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"PREEMPTIBILITY_UNSPECIFIED", "NON_PREEMPTIBLE", "PREEMPTIBLE"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "ClusterConfigSecondaryWorkerConfigPreemptibilityEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum ClusterConfigSoftwareConfigOptionalComponentsEnum. +type ClusterConfigSoftwareConfigOptionalComponentsEnum string + +// ClusterConfigSoftwareConfigOptionalComponentsEnumRef returns a *ClusterConfigSoftwareConfigOptionalComponentsEnum with the value of string s +// If the empty string is provided, nil is returned. +func ClusterConfigSoftwareConfigOptionalComponentsEnumRef(s string) *ClusterConfigSoftwareConfigOptionalComponentsEnum { + v := ClusterConfigSoftwareConfigOptionalComponentsEnum(s) + return &v +} + +func (v ClusterConfigSoftwareConfigOptionalComponentsEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"COMPONENT_UNSPECIFIED", "ANACONDA", "DOCKER", "DRUID", "FLINK", "HBASE", "HIVE_WEBHCAT", "JUPYTER", "KERBEROS", "PRESTO", "RANGER", "SOLR", "ZEPPELIN", "ZOOKEEPER"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "ClusterConfigSoftwareConfigOptionalComponentsEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum ClusterConfigDataprocMetricConfigMetricsMetricSourceEnum. 
+type ClusterConfigDataprocMetricConfigMetricsMetricSourceEnum string + +// ClusterConfigDataprocMetricConfigMetricsMetricSourceEnumRef returns a *ClusterConfigDataprocMetricConfigMetricsMetricSourceEnum with the value of string s +// If the empty string is provided, nil is returned. +func ClusterConfigDataprocMetricConfigMetricsMetricSourceEnumRef(s string) *ClusterConfigDataprocMetricConfigMetricsMetricSourceEnum { + v := ClusterConfigDataprocMetricConfigMetricsMetricSourceEnum(s) + return &v +} + +func (v ClusterConfigDataprocMetricConfigMetricsMetricSourceEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"METRIC_SOURCE_UNSPECIFIED", "MONITORING_AGENT_DEFAULTS", "HDFS", "SPARK", "YARN", "SPARK_HISTORY_SERVER", "HIVESERVER2"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "ClusterConfigDataprocMetricConfigMetricsMetricSourceEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum ClusterStatusStateEnum. +type ClusterStatusStateEnum string + +// ClusterStatusStateEnumRef returns a *ClusterStatusStateEnum with the value of string s +// If the empty string is provided, nil is returned. +func ClusterStatusStateEnumRef(s string) *ClusterStatusStateEnum { + v := ClusterStatusStateEnum(s) + return &v +} + +func (v ClusterStatusStateEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"UNKNOWN", "CREATING", "RUNNING", "ERROR", "DELETING", "UPDATING", "STOPPING", "STOPPED", "STARTING"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "ClusterStatusStateEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum ClusterStatusSubstateEnum. +type ClusterStatusSubstateEnum string + +// ClusterStatusSubstateEnumRef returns a *ClusterStatusSubstateEnum with the value of string s +// If the empty string is provided, nil is returned. 
+func ClusterStatusSubstateEnumRef(s string) *ClusterStatusSubstateEnum { + v := ClusterStatusSubstateEnum(s) + return &v +} + +func (v ClusterStatusSubstateEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"UNSPECIFIED", "UNHEALTHY", "STALE_STATUS"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "ClusterStatusSubstateEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum ClusterStatusHistoryStateEnum. +type ClusterStatusHistoryStateEnum string + +// ClusterStatusHistoryStateEnumRef returns a *ClusterStatusHistoryStateEnum with the value of string s +// If the empty string is provided, nil is returned. +func ClusterStatusHistoryStateEnumRef(s string) *ClusterStatusHistoryStateEnum { + v := ClusterStatusHistoryStateEnum(s) + return &v +} + +func (v ClusterStatusHistoryStateEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"UNKNOWN", "CREATING", "RUNNING", "ERROR", "DELETING", "UPDATING", "STOPPING", "STOPPED", "STARTING"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "ClusterStatusHistoryStateEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum ClusterStatusHistorySubstateEnum. +type ClusterStatusHistorySubstateEnum string + +// ClusterStatusHistorySubstateEnumRef returns a *ClusterStatusHistorySubstateEnum with the value of string s +// If the empty string is provided, nil is returned. +func ClusterStatusHistorySubstateEnumRef(s string) *ClusterStatusHistorySubstateEnum { + v := ClusterStatusHistorySubstateEnum(s) + return &v +} + +func (v ClusterStatusHistorySubstateEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. 
+ return nil + } + for _, s := range []string{"UNSPECIFIED", "UNHEALTHY", "STALE_STATUS"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "ClusterStatusHistorySubstateEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum. +type ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum string + +// ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnumRef returns a *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum with the value of string s +// If the empty string is provided, nil is returned. +func ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnumRef(s string) *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum { + v := ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum(s) + return &v +} + +func (v ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. 
		return nil
	}
	// Accept exactly the enum values enumerated here; anything else is invalid.
	for _, s := range []string{"ROLE_UNSPECIFIED", "DEFAULT", "CONTROLLER", "SPARK_DRIVER", "SPARK_EXECUTOR"} {
		if string(v) == s {
			return nil
		}
	}
	return &dcl.EnumInvalidError{
		Enum:  "ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum",
		Value: string(v),
		Valid: []string{},
	}
}

// ClusterConfig is the JSON model for the cluster's "config" block.
// Every field is a pointer or slice so an unset field can be told apart
// from a zero value; `empty` marks the shared empty-object sentinel.
type ClusterConfig struct {
	empty                 bool                                 `json:"-"`
	StagingBucket         *string                              `json:"stagingBucket"`
	TempBucket            *string                              `json:"tempBucket"`
	GceClusterConfig      *ClusterConfigGceClusterConfig       `json:"gceClusterConfig"`
	MasterConfig          *ClusterConfigMasterConfig           `json:"masterConfig"`
	WorkerConfig          *ClusterConfigWorkerConfig           `json:"workerConfig"`
	SecondaryWorkerConfig *ClusterConfigSecondaryWorkerConfig  `json:"secondaryWorkerConfig"`
	SoftwareConfig        *ClusterConfigSoftwareConfig         `json:"softwareConfig"`
	InitializationActions []ClusterConfigInitializationActions `json:"initializationActions"`
	EncryptionConfig      *ClusterConfigEncryptionConfig       `json:"encryptionConfig"`
	AutoscalingConfig     *ClusterConfigAutoscalingConfig      `json:"autoscalingConfig"`
	SecurityConfig        *ClusterConfigSecurityConfig         `json:"securityConfig"`
	LifecycleConfig       *ClusterConfigLifecycleConfig        `json:"lifecycleConfig"`
	EndpointConfig        *ClusterConfigEndpointConfig         `json:"endpointConfig"`
	GkeClusterConfig      *ClusterConfigGkeClusterConfig       `json:"gkeClusterConfig"`
	MetastoreConfig       *ClusterConfigMetastoreConfig        `json:"metastoreConfig"`
	DataprocMetricConfig  *ClusterConfigDataprocMetricConfig   `json:"dataprocMetricConfig"`
}

// jsonClusterConfig is an alias that prevents UnmarshalJSON below from
// recursing into itself.
type jsonClusterConfig ClusterConfig

// UnmarshalJSON decodes the message, mapping an explicitly empty JSON
// object ({}) to the canonical EmptyClusterConfig sentinel so that empty
// and unset can be distinguished.
func (r *ClusterConfig) UnmarshalJSON(data []byte) error {
	var res jsonClusterConfig
	if err := json.Unmarshal(data, &res); err != nil {
		return err
	}

	var m map[string]interface{}
	// Emptiness probe; error ignored because data was already verified to
	// be valid JSON by the unmarshal above.
	json.Unmarshal(data, &m)

	if len(m) == 0 {
		*r = *EmptyClusterConfig
	} else {

		r.StagingBucket = res.StagingBucket

		r.TempBucket = res.TempBucket

		r.GceClusterConfig = res.GceClusterConfig

		r.MasterConfig = res.MasterConfig

		r.WorkerConfig = res.WorkerConfig

		r.SecondaryWorkerConfig = res.SecondaryWorkerConfig

		r.SoftwareConfig = res.SoftwareConfig

		r.InitializationActions = res.InitializationActions

		r.EncryptionConfig = res.EncryptionConfig

		r.AutoscalingConfig = res.AutoscalingConfig

		r.SecurityConfig = res.SecurityConfig

		r.LifecycleConfig = res.LifecycleConfig

		r.EndpointConfig = res.EndpointConfig

		r.GkeClusterConfig = res.GkeClusterConfig

		r.MetastoreConfig = res.MetastoreConfig

		r.DataprocMetricConfig = res.DataprocMetricConfig

	}
	return nil
}

// This object is used to assert a desired state where this ClusterConfig is
// empty. Go lacks global const objects, but this object should be treated
// as one. Modifying this object will have undesirable results.
var EmptyClusterConfig *ClusterConfig = &ClusterConfig{empty: true}

// Empty reports whether r is the canonical empty sentinel.
func (r *ClusterConfig) Empty() bool {
	return r.empty
}

// String returns a printable form of r.
func (r *ClusterConfig) String() string {
	return dcl.SprintResource(r)
}

// HashCode returns a sha256 digest of r's string form.
func (r *ClusterConfig) HashCode() string {
	// Placeholder for a more complex hash method that handles ordering, etc
	// Hash resource body for easy comparison later
	hash := sha256.Sum256([]byte(r.String()))
	return fmt.Sprintf("%x", hash)
}

// ClusterConfigGceClusterConfig is the JSON model for the Compute Engine
// settings of the cluster config; pointer/slice fields distinguish unset
// from zero.
type ClusterConfigGceClusterConfig struct {
	empty                      bool                                                      `json:"-"`
	Zone                       *string                                                   `json:"zone"`
	Network                    *string                                                   `json:"network"`
	Subnetwork                 *string                                                   `json:"subnetwork"`
	InternalIPOnly             *bool                                                     `json:"internalIPOnly"`
	PrivateIPv6GoogleAccess    *ClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum `json:"privateIPv6GoogleAccess"`
	ServiceAccount             *string                                                   `json:"serviceAccount"`
	ServiceAccountScopes       []string                                                  `json:"serviceAccountScopes"`
	Tags                       []string                                                  `json:"tags"`
	Metadata                   map[string]string                                         `json:"metadata"`
	ReservationAffinity        *ClusterConfigGceClusterConfigReservationAffinity         `json:"reservationAffinity"`
	NodeGroupAffinity          *ClusterConfigGceClusterConfigNodeGroupAffinity           `json:"nodeGroupAffinity"`
	ShieldedInstanceConfig     *ClusterConfigGceClusterConfigShieldedInstanceConfig      `json:"shieldedInstanceConfig"`
	ConfidentialInstanceConfig *ClusterConfigGceClusterConfigConfidentialInstanceConfig  `json:"confidentialInstanceConfig"`
}

// jsonClusterConfigGceClusterConfig prevents UnmarshalJSON recursion.
type jsonClusterConfigGceClusterConfig ClusterConfigGceClusterConfig

// UnmarshalJSON maps an explicitly empty JSON object to the canonical
// EmptyClusterConfigGceClusterConfig sentinel.
func (r *ClusterConfigGceClusterConfig) UnmarshalJSON(data []byte) error {
	var res jsonClusterConfigGceClusterConfig
	if err := json.Unmarshal(data, &res); err != nil {
		return err
	}

	var m map[string]interface{}
	// Emptiness probe; error ignored because data is already known-valid JSON.
	json.Unmarshal(data, &m)

	if len(m) == 0 {
		*r = *EmptyClusterConfigGceClusterConfig
	} else {

		r.Zone = res.Zone

		r.Network = res.Network

		r.Subnetwork = res.Subnetwork

		r.InternalIPOnly = res.InternalIPOnly

		r.PrivateIPv6GoogleAccess = res.PrivateIPv6GoogleAccess

		r.ServiceAccount = res.ServiceAccount

		r.ServiceAccountScopes = res.ServiceAccountScopes

		r.Tags = res.Tags

		r.Metadata = res.Metadata

		r.ReservationAffinity = res.ReservationAffinity

		r.NodeGroupAffinity = res.NodeGroupAffinity

		r.ShieldedInstanceConfig = res.ShieldedInstanceConfig

		r.ConfidentialInstanceConfig = res.ConfidentialInstanceConfig

	}
	return nil
}

// This object is used to assert a desired state where this ClusterConfigGceClusterConfig is
// empty. Go lacks global const objects, but this object should be treated
// as one. Modifying this object will have undesirable results.
+var EmptyClusterConfigGceClusterConfig *ClusterConfigGceClusterConfig = &ClusterConfigGceClusterConfig{empty: true} + +func (r *ClusterConfigGceClusterConfig) Empty() bool { + return r.empty +} + +func (r *ClusterConfigGceClusterConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterConfigGceClusterConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterConfigGceClusterConfigReservationAffinity struct { + empty bool `json:"-"` + ConsumeReservationType *ClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum `json:"consumeReservationType"` + Key *string `json:"key"` + Values []string `json:"values"` +} + +type jsonClusterConfigGceClusterConfigReservationAffinity ClusterConfigGceClusterConfigReservationAffinity + +func (r *ClusterConfigGceClusterConfigReservationAffinity) UnmarshalJSON(data []byte) error { + var res jsonClusterConfigGceClusterConfigReservationAffinity + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterConfigGceClusterConfigReservationAffinity + } else { + + r.ConsumeReservationType = res.ConsumeReservationType + + r.Key = res.Key + + r.Values = res.Values + + } + return nil +} + +// This object is used to assert a desired state where this ClusterConfigGceClusterConfigReservationAffinity is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterConfigGceClusterConfigReservationAffinity *ClusterConfigGceClusterConfigReservationAffinity = &ClusterConfigGceClusterConfigReservationAffinity{empty: true} + +func (r *ClusterConfigGceClusterConfigReservationAffinity) Empty() bool { + return r.empty +} + +func (r *ClusterConfigGceClusterConfigReservationAffinity) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterConfigGceClusterConfigReservationAffinity) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterConfigGceClusterConfigNodeGroupAffinity struct { + empty bool `json:"-"` + NodeGroup *string `json:"nodeGroup"` +} + +type jsonClusterConfigGceClusterConfigNodeGroupAffinity ClusterConfigGceClusterConfigNodeGroupAffinity + +func (r *ClusterConfigGceClusterConfigNodeGroupAffinity) UnmarshalJSON(data []byte) error { + var res jsonClusterConfigGceClusterConfigNodeGroupAffinity + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterConfigGceClusterConfigNodeGroupAffinity + } else { + + r.NodeGroup = res.NodeGroup + + } + return nil +} + +// This object is used to assert a desired state where this ClusterConfigGceClusterConfigNodeGroupAffinity is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterConfigGceClusterConfigNodeGroupAffinity *ClusterConfigGceClusterConfigNodeGroupAffinity = &ClusterConfigGceClusterConfigNodeGroupAffinity{empty: true} + +func (r *ClusterConfigGceClusterConfigNodeGroupAffinity) Empty() bool { + return r.empty +} + +func (r *ClusterConfigGceClusterConfigNodeGroupAffinity) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterConfigGceClusterConfigNodeGroupAffinity) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterConfigGceClusterConfigShieldedInstanceConfig struct { + empty bool `json:"-"` + EnableSecureBoot *bool `json:"enableSecureBoot"` + EnableVtpm *bool `json:"enableVtpm"` + EnableIntegrityMonitoring *bool `json:"enableIntegrityMonitoring"` +} + +type jsonClusterConfigGceClusterConfigShieldedInstanceConfig ClusterConfigGceClusterConfigShieldedInstanceConfig + +func (r *ClusterConfigGceClusterConfigShieldedInstanceConfig) UnmarshalJSON(data []byte) error { + var res jsonClusterConfigGceClusterConfigShieldedInstanceConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterConfigGceClusterConfigShieldedInstanceConfig + } else { + + r.EnableSecureBoot = res.EnableSecureBoot + + r.EnableVtpm = res.EnableVtpm + + r.EnableIntegrityMonitoring = res.EnableIntegrityMonitoring + + } + return nil +} + +// This object is used to assert a desired state where this ClusterConfigGceClusterConfigShieldedInstanceConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterConfigGceClusterConfigShieldedInstanceConfig *ClusterConfigGceClusterConfigShieldedInstanceConfig = &ClusterConfigGceClusterConfigShieldedInstanceConfig{empty: true} + +func (r *ClusterConfigGceClusterConfigShieldedInstanceConfig) Empty() bool { + return r.empty +} + +func (r *ClusterConfigGceClusterConfigShieldedInstanceConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterConfigGceClusterConfigShieldedInstanceConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterConfigGceClusterConfigConfidentialInstanceConfig struct { + empty bool `json:"-"` + EnableConfidentialCompute *bool `json:"enableConfidentialCompute"` +} + +type jsonClusterConfigGceClusterConfigConfidentialInstanceConfig ClusterConfigGceClusterConfigConfidentialInstanceConfig + +func (r *ClusterConfigGceClusterConfigConfidentialInstanceConfig) UnmarshalJSON(data []byte) error { + var res jsonClusterConfigGceClusterConfigConfidentialInstanceConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterConfigGceClusterConfigConfidentialInstanceConfig + } else { + + r.EnableConfidentialCompute = res.EnableConfidentialCompute + + } + return nil +} + +// This object is used to assert a desired state where this ClusterConfigGceClusterConfigConfidentialInstanceConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
var EmptyClusterConfigGceClusterConfigConfidentialInstanceConfig *ClusterConfigGceClusterConfigConfidentialInstanceConfig = &ClusterConfigGceClusterConfigConfidentialInstanceConfig{empty: true}

// Empty reports whether r is the canonical empty sentinel.
func (r *ClusterConfigGceClusterConfigConfidentialInstanceConfig) Empty() bool {
	return r.empty
}

// String returns a printable form of r.
func (r *ClusterConfigGceClusterConfigConfidentialInstanceConfig) String() string {
	return dcl.SprintResource(r)
}

// HashCode returns a sha256 digest of r's string form.
func (r *ClusterConfigGceClusterConfigConfidentialInstanceConfig) HashCode() string {
	// Placeholder for a more complex hash method that handles ordering, etc
	// Hash resource body for easy comparison later
	hash := sha256.Sum256([]byte(r.String()))
	return fmt.Sprintf("%x", hash)
}

// ClusterConfigMasterConfig is the JSON model for the master instance
// group settings; pointer/slice fields distinguish unset from zero.
type ClusterConfigMasterConfig struct {
	empty              bool                                          `json:"-"`
	NumInstances       *int64                                        `json:"numInstances"`
	InstanceNames      []string                                      `json:"instanceNames"`
	Image              *string                                       `json:"image"`
	MachineType        *string                                       `json:"machineType"`
	DiskConfig         *ClusterConfigMasterConfigDiskConfig          `json:"diskConfig"`
	IsPreemptible      *bool                                         `json:"isPreemptible"`
	Preemptibility     *ClusterConfigMasterConfigPreemptibilityEnum  `json:"preemptibility"`
	ManagedGroupConfig *ClusterConfigMasterConfigManagedGroupConfig  `json:"managedGroupConfig"`
	Accelerators       []ClusterConfigMasterConfigAccelerators       `json:"accelerators"`
	MinCpuPlatform     *string                                       `json:"minCpuPlatform"`
	InstanceReferences []ClusterConfigMasterConfigInstanceReferences `json:"instanceReferences"`
}

// jsonClusterConfigMasterConfig prevents UnmarshalJSON recursion.
type jsonClusterConfigMasterConfig ClusterConfigMasterConfig

// UnmarshalJSON maps an explicitly empty JSON object to the canonical
// EmptyClusterConfigMasterConfig sentinel.
func (r *ClusterConfigMasterConfig) UnmarshalJSON(data []byte) error {
	var res jsonClusterConfigMasterConfig
	if err := json.Unmarshal(data, &res); err != nil {
		return err
	}

	var m map[string]interface{}
	// Emptiness probe; error ignored because data is already known-valid JSON.
	json.Unmarshal(data, &m)

	if len(m) == 0 {
		*r = *EmptyClusterConfigMasterConfig
	} else {

		r.NumInstances = res.NumInstances

		r.InstanceNames = res.InstanceNames

		r.Image = res.Image

		r.MachineType = res.MachineType

		r.DiskConfig = res.DiskConfig

		r.IsPreemptible = res.IsPreemptible

		r.Preemptibility = res.Preemptibility

		r.ManagedGroupConfig = res.ManagedGroupConfig

		r.Accelerators = res.Accelerators

		r.MinCpuPlatform = res.MinCpuPlatform

		r.InstanceReferences = res.InstanceReferences

	}
	return nil
}

// This object is used to assert a desired state where this ClusterConfigMasterConfig is
// empty. Go lacks global const objects, but this object should be treated
// as one. Modifying this object will have undesirable results.
var EmptyClusterConfigMasterConfig *ClusterConfigMasterConfig = &ClusterConfigMasterConfig{empty: true}

// Empty reports whether r is the canonical empty sentinel.
func (r *ClusterConfigMasterConfig) Empty() bool {
	return r.empty
}

// String returns a printable form of r.
func (r *ClusterConfigMasterConfig) String() string {
	return dcl.SprintResource(r)
}

// HashCode returns a sha256 digest of r's string form.
func (r *ClusterConfigMasterConfig) HashCode() string {
	// Placeholder for a more complex hash method that handles ordering, etc
	// Hash resource body for easy comparison later
	hash := sha256.Sum256([]byte(r.String()))
	return fmt.Sprintf("%x", hash)
}

// ClusterConfigMasterConfigDiskConfig is the JSON model for the master
// group's disk settings; pointer fields distinguish unset from zero.
type ClusterConfigMasterConfigDiskConfig struct {
	empty             bool    `json:"-"`
	BootDiskType      *string `json:"bootDiskType"`
	BootDiskSizeGb    *int64  `json:"bootDiskSizeGb"`
	NumLocalSsds      *int64  `json:"numLocalSsds"`
	LocalSsdInterface *string `json:"localSsdInterface"`
}

// jsonClusterConfigMasterConfigDiskConfig prevents UnmarshalJSON recursion.
type jsonClusterConfigMasterConfigDiskConfig ClusterConfigMasterConfigDiskConfig

// UnmarshalJSON maps an explicitly empty JSON object to the canonical
// EmptyClusterConfigMasterConfigDiskConfig sentinel.
func (r *ClusterConfigMasterConfigDiskConfig) UnmarshalJSON(data []byte) error {
	var res jsonClusterConfigMasterConfigDiskConfig
	if err := json.Unmarshal(data, &res); err != nil {
		return err
	}

	var m map[string]interface{}
	// Emptiness probe; error ignored because data is already known-valid JSON.
	json.Unmarshal(data, &m)

	if len(m) == 0 {
		*r = *EmptyClusterConfigMasterConfigDiskConfig
	} else {

		r.BootDiskType = res.BootDiskType

		r.BootDiskSizeGb = res.BootDiskSizeGb

		r.NumLocalSsds = res.NumLocalSsds

		r.LocalSsdInterface = res.LocalSsdInterface

	}
	return nil
}

// This object is used to assert a desired state where this ClusterConfigMasterConfigDiskConfig is
// empty. Go lacks global const objects, but this object should be treated
// as one. Modifying this object will have undesirable results.
var EmptyClusterConfigMasterConfigDiskConfig *ClusterConfigMasterConfigDiskConfig = &ClusterConfigMasterConfigDiskConfig{empty: true}

// Empty reports whether r is the canonical empty sentinel.
func (r *ClusterConfigMasterConfigDiskConfig) Empty() bool {
	return r.empty
}

// String returns a printable form of r.
func (r *ClusterConfigMasterConfigDiskConfig) String() string {
	return dcl.SprintResource(r)
}

// HashCode returns a sha256 digest of r's string form.
func (r *ClusterConfigMasterConfigDiskConfig) HashCode() string {
	// Placeholder for a more complex hash method that handles ordering, etc
	// Hash resource body for easy comparison later
	hash := sha256.Sum256([]byte(r.String()))
	return fmt.Sprintf("%x", hash)
}

// ClusterConfigMasterConfigManagedGroupConfig is the JSON model for the
// output-only managed instance group names of the master group.
type ClusterConfigMasterConfigManagedGroupConfig struct {
	empty                    bool    `json:"-"`
	InstanceTemplateName     *string `json:"instanceTemplateName"`
	InstanceGroupManagerName *string `json:"instanceGroupManagerName"`
}

// jsonClusterConfigMasterConfigManagedGroupConfig prevents UnmarshalJSON recursion.
type jsonClusterConfigMasterConfigManagedGroupConfig ClusterConfigMasterConfigManagedGroupConfig

// UnmarshalJSON maps an explicitly empty JSON object to the canonical
// EmptyClusterConfigMasterConfigManagedGroupConfig sentinel.
func (r *ClusterConfigMasterConfigManagedGroupConfig) UnmarshalJSON(data []byte) error {
	var res jsonClusterConfigMasterConfigManagedGroupConfig
	if err := json.Unmarshal(data, &res); err != nil {
		return err
	}

	var m map[string]interface{}
	// Emptiness probe; error ignored because data is already known-valid JSON.
	json.Unmarshal(data, &m)

	if len(m) == 0 {
		*r = *EmptyClusterConfigMasterConfigManagedGroupConfig
	} else {

		r.InstanceTemplateName = res.InstanceTemplateName

		r.InstanceGroupManagerName = res.InstanceGroupManagerName

	}
	return nil
}

// This object is used to assert a desired state where this ClusterConfigMasterConfigManagedGroupConfig is
// empty. Go lacks global const objects, but this object should be treated
// as one. Modifying this object will have undesirable results.
+var EmptyClusterConfigMasterConfigManagedGroupConfig *ClusterConfigMasterConfigManagedGroupConfig = &ClusterConfigMasterConfigManagedGroupConfig{empty: true} + +func (r *ClusterConfigMasterConfigManagedGroupConfig) Empty() bool { + return r.empty +} + +func (r *ClusterConfigMasterConfigManagedGroupConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterConfigMasterConfigManagedGroupConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterConfigMasterConfigAccelerators struct { + empty bool `json:"-"` + AcceleratorType *string `json:"acceleratorType"` + AcceleratorCount *int64 `json:"acceleratorCount"` +} + +type jsonClusterConfigMasterConfigAccelerators ClusterConfigMasterConfigAccelerators + +func (r *ClusterConfigMasterConfigAccelerators) UnmarshalJSON(data []byte) error { + var res jsonClusterConfigMasterConfigAccelerators + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterConfigMasterConfigAccelerators + } else { + + r.AcceleratorType = res.AcceleratorType + + r.AcceleratorCount = res.AcceleratorCount + + } + return nil +} + +// This object is used to assert a desired state where this ClusterConfigMasterConfigAccelerators is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterConfigMasterConfigAccelerators *ClusterConfigMasterConfigAccelerators = &ClusterConfigMasterConfigAccelerators{empty: true} + +func (r *ClusterConfigMasterConfigAccelerators) Empty() bool { + return r.empty +} + +func (r *ClusterConfigMasterConfigAccelerators) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterConfigMasterConfigAccelerators) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterConfigMasterConfigInstanceReferences struct { + empty bool `json:"-"` + InstanceName *string `json:"instanceName"` + InstanceId *string `json:"instanceId"` + PublicKey *string `json:"publicKey"` + PublicEciesKey *string `json:"publicEciesKey"` +} + +type jsonClusterConfigMasterConfigInstanceReferences ClusterConfigMasterConfigInstanceReferences + +func (r *ClusterConfigMasterConfigInstanceReferences) UnmarshalJSON(data []byte) error { + var res jsonClusterConfigMasterConfigInstanceReferences + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterConfigMasterConfigInstanceReferences + } else { + + r.InstanceName = res.InstanceName + + r.InstanceId = res.InstanceId + + r.PublicKey = res.PublicKey + + r.PublicEciesKey = res.PublicEciesKey + + } + return nil +} + +// This object is used to assert a desired state where this ClusterConfigMasterConfigInstanceReferences is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
var EmptyClusterConfigMasterConfigInstanceReferences *ClusterConfigMasterConfigInstanceReferences = &ClusterConfigMasterConfigInstanceReferences{empty: true}

// Empty reports whether r is the canonical empty sentinel.
func (r *ClusterConfigMasterConfigInstanceReferences) Empty() bool {
	return r.empty
}

// String returns a printable form of r.
func (r *ClusterConfigMasterConfigInstanceReferences) String() string {
	return dcl.SprintResource(r)
}

// HashCode returns a sha256 digest of r's string form.
func (r *ClusterConfigMasterConfigInstanceReferences) HashCode() string {
	// Placeholder for a more complex hash method that handles ordering, etc
	// Hash resource body for easy comparison later
	hash := sha256.Sum256([]byte(r.String()))
	return fmt.Sprintf("%x", hash)
}

// ClusterConfigWorkerConfig is the JSON model for the primary worker
// instance group settings; pointer/slice fields distinguish unset from zero.
type ClusterConfigWorkerConfig struct {
	empty              bool                                          `json:"-"`
	NumInstances       *int64                                        `json:"numInstances"`
	InstanceNames      []string                                      `json:"instanceNames"`
	Image              *string                                       `json:"image"`
	MachineType        *string                                       `json:"machineType"`
	DiskConfig         *ClusterConfigWorkerConfigDiskConfig          `json:"diskConfig"`
	IsPreemptible      *bool                                         `json:"isPreemptible"`
	Preemptibility     *ClusterConfigWorkerConfigPreemptibilityEnum  `json:"preemptibility"`
	ManagedGroupConfig *ClusterConfigWorkerConfigManagedGroupConfig  `json:"managedGroupConfig"`
	Accelerators       []ClusterConfigWorkerConfigAccelerators       `json:"accelerators"`
	MinCpuPlatform     *string                                       `json:"minCpuPlatform"`
	InstanceReferences []ClusterConfigWorkerConfigInstanceReferences `json:"instanceReferences"`
}

// jsonClusterConfigWorkerConfig prevents UnmarshalJSON recursion.
type jsonClusterConfigWorkerConfig ClusterConfigWorkerConfig

// UnmarshalJSON maps an explicitly empty JSON object to the canonical
// EmptyClusterConfigWorkerConfig sentinel.
func (r *ClusterConfigWorkerConfig) UnmarshalJSON(data []byte) error {
	var res jsonClusterConfigWorkerConfig
	if err := json.Unmarshal(data, &res); err != nil {
		return err
	}

	var m map[string]interface{}
	// Emptiness probe; error ignored because data is already known-valid JSON.
	json.Unmarshal(data, &m)

	if len(m) == 0 {
		*r = *EmptyClusterConfigWorkerConfig
	} else {

		r.NumInstances = res.NumInstances

		r.InstanceNames = res.InstanceNames

		r.Image = res.Image

		r.MachineType = res.MachineType

		r.DiskConfig = res.DiskConfig

		r.IsPreemptible = res.IsPreemptible

		r.Preemptibility = res.Preemptibility

		r.ManagedGroupConfig = res.ManagedGroupConfig

		r.Accelerators = res.Accelerators

		r.MinCpuPlatform = res.MinCpuPlatform

		r.InstanceReferences = res.InstanceReferences

	}
	return nil
}

// This object is used to assert a desired state where this ClusterConfigWorkerConfig is
// empty. Go lacks global const objects, but this object should be treated
// as one. Modifying this object will have undesirable results.
var EmptyClusterConfigWorkerConfig *ClusterConfigWorkerConfig = &ClusterConfigWorkerConfig{empty: true}

// Empty reports whether r is the canonical empty sentinel.
func (r *ClusterConfigWorkerConfig) Empty() bool {
	return r.empty
}

// String returns a printable form of r.
func (r *ClusterConfigWorkerConfig) String() string {
	return dcl.SprintResource(r)
}

// HashCode returns a sha256 digest of r's string form.
func (r *ClusterConfigWorkerConfig) HashCode() string {
	// Placeholder for a more complex hash method that handles ordering, etc
	// Hash resource body for easy comparison later
	hash := sha256.Sum256([]byte(r.String()))
	return fmt.Sprintf("%x", hash)
}

// ClusterConfigWorkerConfigDiskConfig is the JSON model for the worker
// group's disk settings; pointer fields distinguish unset from zero.
type ClusterConfigWorkerConfigDiskConfig struct {
	empty             bool    `json:"-"`
	BootDiskType      *string `json:"bootDiskType"`
	BootDiskSizeGb    *int64  `json:"bootDiskSizeGb"`
	NumLocalSsds      *int64  `json:"numLocalSsds"`
	LocalSsdInterface *string `json:"localSsdInterface"`
}

// jsonClusterConfigWorkerConfigDiskConfig prevents UnmarshalJSON recursion.
type jsonClusterConfigWorkerConfigDiskConfig ClusterConfigWorkerConfigDiskConfig

// UnmarshalJSON maps an explicitly empty JSON object to the canonical
// EmptyClusterConfigWorkerConfigDiskConfig sentinel.
func (r *ClusterConfigWorkerConfigDiskConfig) UnmarshalJSON(data []byte) error {
	var res jsonClusterConfigWorkerConfigDiskConfig
	if err := json.Unmarshal(data, &res); err != nil {
		return err
	}

	var m map[string]interface{}
	// Emptiness probe; error ignored because data is already known-valid JSON.
	json.Unmarshal(data, &m)

	if len(m) == 0 {
		*r = *EmptyClusterConfigWorkerConfigDiskConfig
	} else {

		r.BootDiskType = res.BootDiskType

		r.BootDiskSizeGb = res.BootDiskSizeGb

		r.NumLocalSsds = res.NumLocalSsds

		r.LocalSsdInterface = res.LocalSsdInterface

	}
	return nil
}

// This object is used to assert a desired state where this ClusterConfigWorkerConfigDiskConfig is
// empty. Go lacks global const objects, but this object should be treated
// as one. Modifying this object will have undesirable results.
var EmptyClusterConfigWorkerConfigDiskConfig *ClusterConfigWorkerConfigDiskConfig = &ClusterConfigWorkerConfigDiskConfig{empty: true}

// Empty reports whether r is the canonical empty sentinel.
func (r *ClusterConfigWorkerConfigDiskConfig) Empty() bool {
	return r.empty
}

// String returns a printable form of r.
func (r *ClusterConfigWorkerConfigDiskConfig) String() string {
	return dcl.SprintResource(r)
}

// HashCode returns a sha256 digest of r's string form.
func (r *ClusterConfigWorkerConfigDiskConfig) HashCode() string {
	// Placeholder for a more complex hash method that handles ordering, etc
	// Hash resource body for easy comparison later
	hash := sha256.Sum256([]byte(r.String()))
	return fmt.Sprintf("%x", hash)
}

// ClusterConfigWorkerConfigManagedGroupConfig is the JSON model for the
// output-only managed instance group names of the worker group.
type ClusterConfigWorkerConfigManagedGroupConfig struct {
	empty                    bool    `json:"-"`
	InstanceTemplateName     *string `json:"instanceTemplateName"`
	InstanceGroupManagerName *string `json:"instanceGroupManagerName"`
}

// jsonClusterConfigWorkerConfigManagedGroupConfig prevents UnmarshalJSON recursion.
type jsonClusterConfigWorkerConfigManagedGroupConfig ClusterConfigWorkerConfigManagedGroupConfig

// UnmarshalJSON maps an explicitly empty JSON object to the canonical
// EmptyClusterConfigWorkerConfigManagedGroupConfig sentinel.
func (r *ClusterConfigWorkerConfigManagedGroupConfig) UnmarshalJSON(data []byte) error {
	var res jsonClusterConfigWorkerConfigManagedGroupConfig
	if err := json.Unmarshal(data, &res); err != nil {
		return err
	}

	var m map[string]interface{}
	// Emptiness probe; error ignored because data is already known-valid JSON.
	json.Unmarshal(data, &m)

	if len(m) == 0 {
		*r = *EmptyClusterConfigWorkerConfigManagedGroupConfig
	} else {

		r.InstanceTemplateName = res.InstanceTemplateName

		r.InstanceGroupManagerName = res.InstanceGroupManagerName

	}
	return nil
}

// This object is used to assert a desired state where this ClusterConfigWorkerConfigManagedGroupConfig is
// empty. Go lacks global const objects, but this object should be treated
// as one. Modifying this object will have undesirable results.
+var EmptyClusterConfigWorkerConfigManagedGroupConfig *ClusterConfigWorkerConfigManagedGroupConfig = &ClusterConfigWorkerConfigManagedGroupConfig{empty: true} + +func (r *ClusterConfigWorkerConfigManagedGroupConfig) Empty() bool { + return r.empty +} + +func (r *ClusterConfigWorkerConfigManagedGroupConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterConfigWorkerConfigManagedGroupConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterConfigWorkerConfigAccelerators struct { + empty bool `json:"-"` + AcceleratorType *string `json:"acceleratorType"` + AcceleratorCount *int64 `json:"acceleratorCount"` +} + +type jsonClusterConfigWorkerConfigAccelerators ClusterConfigWorkerConfigAccelerators + +func (r *ClusterConfigWorkerConfigAccelerators) UnmarshalJSON(data []byte) error { + var res jsonClusterConfigWorkerConfigAccelerators + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterConfigWorkerConfigAccelerators + } else { + + r.AcceleratorType = res.AcceleratorType + + r.AcceleratorCount = res.AcceleratorCount + + } + return nil +} + +// This object is used to assert a desired state where this ClusterConfigWorkerConfigAccelerators is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterConfigWorkerConfigAccelerators *ClusterConfigWorkerConfigAccelerators = &ClusterConfigWorkerConfigAccelerators{empty: true} + +func (r *ClusterConfigWorkerConfigAccelerators) Empty() bool { + return r.empty +} + +func (r *ClusterConfigWorkerConfigAccelerators) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterConfigWorkerConfigAccelerators) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterConfigWorkerConfigInstanceReferences struct { + empty bool `json:"-"` + InstanceName *string `json:"instanceName"` + InstanceId *string `json:"instanceId"` + PublicKey *string `json:"publicKey"` + PublicEciesKey *string `json:"publicEciesKey"` +} + +type jsonClusterConfigWorkerConfigInstanceReferences ClusterConfigWorkerConfigInstanceReferences + +func (r *ClusterConfigWorkerConfigInstanceReferences) UnmarshalJSON(data []byte) error { + var res jsonClusterConfigWorkerConfigInstanceReferences + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterConfigWorkerConfigInstanceReferences + } else { + + r.InstanceName = res.InstanceName + + r.InstanceId = res.InstanceId + + r.PublicKey = res.PublicKey + + r.PublicEciesKey = res.PublicEciesKey + + } + return nil +} + +// This object is used to assert a desired state where this ClusterConfigWorkerConfigInstanceReferences is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
var EmptyClusterConfigWorkerConfigInstanceReferences *ClusterConfigWorkerConfigInstanceReferences = &ClusterConfigWorkerConfigInstanceReferences{empty: true}

// Empty reports whether r is the canonical empty sentinel.
func (r *ClusterConfigWorkerConfigInstanceReferences) Empty() bool {
	return r.empty
}

// String returns a printable form of r.
func (r *ClusterConfigWorkerConfigInstanceReferences) String() string {
	return dcl.SprintResource(r)
}

// HashCode returns a sha256 digest of r's string form.
func (r *ClusterConfigWorkerConfigInstanceReferences) HashCode() string {
	// Placeholder for a more complex hash method that handles ordering, etc
	// Hash resource body for easy comparison later
	hash := sha256.Sum256([]byte(r.String()))
	return fmt.Sprintf("%x", hash)
}

// ClusterConfigSecondaryWorkerConfig is the JSON model for the secondary
// worker instance group settings; pointer/slice fields distinguish unset
// from zero.
type ClusterConfigSecondaryWorkerConfig struct {
	empty              bool                                                   `json:"-"`
	NumInstances       *int64                                                 `json:"numInstances"`
	InstanceNames      []string                                               `json:"instanceNames"`
	Image              *string                                                `json:"image"`
	MachineType        *string                                                `json:"machineType"`
	DiskConfig         *ClusterConfigSecondaryWorkerConfigDiskConfig          `json:"diskConfig"`
	IsPreemptible      *bool                                                  `json:"isPreemptible"`
	Preemptibility     *ClusterConfigSecondaryWorkerConfigPreemptibilityEnum  `json:"preemptibility"`
	ManagedGroupConfig *ClusterConfigSecondaryWorkerConfigManagedGroupConfig  `json:"managedGroupConfig"`
	Accelerators       []ClusterConfigSecondaryWorkerConfigAccelerators       `json:"accelerators"`
	MinCpuPlatform     *string                                                `json:"minCpuPlatform"`
	InstanceReferences []ClusterConfigSecondaryWorkerConfigInstanceReferences `json:"instanceReferences"`
}

// jsonClusterConfigSecondaryWorkerConfig prevents UnmarshalJSON recursion.
type jsonClusterConfigSecondaryWorkerConfig ClusterConfigSecondaryWorkerConfig

// UnmarshalJSON maps an explicitly empty JSON object to the canonical
// EmptyClusterConfigSecondaryWorkerConfig sentinel.
func (r *ClusterConfigSecondaryWorkerConfig) UnmarshalJSON(data []byte) error {
	var res jsonClusterConfigSecondaryWorkerConfig
	if err := json.Unmarshal(data, &res); err != nil {
		return err
	}

	var m map[string]interface{}
	// Emptiness probe; error ignored because data is already known-valid JSON.
	json.Unmarshal(data, &m)

	if len(m) == 0 {
		*r = *EmptyClusterConfigSecondaryWorkerConfig
	} else {

		r.NumInstances = res.NumInstances

		r.InstanceNames = res.InstanceNames

		r.Image = res.Image

		r.MachineType = res.MachineType

		r.DiskConfig = res.DiskConfig

		r.IsPreemptible = res.IsPreemptible

		r.Preemptibility = res.Preemptibility

		r.ManagedGroupConfig = res.ManagedGroupConfig

		r.Accelerators = res.Accelerators

		r.MinCpuPlatform = res.MinCpuPlatform

		r.InstanceReferences = res.InstanceReferences

	}
	return nil
}

// This object is used to assert a desired state where this ClusterConfigSecondaryWorkerConfig is
// empty. Go lacks global const objects, but this object should be treated
// as one. Modifying this object will have undesirable results.
var EmptyClusterConfigSecondaryWorkerConfig *ClusterConfigSecondaryWorkerConfig = &ClusterConfigSecondaryWorkerConfig{empty: true}

// Empty reports whether r is the canonical empty sentinel.
func (r *ClusterConfigSecondaryWorkerConfig) Empty() bool {
	return r.empty
}

// String returns a printable form of r.
func (r *ClusterConfigSecondaryWorkerConfig) String() string {
	return dcl.SprintResource(r)
}

// HashCode returns a sha256 digest of r's string form.
func (r *ClusterConfigSecondaryWorkerConfig) HashCode() string {
	// Placeholder for a more complex hash method that handles ordering, etc
	// Hash resource body for easy comparison later
	hash := sha256.Sum256([]byte(r.String()))
	return fmt.Sprintf("%x", hash)
}

// ClusterConfigSecondaryWorkerConfigDiskConfig is the JSON model for the
// secondary worker group's disk settings.
type ClusterConfigSecondaryWorkerConfigDiskConfig struct {
	empty             bool    `json:"-"`
	BootDiskType      *string `json:"bootDiskType"`
	BootDiskSizeGb    *int64  `json:"bootDiskSizeGb"`
	NumLocalSsds      *int64  `json:"numLocalSsds"`
	LocalSsdInterface *string `json:"localSsdInterface"`
}

// jsonClusterConfigSecondaryWorkerConfigDiskConfig prevents UnmarshalJSON recursion.
type jsonClusterConfigSecondaryWorkerConfigDiskConfig ClusterConfigSecondaryWorkerConfigDiskConfig

// UnmarshalJSON maps an explicitly empty JSON object to the canonical
// EmptyClusterConfigSecondaryWorkerConfigDiskConfig sentinel.
func (r *ClusterConfigSecondaryWorkerConfigDiskConfig) UnmarshalJSON(data []byte) error {
	var res jsonClusterConfigSecondaryWorkerConfigDiskConfig
	if err := json.Unmarshal(data, &res); err != nil {
		return err
	}

	var m map[string]interface{}
	// Emptiness probe; error ignored because data is already known-valid JSON.
	json.Unmarshal(data, &m)

	if len(m) == 0 {
		*r = *EmptyClusterConfigSecondaryWorkerConfigDiskConfig
	} else {

		r.BootDiskType = res.BootDiskType

		r.BootDiskSizeGb = res.BootDiskSizeGb

		r.NumLocalSsds = res.NumLocalSsds

		r.LocalSsdInterface = res.LocalSsdInterface

	}
	return nil
}

// This object is used to assert a desired state where this ClusterConfigSecondaryWorkerConfigDiskConfig is
// empty. Go lacks global const objects, but this object should be treated
// as one. Modifying this object will have undesirable results.
var EmptyClusterConfigSecondaryWorkerConfigDiskConfig *ClusterConfigSecondaryWorkerConfigDiskConfig = &ClusterConfigSecondaryWorkerConfigDiskConfig{empty: true}

// Empty reports whether r is the canonical empty sentinel.
func (r *ClusterConfigSecondaryWorkerConfigDiskConfig) Empty() bool {
	return r.empty
}

// String returns a printable form of r.
func (r *ClusterConfigSecondaryWorkerConfigDiskConfig) String() string {
	return dcl.SprintResource(r)
}

// HashCode returns a sha256 digest of r's string form.
func (r *ClusterConfigSecondaryWorkerConfigDiskConfig) HashCode() string {
	// Placeholder for a more complex hash method that handles ordering, etc
	// Hash resource body for easy comparison later
	hash := sha256.Sum256([]byte(r.String()))
	return fmt.Sprintf("%x", hash)
}

// ClusterConfigSecondaryWorkerConfigManagedGroupConfig is the JSON model
// for the output-only managed instance group names of the secondary
// worker group.
type ClusterConfigSecondaryWorkerConfigManagedGroupConfig struct {
	empty                    bool    `json:"-"`
	InstanceTemplateName     *string `json:"instanceTemplateName"`
	InstanceGroupManagerName *string `json:"instanceGroupManagerName"`
}

// jsonClusterConfigSecondaryWorkerConfigManagedGroupConfig prevents UnmarshalJSON recursion.
type jsonClusterConfigSecondaryWorkerConfigManagedGroupConfig ClusterConfigSecondaryWorkerConfigManagedGroupConfig

// UnmarshalJSON maps an explicitly empty JSON object to the canonical
// EmptyClusterConfigSecondaryWorkerConfigManagedGroupConfig sentinel.
func (r *ClusterConfigSecondaryWorkerConfigManagedGroupConfig) UnmarshalJSON(data []byte) error {
	var res jsonClusterConfigSecondaryWorkerConfigManagedGroupConfig
	if err := json.Unmarshal(data, &res); err != nil {
		return err
	}

	var m map[string]interface{}
	// Emptiness probe; error ignored because data is already known-valid JSON.
	json.Unmarshal(data, &m)

	if len(m) == 0 {
		*r = *EmptyClusterConfigSecondaryWorkerConfigManagedGroupConfig
	} else {

		r.InstanceTemplateName = res.InstanceTemplateName

		r.InstanceGroupManagerName = res.InstanceGroupManagerName

	}
	return nil
}

// This object is used to assert a desired state where this ClusterConfigSecondaryWorkerConfigManagedGroupConfig is
// empty. Go lacks global const objects, but this object should be treated
// as one. Modifying this object will have undesirable results.
var EmptyClusterConfigSecondaryWorkerConfigManagedGroupConfig *ClusterConfigSecondaryWorkerConfigManagedGroupConfig = &ClusterConfigSecondaryWorkerConfigManagedGroupConfig{empty: true}

// Empty reports whether r is the canonical empty sentinel.
func (r *ClusterConfigSecondaryWorkerConfigManagedGroupConfig) Empty() bool {
	return r.empty
}

// String returns a printable form of r.
func (r *ClusterConfigSecondaryWorkerConfigManagedGroupConfig) String() string {
	return dcl.SprintResource(r)
}

// HashCode returns a sha256 digest of r's string form.
func (r *ClusterConfigSecondaryWorkerConfigManagedGroupConfig) HashCode() string {
	// Placeholder for a more complex hash method that handles ordering, etc
	// Hash resource body for easy comparison later
	hash := sha256.Sum256([]byte(r.String()))
	return fmt.Sprintf("%x", hash)
}

// ClusterConfigSecondaryWorkerConfigAccelerators is the JSON model for an
// accelerator (type + count) attached to the secondary worker group.
type ClusterConfigSecondaryWorkerConfigAccelerators struct {
	empty            bool    `json:"-"`
	AcceleratorType  *string `json:"acceleratorType"`
	AcceleratorCount *int64  `json:"acceleratorCount"`
}

// jsonClusterConfigSecondaryWorkerConfigAccelerators prevents UnmarshalJSON recursion.
type jsonClusterConfigSecondaryWorkerConfigAccelerators ClusterConfigSecondaryWorkerConfigAccelerators

// UnmarshalJSON maps an explicitly empty JSON object to the canonical
// EmptyClusterConfigSecondaryWorkerConfigAccelerators sentinel.
func (r *ClusterConfigSecondaryWorkerConfigAccelerators) UnmarshalJSON(data []byte) error {
	var res jsonClusterConfigSecondaryWorkerConfigAccelerators
	if err := json.Unmarshal(data, &res); err != nil {
		return err
	}

	var m map[string]interface{}
	// Emptiness probe; error ignored because data is already known-valid JSON.
	json.Unmarshal(data, &m)

	if len(m) == 0 {
		*r = *EmptyClusterConfigSecondaryWorkerConfigAccelerators
	} else {

		r.AcceleratorType = res.AcceleratorType

		r.AcceleratorCount = res.AcceleratorCount

	}
	return nil
}

// This object is used to assert a desired state where this ClusterConfigSecondaryWorkerConfigAccelerators is
// empty. Go lacks global const objects, but this object should be treated
// as one. Modifying this object will have undesirable results.
+var EmptyClusterConfigSecondaryWorkerConfigAccelerators *ClusterConfigSecondaryWorkerConfigAccelerators = &ClusterConfigSecondaryWorkerConfigAccelerators{empty: true} + +func (r *ClusterConfigSecondaryWorkerConfigAccelerators) Empty() bool { + return r.empty +} + +func (r *ClusterConfigSecondaryWorkerConfigAccelerators) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterConfigSecondaryWorkerConfigAccelerators) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterConfigSecondaryWorkerConfigInstanceReferences struct { + empty bool `json:"-"` + InstanceName *string `json:"instanceName"` + InstanceId *string `json:"instanceId"` + PublicKey *string `json:"publicKey"` + PublicEciesKey *string `json:"publicEciesKey"` +} + +type jsonClusterConfigSecondaryWorkerConfigInstanceReferences ClusterConfigSecondaryWorkerConfigInstanceReferences + +func (r *ClusterConfigSecondaryWorkerConfigInstanceReferences) UnmarshalJSON(data []byte) error { + var res jsonClusterConfigSecondaryWorkerConfigInstanceReferences + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterConfigSecondaryWorkerConfigInstanceReferences + } else { + + r.InstanceName = res.InstanceName + + r.InstanceId = res.InstanceId + + r.PublicKey = res.PublicKey + + r.PublicEciesKey = res.PublicEciesKey + + } + return nil +} + +// This object is used to assert a desired state where this ClusterConfigSecondaryWorkerConfigInstanceReferences is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterConfigSecondaryWorkerConfigInstanceReferences *ClusterConfigSecondaryWorkerConfigInstanceReferences = &ClusterConfigSecondaryWorkerConfigInstanceReferences{empty: true} + +func (r *ClusterConfigSecondaryWorkerConfigInstanceReferences) Empty() bool { + return r.empty +} + +func (r *ClusterConfigSecondaryWorkerConfigInstanceReferences) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterConfigSecondaryWorkerConfigInstanceReferences) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterConfigSoftwareConfig struct { + empty bool `json:"-"` + ImageVersion *string `json:"imageVersion"` + Properties map[string]string `json:"properties"` + OptionalComponents []ClusterConfigSoftwareConfigOptionalComponentsEnum `json:"optionalComponents"` +} + +type jsonClusterConfigSoftwareConfig ClusterConfigSoftwareConfig + +func (r *ClusterConfigSoftwareConfig) UnmarshalJSON(data []byte) error { + var res jsonClusterConfigSoftwareConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterConfigSoftwareConfig + } else { + + r.ImageVersion = res.ImageVersion + + r.Properties = res.Properties + + r.OptionalComponents = res.OptionalComponents + + } + return nil +} + +// This object is used to assert a desired state where this ClusterConfigSoftwareConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterConfigSoftwareConfig *ClusterConfigSoftwareConfig = &ClusterConfigSoftwareConfig{empty: true} + +func (r *ClusterConfigSoftwareConfig) Empty() bool { + return r.empty +} + +func (r *ClusterConfigSoftwareConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterConfigSoftwareConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterConfigInitializationActions struct { + empty bool `json:"-"` + ExecutableFile *string `json:"executableFile"` + ExecutionTimeout *string `json:"executionTimeout"` +} + +type jsonClusterConfigInitializationActions ClusterConfigInitializationActions + +func (r *ClusterConfigInitializationActions) UnmarshalJSON(data []byte) error { + var res jsonClusterConfigInitializationActions + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterConfigInitializationActions + } else { + + r.ExecutableFile = res.ExecutableFile + + r.ExecutionTimeout = res.ExecutionTimeout + + } + return nil +} + +// This object is used to assert a desired state where this ClusterConfigInitializationActions is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterConfigInitializationActions *ClusterConfigInitializationActions = &ClusterConfigInitializationActions{empty: true} + +func (r *ClusterConfigInitializationActions) Empty() bool { + return r.empty +} + +func (r *ClusterConfigInitializationActions) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterConfigInitializationActions) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterConfigEncryptionConfig struct { + empty bool `json:"-"` + GcePdKmsKeyName *string `json:"gcePdKmsKeyName"` +} + +type jsonClusterConfigEncryptionConfig ClusterConfigEncryptionConfig + +func (r *ClusterConfigEncryptionConfig) UnmarshalJSON(data []byte) error { + var res jsonClusterConfigEncryptionConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterConfigEncryptionConfig + } else { + + r.GcePdKmsKeyName = res.GcePdKmsKeyName + + } + return nil +} + +// This object is used to assert a desired state where this ClusterConfigEncryptionConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterConfigEncryptionConfig *ClusterConfigEncryptionConfig = &ClusterConfigEncryptionConfig{empty: true} + +func (r *ClusterConfigEncryptionConfig) Empty() bool { + return r.empty +} + +func (r *ClusterConfigEncryptionConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterConfigEncryptionConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterConfigAutoscalingConfig struct { + empty bool `json:"-"` + Policy *string `json:"policy"` +} + +type jsonClusterConfigAutoscalingConfig ClusterConfigAutoscalingConfig + +func (r *ClusterConfigAutoscalingConfig) UnmarshalJSON(data []byte) error { + var res jsonClusterConfigAutoscalingConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterConfigAutoscalingConfig + } else { + + r.Policy = res.Policy + + } + return nil +} + +// This object is used to assert a desired state where this ClusterConfigAutoscalingConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterConfigAutoscalingConfig *ClusterConfigAutoscalingConfig = &ClusterConfigAutoscalingConfig{empty: true} + +func (r *ClusterConfigAutoscalingConfig) Empty() bool { + return r.empty +} + +func (r *ClusterConfigAutoscalingConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterConfigAutoscalingConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterConfigSecurityConfig struct { + empty bool `json:"-"` + KerberosConfig *ClusterConfigSecurityConfigKerberosConfig `json:"kerberosConfig"` + IdentityConfig *ClusterConfigSecurityConfigIdentityConfig `json:"identityConfig"` +} + +type jsonClusterConfigSecurityConfig ClusterConfigSecurityConfig + +func (r *ClusterConfigSecurityConfig) UnmarshalJSON(data []byte) error { + var res jsonClusterConfigSecurityConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterConfigSecurityConfig + } else { + + r.KerberosConfig = res.KerberosConfig + + r.IdentityConfig = res.IdentityConfig + + } + return nil +} + +// This object is used to assert a desired state where this ClusterConfigSecurityConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterConfigSecurityConfig *ClusterConfigSecurityConfig = &ClusterConfigSecurityConfig{empty: true} + +func (r *ClusterConfigSecurityConfig) Empty() bool { + return r.empty +} + +func (r *ClusterConfigSecurityConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterConfigSecurityConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterConfigSecurityConfigKerberosConfig struct { + empty bool `json:"-"` + EnableKerberos *bool `json:"enableKerberos"` + RootPrincipalPassword *string `json:"rootPrincipalPassword"` + KmsKey *string `json:"kmsKey"` + Keystore *string `json:"keystore"` + Truststore *string `json:"truststore"` + KeystorePassword *string `json:"keystorePassword"` + KeyPassword *string `json:"keyPassword"` + TruststorePassword *string `json:"truststorePassword"` + CrossRealmTrustRealm *string `json:"crossRealmTrustRealm"` + CrossRealmTrustKdc *string `json:"crossRealmTrustKdc"` + CrossRealmTrustAdminServer *string `json:"crossRealmTrustAdminServer"` + CrossRealmTrustSharedPassword *string `json:"crossRealmTrustSharedPassword"` + KdcDbKey *string `json:"kdcDbKey"` + TgtLifetimeHours *int64 `json:"tgtLifetimeHours"` + Realm *string `json:"realm"` +} + +type jsonClusterConfigSecurityConfigKerberosConfig ClusterConfigSecurityConfigKerberosConfig + +func (r *ClusterConfigSecurityConfigKerberosConfig) UnmarshalJSON(data []byte) error { + var res jsonClusterConfigSecurityConfigKerberosConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterConfigSecurityConfigKerberosConfig + } else { + + r.EnableKerberos = res.EnableKerberos + + r.RootPrincipalPassword = res.RootPrincipalPassword + + r.KmsKey = res.KmsKey + + r.Keystore = 
res.Keystore + + r.Truststore = res.Truststore + + r.KeystorePassword = res.KeystorePassword + + r.KeyPassword = res.KeyPassword + + r.TruststorePassword = res.TruststorePassword + + r.CrossRealmTrustRealm = res.CrossRealmTrustRealm + + r.CrossRealmTrustKdc = res.CrossRealmTrustKdc + + r.CrossRealmTrustAdminServer = res.CrossRealmTrustAdminServer + + r.CrossRealmTrustSharedPassword = res.CrossRealmTrustSharedPassword + + r.KdcDbKey = res.KdcDbKey + + r.TgtLifetimeHours = res.TgtLifetimeHours + + r.Realm = res.Realm + + } + return nil +} + +// This object is used to assert a desired state where this ClusterConfigSecurityConfigKerberosConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyClusterConfigSecurityConfigKerberosConfig *ClusterConfigSecurityConfigKerberosConfig = &ClusterConfigSecurityConfigKerberosConfig{empty: true} + +func (r *ClusterConfigSecurityConfigKerberosConfig) Empty() bool { + return r.empty +} + +func (r *ClusterConfigSecurityConfigKerberosConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterConfigSecurityConfigKerberosConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterConfigSecurityConfigIdentityConfig struct { + empty bool `json:"-"` + UserServiceAccountMapping map[string]string `json:"userServiceAccountMapping"` +} + +type jsonClusterConfigSecurityConfigIdentityConfig ClusterConfigSecurityConfigIdentityConfig + +func (r *ClusterConfigSecurityConfigIdentityConfig) UnmarshalJSON(data []byte) error { + var res jsonClusterConfigSecurityConfigIdentityConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = 
*EmptyClusterConfigSecurityConfigIdentityConfig + } else { + + r.UserServiceAccountMapping = res.UserServiceAccountMapping + + } + return nil +} + +// This object is used to assert a desired state where this ClusterConfigSecurityConfigIdentityConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyClusterConfigSecurityConfigIdentityConfig *ClusterConfigSecurityConfigIdentityConfig = &ClusterConfigSecurityConfigIdentityConfig{empty: true} + +func (r *ClusterConfigSecurityConfigIdentityConfig) Empty() bool { + return r.empty +} + +func (r *ClusterConfigSecurityConfigIdentityConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterConfigSecurityConfigIdentityConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterConfigLifecycleConfig struct { + empty bool `json:"-"` + IdleDeleteTtl *string `json:"idleDeleteTtl"` + AutoDeleteTime *string `json:"autoDeleteTime"` + AutoDeleteTtl *string `json:"autoDeleteTtl"` + IdleStartTime *string `json:"idleStartTime"` +} + +type jsonClusterConfigLifecycleConfig ClusterConfigLifecycleConfig + +func (r *ClusterConfigLifecycleConfig) UnmarshalJSON(data []byte) error { + var res jsonClusterConfigLifecycleConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterConfigLifecycleConfig + } else { + + r.IdleDeleteTtl = res.IdleDeleteTtl + + r.AutoDeleteTime = res.AutoDeleteTime + + r.AutoDeleteTtl = res.AutoDeleteTtl + + r.IdleStartTime = res.IdleStartTime + + } + return nil +} + +// This object is used to assert a desired state where this ClusterConfigLifecycleConfig is +// empty. 
Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyClusterConfigLifecycleConfig *ClusterConfigLifecycleConfig = &ClusterConfigLifecycleConfig{empty: true} + +func (r *ClusterConfigLifecycleConfig) Empty() bool { + return r.empty +} + +func (r *ClusterConfigLifecycleConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterConfigLifecycleConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterConfigEndpointConfig struct { + empty bool `json:"-"` + HttpPorts map[string]string `json:"httpPorts"` + EnableHttpPortAccess *bool `json:"enableHttpPortAccess"` +} + +type jsonClusterConfigEndpointConfig ClusterConfigEndpointConfig + +func (r *ClusterConfigEndpointConfig) UnmarshalJSON(data []byte) error { + var res jsonClusterConfigEndpointConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterConfigEndpointConfig + } else { + + r.HttpPorts = res.HttpPorts + + r.EnableHttpPortAccess = res.EnableHttpPortAccess + + } + return nil +} + +// This object is used to assert a desired state where this ClusterConfigEndpointConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterConfigEndpointConfig *ClusterConfigEndpointConfig = &ClusterConfigEndpointConfig{empty: true} + +func (r *ClusterConfigEndpointConfig) Empty() bool { + return r.empty +} + +func (r *ClusterConfigEndpointConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterConfigEndpointConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterConfigGkeClusterConfig struct { + empty bool `json:"-"` + NamespacedGkeDeploymentTarget *ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget `json:"namespacedGkeDeploymentTarget"` +} + +type jsonClusterConfigGkeClusterConfig ClusterConfigGkeClusterConfig + +func (r *ClusterConfigGkeClusterConfig) UnmarshalJSON(data []byte) error { + var res jsonClusterConfigGkeClusterConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterConfigGkeClusterConfig + } else { + + r.NamespacedGkeDeploymentTarget = res.NamespacedGkeDeploymentTarget + + } + return nil +} + +// This object is used to assert a desired state where this ClusterConfigGkeClusterConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterConfigGkeClusterConfig *ClusterConfigGkeClusterConfig = &ClusterConfigGkeClusterConfig{empty: true} + +func (r *ClusterConfigGkeClusterConfig) Empty() bool { + return r.empty +} + +func (r *ClusterConfigGkeClusterConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterConfigGkeClusterConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget struct { + empty bool `json:"-"` + TargetGkeCluster *string `json:"targetGkeCluster"` + ClusterNamespace *string `json:"clusterNamespace"` +} + +type jsonClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget + +func (r *ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) UnmarshalJSON(data []byte) error { + var res jsonClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget + } else { + + r.TargetGkeCluster = res.TargetGkeCluster + + r.ClusterNamespace = res.ClusterNamespace + + } + return nil +} + +// This object is used to assert a desired state where this ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget *ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget = &ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget{empty: true} + +func (r *ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) Empty() bool { + return r.empty +} + +func (r *ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterConfigMetastoreConfig struct { + empty bool `json:"-"` + DataprocMetastoreService *string `json:"dataprocMetastoreService"` +} + +type jsonClusterConfigMetastoreConfig ClusterConfigMetastoreConfig + +func (r *ClusterConfigMetastoreConfig) UnmarshalJSON(data []byte) error { + var res jsonClusterConfigMetastoreConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterConfigMetastoreConfig + } else { + + r.DataprocMetastoreService = res.DataprocMetastoreService + + } + return nil +} + +// This object is used to assert a desired state where this ClusterConfigMetastoreConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterConfigMetastoreConfig *ClusterConfigMetastoreConfig = &ClusterConfigMetastoreConfig{empty: true} + +func (r *ClusterConfigMetastoreConfig) Empty() bool { + return r.empty +} + +func (r *ClusterConfigMetastoreConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterConfigMetastoreConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterConfigDataprocMetricConfig struct { + empty bool `json:"-"` + Metrics []ClusterConfigDataprocMetricConfigMetrics `json:"metrics"` +} + +type jsonClusterConfigDataprocMetricConfig ClusterConfigDataprocMetricConfig + +func (r *ClusterConfigDataprocMetricConfig) UnmarshalJSON(data []byte) error { + var res jsonClusterConfigDataprocMetricConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterConfigDataprocMetricConfig + } else { + + r.Metrics = res.Metrics + + } + return nil +} + +// This object is used to assert a desired state where this ClusterConfigDataprocMetricConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterConfigDataprocMetricConfig *ClusterConfigDataprocMetricConfig = &ClusterConfigDataprocMetricConfig{empty: true} + +func (r *ClusterConfigDataprocMetricConfig) Empty() bool { + return r.empty +} + +func (r *ClusterConfigDataprocMetricConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterConfigDataprocMetricConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterConfigDataprocMetricConfigMetrics struct { + empty bool `json:"-"` + MetricSource *ClusterConfigDataprocMetricConfigMetricsMetricSourceEnum `json:"metricSource"` + MetricOverrides []string `json:"metricOverrides"` +} + +type jsonClusterConfigDataprocMetricConfigMetrics ClusterConfigDataprocMetricConfigMetrics + +func (r *ClusterConfigDataprocMetricConfigMetrics) UnmarshalJSON(data []byte) error { + var res jsonClusterConfigDataprocMetricConfigMetrics + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterConfigDataprocMetricConfigMetrics + } else { + + r.MetricSource = res.MetricSource + + r.MetricOverrides = res.MetricOverrides + + } + return nil +} + +// This object is used to assert a desired state where this ClusterConfigDataprocMetricConfigMetrics is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterConfigDataprocMetricConfigMetrics *ClusterConfigDataprocMetricConfigMetrics = &ClusterConfigDataprocMetricConfigMetrics{empty: true} + +func (r *ClusterConfigDataprocMetricConfigMetrics) Empty() bool { + return r.empty +} + +func (r *ClusterConfigDataprocMetricConfigMetrics) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterConfigDataprocMetricConfigMetrics) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterStatus struct { + empty bool `json:"-"` + State *ClusterStatusStateEnum `json:"state"` + Detail *string `json:"detail"` + StateStartTime *string `json:"stateStartTime"` + Substate *ClusterStatusSubstateEnum `json:"substate"` +} + +type jsonClusterStatus ClusterStatus + +func (r *ClusterStatus) UnmarshalJSON(data []byte) error { + var res jsonClusterStatus + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterStatus + } else { + + r.State = res.State + + r.Detail = res.Detail + + r.StateStartTime = res.StateStartTime + + r.Substate = res.Substate + + } + return nil +} + +// This object is used to assert a desired state where this ClusterStatus is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterStatus *ClusterStatus = &ClusterStatus{empty: true} + +func (r *ClusterStatus) Empty() bool { + return r.empty +} + +func (r *ClusterStatus) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterStatus) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterStatusHistory struct { + empty bool `json:"-"` + State *ClusterStatusHistoryStateEnum `json:"state"` + Detail *string `json:"detail"` + StateStartTime *string `json:"stateStartTime"` + Substate *ClusterStatusHistorySubstateEnum `json:"substate"` +} + +type jsonClusterStatusHistory ClusterStatusHistory + +func (r *ClusterStatusHistory) UnmarshalJSON(data []byte) error { + var res jsonClusterStatusHistory + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterStatusHistory + } else { + + r.State = res.State + + r.Detail = res.Detail + + r.StateStartTime = res.StateStartTime + + r.Substate = res.Substate + + } + return nil +} + +// This object is used to assert a desired state where this ClusterStatusHistory is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterStatusHistory *ClusterStatusHistory = &ClusterStatusHistory{empty: true} + +func (r *ClusterStatusHistory) Empty() bool { + return r.empty +} + +func (r *ClusterStatusHistory) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterStatusHistory) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterMetrics struct { + empty bool `json:"-"` + HdfsMetrics map[string]string `json:"hdfsMetrics"` + YarnMetrics map[string]string `json:"yarnMetrics"` +} + +type jsonClusterMetrics ClusterMetrics + +func (r *ClusterMetrics) UnmarshalJSON(data []byte) error { + var res jsonClusterMetrics + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterMetrics + } else { + + r.HdfsMetrics = res.HdfsMetrics + + r.YarnMetrics = res.YarnMetrics + + } + return nil +} + +// This object is used to assert a desired state where this ClusterMetrics is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterMetrics *ClusterMetrics = &ClusterMetrics{empty: true} + +func (r *ClusterMetrics) Empty() bool { + return r.empty +} + +func (r *ClusterMetrics) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterMetrics) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterVirtualClusterConfig struct { + empty bool `json:"-"` + StagingBucket *string `json:"stagingBucket"` + KubernetesClusterConfig *ClusterVirtualClusterConfigKubernetesClusterConfig `json:"kubernetesClusterConfig"` + AuxiliaryServicesConfig *ClusterVirtualClusterConfigAuxiliaryServicesConfig `json:"auxiliaryServicesConfig"` +} + +type jsonClusterVirtualClusterConfig ClusterVirtualClusterConfig + +func (r *ClusterVirtualClusterConfig) UnmarshalJSON(data []byte) error { + var res jsonClusterVirtualClusterConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterVirtualClusterConfig + } else { + + r.StagingBucket = res.StagingBucket + + r.KubernetesClusterConfig = res.KubernetesClusterConfig + + r.AuxiliaryServicesConfig = res.AuxiliaryServicesConfig + + } + return nil +} + +// This object is used to assert a desired state where this ClusterVirtualClusterConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterVirtualClusterConfig *ClusterVirtualClusterConfig = &ClusterVirtualClusterConfig{empty: true} + +func (r *ClusterVirtualClusterConfig) Empty() bool { + return r.empty +} + +func (r *ClusterVirtualClusterConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterVirtualClusterConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterVirtualClusterConfigKubernetesClusterConfig struct { + empty bool `json:"-"` + KubernetesNamespace *string `json:"kubernetesNamespace"` + GkeClusterConfig *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig `json:"gkeClusterConfig"` + KubernetesSoftwareConfig *ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig `json:"kubernetesSoftwareConfig"` +} + +type jsonClusterVirtualClusterConfigKubernetesClusterConfig ClusterVirtualClusterConfigKubernetesClusterConfig + +func (r *ClusterVirtualClusterConfigKubernetesClusterConfig) UnmarshalJSON(data []byte) error { + var res jsonClusterVirtualClusterConfigKubernetesClusterConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterVirtualClusterConfigKubernetesClusterConfig + } else { + + r.KubernetesNamespace = res.KubernetesNamespace + + r.GkeClusterConfig = res.GkeClusterConfig + + r.KubernetesSoftwareConfig = res.KubernetesSoftwareConfig + + } + return nil +} + +// This object is used to assert a desired state where this ClusterVirtualClusterConfigKubernetesClusterConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterVirtualClusterConfigKubernetesClusterConfig *ClusterVirtualClusterConfigKubernetesClusterConfig = &ClusterVirtualClusterConfigKubernetesClusterConfig{empty: true} + +func (r *ClusterVirtualClusterConfigKubernetesClusterConfig) Empty() bool { + return r.empty +} + +func (r *ClusterVirtualClusterConfigKubernetesClusterConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterVirtualClusterConfigKubernetesClusterConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig struct { + empty bool `json:"-"` + GkeClusterTarget *string `json:"gkeClusterTarget"` + NodePoolTarget []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget `json:"nodePoolTarget"` +} + +type jsonClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig + +func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig) UnmarshalJSON(data []byte) error { + var res jsonClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig + } else { + + r.GkeClusterTarget = res.GkeClusterTarget + + r.NodePoolTarget = res.NodePoolTarget + + } + return nil +} + +// This object is used to assert a desired state where this ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig = &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig{empty: true} + +func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig) Empty() bool { + return r.empty +} + +func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget struct { + empty bool `json:"-"` + NodePool *string `json:"nodePool"` + Roles []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum `json:"roles"` + NodePoolConfig *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig `json:"nodePoolConfig"` +} + +type jsonClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget + +func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget) UnmarshalJSON(data []byte) error { + var res jsonClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget + } else { + + r.NodePool = res.NodePool + + r.Roles = res.Roles + + r.NodePoolConfig = res.NodePoolConfig + + } + return nil +} + +// This object is used to 
assert a desired state where this ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget = &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget{empty: true} + +func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget) Empty() bool { + return r.empty +} + +func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig struct { + empty bool `json:"-"` + Config *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig `json:"config"` + Locations []string `json:"locations"` + Autoscaling *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling `json:"autoscaling"` +} + +type jsonClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig + +func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig) UnmarshalJSON(data []byte) error { + var res 
jsonClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig + } else { + + r.Config = res.Config + + r.Locations = res.Locations + + r.Autoscaling = res.Autoscaling + + } + return nil +} + +// This object is used to assert a desired state where this ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig = &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig{empty: true} + +func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig) Empty() bool { + return r.empty +} + +func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig struct { + empty bool `json:"-"` + MachineType *string `json:"machineType"` + LocalSsdCount *int64 `json:"localSsdCount"` + Preemptible *bool 
`json:"preemptible"` + Accelerators []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators `json:"accelerators"` + MinCpuPlatform *string `json:"minCpuPlatform"` + BootDiskKmsKey *string `json:"bootDiskKmsKey"` + EphemeralStorageConfig *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig `json:"ephemeralStorageConfig"` + Spot *bool `json:"spot"` +} + +type jsonClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig + +func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig) UnmarshalJSON(data []byte) error { + var res jsonClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig + } else { + + r.MachineType = res.MachineType + + r.LocalSsdCount = res.LocalSsdCount + + r.Preemptible = res.Preemptible + + r.Accelerators = res.Accelerators + + r.MinCpuPlatform = res.MinCpuPlatform + + r.BootDiskKmsKey = res.BootDiskKmsKey + + r.EphemeralStorageConfig = res.EphemeralStorageConfig + + r.Spot = res.Spot + + } + return nil +} + +// This object is used to assert a desired state where this ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig = &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig{empty: true} + +func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig) Empty() bool { + return r.empty +} + +func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators struct { + empty bool `json:"-"` + AcceleratorCount *int64 `json:"acceleratorCount"` + AcceleratorType *string `json:"acceleratorType"` + GpuPartitionSize *string `json:"gpuPartitionSize"` +} + +type jsonClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators + +func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators) UnmarshalJSON(data []byte) error { + var res jsonClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = 
*EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators + } else { + + r.AcceleratorCount = res.AcceleratorCount + + r.AcceleratorType = res.AcceleratorType + + r.GpuPartitionSize = res.GpuPartitionSize + + } + return nil +} + +// This object is used to assert a desired state where this ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators = &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators{empty: true} + +func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators) Empty() bool { + return r.empty +} + +func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig struct { + empty bool `json:"-"` + LocalSsdCount *int64 `json:"localSsdCount"` +} + +type 
jsonClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig + +func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig) UnmarshalJSON(data []byte) error { + var res jsonClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig + } else { + + r.LocalSsdCount = res.LocalSsdCount + + } + return nil +} + +// This object is used to assert a desired state where this ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig = &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig{empty: true} + +func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig) Empty() bool { + return r.empty +} + +func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling struct { + empty bool `json:"-"` + MinNodeCount *int64 `json:"minNodeCount"` + MaxNodeCount *int64 `json:"maxNodeCount"` +} + +type jsonClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling + +func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling) UnmarshalJSON(data []byte) error { + var res jsonClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 
0 { + *r = *EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling + } else { + + r.MinNodeCount = res.MinNodeCount + + r.MaxNodeCount = res.MaxNodeCount + + } + return nil +} + +// This object is used to assert a desired state where this ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling = &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling{empty: true} + +func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling) Empty() bool { + return r.empty +} + +func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig struct { + empty bool `json:"-"` + ComponentVersion map[string]string `json:"componentVersion"` + Properties map[string]string `json:"properties"` +} + +type jsonClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig + +func (r 
*ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig) UnmarshalJSON(data []byte) error { + var res jsonClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig + } else { + + r.ComponentVersion = res.ComponentVersion + + r.Properties = res.Properties + + } + return nil +} + +// This object is used to assert a desired state where this ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig *ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig = &ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig{empty: true} + +func (r *ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig) Empty() bool { + return r.empty +} + +func (r *ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterVirtualClusterConfigAuxiliaryServicesConfig struct { + empty bool `json:"-"` + MetastoreConfig *ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig `json:"metastoreConfig"` + SparkHistoryServerConfig *ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig 
`json:"sparkHistoryServerConfig"` +} + +type jsonClusterVirtualClusterConfigAuxiliaryServicesConfig ClusterVirtualClusterConfigAuxiliaryServicesConfig + +func (r *ClusterVirtualClusterConfigAuxiliaryServicesConfig) UnmarshalJSON(data []byte) error { + var res jsonClusterVirtualClusterConfigAuxiliaryServicesConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterVirtualClusterConfigAuxiliaryServicesConfig + } else { + + r.MetastoreConfig = res.MetastoreConfig + + r.SparkHistoryServerConfig = res.SparkHistoryServerConfig + + } + return nil +} + +// This object is used to assert a desired state where this ClusterVirtualClusterConfigAuxiliaryServicesConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyClusterVirtualClusterConfigAuxiliaryServicesConfig *ClusterVirtualClusterConfigAuxiliaryServicesConfig = &ClusterVirtualClusterConfigAuxiliaryServicesConfig{empty: true} + +func (r *ClusterVirtualClusterConfigAuxiliaryServicesConfig) Empty() bool { + return r.empty +} + +func (r *ClusterVirtualClusterConfigAuxiliaryServicesConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterVirtualClusterConfigAuxiliaryServicesConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig struct { + empty bool `json:"-"` + DataprocMetastoreService *string `json:"dataprocMetastoreService"` +} + +type jsonClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig + +func (r 
*ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig) UnmarshalJSON(data []byte) error { + var res jsonClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig + } else { + + r.DataprocMetastoreService = res.DataprocMetastoreService + + } + return nil +} + +// This object is used to assert a desired state where this ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig *ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig = &ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig{empty: true} + +func (r *ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig) Empty() bool { + return r.empty +} + +func (r *ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig struct { + empty bool `json:"-"` + DataprocCluster *string `json:"dataprocCluster"` +} + +type jsonClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig + +func (r *ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig) UnmarshalJSON(data 
[]byte) error { + var res jsonClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig + } else { + + r.DataprocCluster = res.DataprocCluster + + } + return nil +} + +// This object is used to assert a desired state where this ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig *ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig = &ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig{empty: true} + +func (r *ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig) Empty() bool { + return r.empty +} + +func (r *ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +// Describe returns a simple description of this resource to ensure that automated tools +// can identify it. 
+func (r *Cluster) Describe() dcl.ServiceTypeVersion { + return dcl.ServiceTypeVersion{ + Service: "dataproc", + Type: "Cluster", + Version: "beta", + } +} + +func (r *Cluster) ID() (string, error) { + if err := extractClusterFields(r); err != nil { + return "", err + } + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "name": dcl.ValueOrEmptyString(nr.Name), + "config": dcl.ValueOrEmptyString(nr.Config), + "labels": dcl.ValueOrEmptyString(nr.Labels), + "status": dcl.ValueOrEmptyString(nr.Status), + "status_history": dcl.ValueOrEmptyString(nr.StatusHistory), + "cluster_uuid": dcl.ValueOrEmptyString(nr.ClusterUuid), + "metrics": dcl.ValueOrEmptyString(nr.Metrics), + "location": dcl.ValueOrEmptyString(nr.Location), + "virtual_cluster_config": dcl.ValueOrEmptyString(nr.VirtualClusterConfig), + } + return dcl.Nprintf("projects/{{ "{{" }}project{{ "}}" }}/regions/{{ "{{" }}location{{ "}}" }}/clusters/{{ "{{" }}name{{ "}}" }}", params), nil +} + +const ClusterMaxPage = -1 + +type ClusterList struct { + Items []*Cluster + + nextToken string + + pageSize int32 + + resource *Cluster +} + +func (l *ClusterList) HasNext() bool { + return l.nextToken != "" +} + +func (l *ClusterList) Next(ctx context.Context, c *Client) error { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if !l.HasNext() { + return fmt.Errorf("no next page") + } + items, token, err := c.listCluster(ctx, l.resource, l.nextToken, l.pageSize) + if err != nil { + return err + } + l.Items = items + l.nextToken = token + return err +} + +func (c *Client) ListCluster(ctx context.Context, project, location string) (*ClusterList, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + return c.ListClusterWithMaxResults(ctx, project, location, ClusterMaxPage) + +} + +func (c *Client) ListClusterWithMaxResults(ctx 
context.Context, project, location string, pageSize int32) (*ClusterList, error) {
+	ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second))
+	defer cancel()
+
+	// Create a resource object so that we can use proper url normalization methods.
+	r := &Cluster{
+		Project:  &project,
+		Location: &location,
+	}
+	items, token, err := c.listCluster(ctx, r, "", pageSize)
+	if err != nil {
+		return nil, err
+	}
+	return &ClusterList{
+		Items:     items,
+		nextToken: token,
+		pageSize:  pageSize,
+		resource:  r,
+	}, nil
+}
+
+func (c *Client) GetCluster(ctx context.Context, r *Cluster) (*Cluster, error) {
+	ctx = dcl.ContextWithRequestID(ctx)
+	ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second))
+	defer cancel()
+
+	// This is *purposefully* suppressing errors.
+	// This function is used with url-normalized values, not URL-normalized values.
+	// URL Normalized values will throw unintentional errors, since those values are not of the proper parent form.
+	extractClusterFields(r)
+
+	b, err := c.getClusterRaw(ctx, r)
+	if err != nil {
+		if dcl.IsNotFound(err) {
+			return nil, &googleapi.Error{
+				Code:    404,
+				Message: err.Error(),
+			}
+		}
+		return nil, err
+	}
+	result, err := unmarshalCluster(b, c, r)
+	if err != nil {
+		return nil, err
+	}
+	result.Project = r.Project
+	result.Location = r.Location
+	result.Name = r.Name
+
+	c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result)
+	c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r)
+	result, err = canonicalizeClusterNewState(c, result, r)
+	if err != nil {
+		return nil, err
+	}
+	if err := postReadExtractClusterFields(result); err != nil {
+		return result, err
+	}
+	c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result)
+
+	return result, nil
+}
+
+func (c *Client) DeleteCluster(ctx context.Context, r *Cluster) error {
+	ctx = dcl.ContextWithRequestID(ctx)
+	ctx, cancel := context.WithTimeout(ctx,
c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if r == nil { + return fmt.Errorf("Cluster resource is nil") + } + c.Config.Logger.InfoWithContext(ctx, "Deleting Cluster...") + deleteOp := deleteClusterOperation{} + return deleteOp.do(ctx, r, c) +} + +// DeleteAllCluster deletes all resources that the filter functions returns true on. +func (c *Client) DeleteAllCluster(ctx context.Context, project, location string, filter func(*Cluster) bool) error { + listObj, err := c.ListCluster(ctx, project, location) + if err != nil { + return err + } + + err = c.deleteAllCluster(ctx, filter, listObj.Items) + if err != nil { + return err + } + for listObj.HasNext() { + err = listObj.Next(ctx, c) + if err != nil { + return nil + } + err = c.deleteAllCluster(ctx, filter, listObj.Items) + if err != nil { + return err + } + } + return nil +} + +func (c *Client) ApplyCluster(ctx context.Context, rawDesired *Cluster, opts ...dcl.ApplyOption) (*Cluster, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + ctx = dcl.ContextWithRequestID(ctx) + var resultNewState *Cluster + err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + newState, err := applyClusterHelper(c, ctx, rawDesired, opts...) + resultNewState = newState + if err != nil { + // If the error is 409, there is conflict in resource update. + // Here we want to apply changes based on latest state. + if dcl.IsConflictError(err) { + return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} + } + return nil, err + } + return nil, nil + }, c.Config.RetryProvider) + return resultNewState, err +} + +func applyClusterHelper(c *Client, ctx context.Context, rawDesired *Cluster, opts ...dcl.ApplyOption) (*Cluster, error) { + c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyCluster...") + c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) + + // 1.1: Validation of user-specified fields in desired state. 
+ if err := rawDesired.validate(); err != nil { + return nil, err + } + + if err := extractClusterFields(rawDesired); err != nil { + return nil, err + } + + initial, desired, fieldDiffs, err := c.clusterDiffsForRawDesired(ctx, rawDesired, opts...) + if err != nil { + return nil, fmt.Errorf("failed to create a diff: %w", err) + } + + diffs, err := convertFieldDiffsToClusterDiffs(c.Config, fieldDiffs, opts) + if err != nil { + return nil, err + } + + // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). + + // 2.3: Lifecycle Directive Check + var create bool + lp := dcl.FetchLifecycleParams(opts) + if initial == nil { + if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} + } + create = true + } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), + } + } else { + for _, d := range diffs { + if d.RequiresRecreate { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), + } + } + if dcl.HasLifecycleParam(lp, dcl.BlockModification) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} + } + } + } + + // 2.4 Imperative Request Planning + var ops []clusterApiOperation + if create { + ops = append(ops, &createClusterOperation{}) + } else { + for _, d := range diffs { + ops = append(ops, d.UpdateOp) + } + } + c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) + + // 2.5 Request Actuation + for _, op := range ops { + c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) + if err := op.do(ctx, desired, c); err != nil { + c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", op, op, err) + return nil, err + } + 
c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) + } + return applyClusterDiff(c, ctx, desired, rawDesired, ops, opts...) +} + +func applyClusterDiff(c *Client, ctx context.Context, desired *Cluster, rawDesired *Cluster, ops []clusterApiOperation, opts ...dcl.ApplyOption) (*Cluster, error) { + // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") + rawNew, err := c.GetCluster(ctx, desired) + if err != nil { + return nil, err + } + // Get additional values from the first response. + // These values should be merged into the newState above. + if len(ops) > 0 { + lastOp := ops[len(ops)-1] + if o, ok := lastOp.(*createClusterOperation); ok { + if r, hasR := o.FirstResponse(); hasR { + + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") + + fullResp, err := unmarshalMapCluster(r, c, rawDesired) + if err != nil { + return nil, err + } + + rawNew, err = canonicalizeClusterNewState(c, rawNew, fullResp) + if err != nil { + return nil, err + } + } + } + } + + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) + // 3.2b Canonicalization of raw new state using raw desired state + newState, err := canonicalizeClusterNewState(c, rawNew, rawDesired) + if err != nil { + return rawNew, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) + // 3.3 Comparison of the new state and raw desired state. + // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE + newDesired, err := canonicalizeClusterDesiredState(rawDesired, newState) + if err != nil { + return newState, err + } + + if err := postReadExtractClusterFields(newState); err != nil { + return newState, err + } + + // Need to ensure any transformations made here match acceptably in differ. 
+ if err := postReadExtractClusterFields(newDesired); err != nil { + return newState, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) + newDiffs, err := diffCluster(c, newDesired, newState) + if err != nil { + return newState, err + } + + if len(newDiffs) == 0 { + c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") + } else { + c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) + diffMessages := make([]string, len(newDiffs)) + for i, d := range newDiffs { + diffMessages[i] = fmt.Sprintf("%v", d) + } + return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} + } + c.Config.Logger.InfoWithContext(ctx, "Done Apply.") + return newState, nil +} + +func (r *Cluster) GetPolicy(basePath string) (string, string, *bytes.Buffer, error) { + u := r.getPolicyURL(basePath) + body := &bytes.Buffer{} + body.WriteString(fmt.Sprintf(`{"options":{"requestedPolicyVersion": %d}}`, r.IAMPolicyVersion())) + return u, "POST", body, nil +} diff --git a/mmv1/third_party/terraform/services/dataproc/cluster_internal.go.tmpl b/mmv1/third_party/terraform/services/dataproc/cluster_internal.go.tmpl new file mode 100644 index 000000000000..aa6867462f81 --- /dev/null +++ b/mmv1/third_party/terraform/services/dataproc/cluster_internal.go.tmpl @@ -0,0 +1,18408 @@ +package dataproc + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "strings" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource/operations" +) + +func (r *Cluster) validate() error { + + if err := dcl.RequiredParameter(r.Project, "Project"); err != nil { + return err + } + if err := dcl.Required(r, "name"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Location, "Location"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.Config) { + if err := r.Config.validate(); err != nil 
{ + return err + } + } + if !dcl.IsEmptyValueIndirect(r.Status) { + if err := r.Status.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.Metrics) { + if err := r.Metrics.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.VirtualClusterConfig) { + if err := r.VirtualClusterConfig.validate(); err != nil { + return err + } + } + return nil +} +func (r *ClusterConfig) validate() error { + if !dcl.IsEmptyValueIndirect(r.GceClusterConfig) { + if err := r.GceClusterConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.MasterConfig) { + if err := r.MasterConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.WorkerConfig) { + if err := r.WorkerConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.SecondaryWorkerConfig) { + if err := r.SecondaryWorkerConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.SoftwareConfig) { + if err := r.SoftwareConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.EncryptionConfig) { + if err := r.EncryptionConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.AutoscalingConfig) { + if err := r.AutoscalingConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.SecurityConfig) { + if err := r.SecurityConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.LifecycleConfig) { + if err := r.LifecycleConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.EndpointConfig) { + if err := r.EndpointConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.GkeClusterConfig) { + if err := r.GkeClusterConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.MetastoreConfig) { + if err := r.MetastoreConfig.validate(); err != nil { + return err + } + } + if 
!dcl.IsEmptyValueIndirect(r.DataprocMetricConfig) { + if err := r.DataprocMetricConfig.validate(); err != nil { + return err + } + } + return nil +} +func (r *ClusterConfigGceClusterConfig) validate() error { + if !dcl.IsEmptyValueIndirect(r.ReservationAffinity) { + if err := r.ReservationAffinity.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.NodeGroupAffinity) { + if err := r.NodeGroupAffinity.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.ShieldedInstanceConfig) { + if err := r.ShieldedInstanceConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.ConfidentialInstanceConfig) { + if err := r.ConfidentialInstanceConfig.validate(); err != nil { + return err + } + } + return nil +} +func (r *ClusterConfigGceClusterConfigReservationAffinity) validate() error { + return nil +} +func (r *ClusterConfigGceClusterConfigNodeGroupAffinity) validate() error { + if err := dcl.Required(r, "nodeGroup"); err != nil { + return err + } + return nil +} +func (r *ClusterConfigGceClusterConfigShieldedInstanceConfig) validate() error { + return nil +} +func (r *ClusterConfigGceClusterConfigConfidentialInstanceConfig) validate() error { + return nil +} +func (r *ClusterConfigMasterConfig) validate() error { + if !dcl.IsEmptyValueIndirect(r.DiskConfig) { + if err := r.DiskConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.ManagedGroupConfig) { + if err := r.ManagedGroupConfig.validate(); err != nil { + return err + } + } + return nil +} +func (r *ClusterConfigMasterConfigDiskConfig) validate() error { + return nil +} +func (r *ClusterConfigMasterConfigManagedGroupConfig) validate() error { + return nil +} +func (r *ClusterConfigMasterConfigAccelerators) validate() error { + return nil +} +func (r *ClusterConfigMasterConfigInstanceReferences) validate() error { + return nil +} +func (r *ClusterConfigWorkerConfig) validate() error { + if 
!dcl.IsEmptyValueIndirect(r.DiskConfig) { + if err := r.DiskConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.ManagedGroupConfig) { + if err := r.ManagedGroupConfig.validate(); err != nil { + return err + } + } + return nil +} +func (r *ClusterConfigWorkerConfigDiskConfig) validate() error { + return nil +} +func (r *ClusterConfigWorkerConfigManagedGroupConfig) validate() error { + return nil +} +func (r *ClusterConfigWorkerConfigAccelerators) validate() error { + return nil +} +func (r *ClusterConfigWorkerConfigInstanceReferences) validate() error { + return nil +} +func (r *ClusterConfigSecondaryWorkerConfig) validate() error { + if !dcl.IsEmptyValueIndirect(r.DiskConfig) { + if err := r.DiskConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.ManagedGroupConfig) { + if err := r.ManagedGroupConfig.validate(); err != nil { + return err + } + } + return nil +} +func (r *ClusterConfigSecondaryWorkerConfigDiskConfig) validate() error { + return nil +} +func (r *ClusterConfigSecondaryWorkerConfigManagedGroupConfig) validate() error { + return nil +} +func (r *ClusterConfigSecondaryWorkerConfigAccelerators) validate() error { + return nil +} +func (r *ClusterConfigSecondaryWorkerConfigInstanceReferences) validate() error { + return nil +} +func (r *ClusterConfigSoftwareConfig) validate() error { + return nil +} +func (r *ClusterConfigInitializationActions) validate() error { + if err := dcl.Required(r, "executableFile"); err != nil { + return err + } + return nil +} +func (r *ClusterConfigEncryptionConfig) validate() error { + return nil +} +func (r *ClusterConfigAutoscalingConfig) validate() error { + return nil +} +func (r *ClusterConfigSecurityConfig) validate() error { + if !dcl.IsEmptyValueIndirect(r.KerberosConfig) { + if err := r.KerberosConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.IdentityConfig) { + if err := r.IdentityConfig.validate(); err != nil { + 
return err + } + } + return nil +} +func (r *ClusterConfigSecurityConfigKerberosConfig) validate() error { + return nil +} +func (r *ClusterConfigSecurityConfigIdentityConfig) validate() error { + if err := dcl.Required(r, "userServiceAccountMapping"); err != nil { + return err + } + return nil +} +func (r *ClusterConfigLifecycleConfig) validate() error { + return nil +} +func (r *ClusterConfigEndpointConfig) validate() error { + return nil +} +func (r *ClusterConfigGkeClusterConfig) validate() error { + if !dcl.IsEmptyValueIndirect(r.NamespacedGkeDeploymentTarget) { + if err := r.NamespacedGkeDeploymentTarget.validate(); err != nil { + return err + } + } + return nil +} +func (r *ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) validate() error { + return nil +} +func (r *ClusterConfigMetastoreConfig) validate() error { + if err := dcl.Required(r, "dataprocMetastoreService"); err != nil { + return err + } + return nil +} +func (r *ClusterConfigDataprocMetricConfig) validate() error { + if err := dcl.Required(r, "metrics"); err != nil { + return err + } + return nil +} +func (r *ClusterConfigDataprocMetricConfigMetrics) validate() error { + if err := dcl.Required(r, "metricSource"); err != nil { + return err + } + return nil +} +func (r *ClusterStatus) validate() error { + return nil +} +func (r *ClusterStatusHistory) validate() error { + return nil +} +func (r *ClusterMetrics) validate() error { + return nil +} +func (r *ClusterVirtualClusterConfig) validate() error { + if err := dcl.Required(r, "kubernetesClusterConfig"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.KubernetesClusterConfig) { + if err := r.KubernetesClusterConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.AuxiliaryServicesConfig) { + if err := r.AuxiliaryServicesConfig.validate(); err != nil { + return err + } + } + return nil +} +func (r *ClusterVirtualClusterConfigKubernetesClusterConfig) validate() error { + if err := 
dcl.Required(r, "gkeClusterConfig"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.GkeClusterConfig) { + if err := r.GkeClusterConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.KubernetesSoftwareConfig) { + if err := r.KubernetesSoftwareConfig.validate(); err != nil { + return err + } + } + return nil +} +func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig) validate() error { + return nil +} +func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget) validate() error { + if err := dcl.Required(r, "nodePool"); err != nil { + return err + } + if err := dcl.Required(r, "roles"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.NodePoolConfig) { + if err := r.NodePoolConfig.validate(); err != nil { + return err + } + } + return nil +} +func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig) validate() error { + if !dcl.IsEmptyValueIndirect(r.Config) { + if err := r.Config.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.Autoscaling) { + if err := r.Autoscaling.validate(); err != nil { + return err + } + } + return nil +} +func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig) validate() error { + if !dcl.IsEmptyValueIndirect(r.EphemeralStorageConfig) { + if err := r.EphemeralStorageConfig.validate(); err != nil { + return err + } + } + return nil +} +func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators) validate() error { + return nil +} +func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig) validate() error { + return nil +} +func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling) validate() error { + return 
nil +} +func (r *ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig) validate() error { + return nil +} +func (r *ClusterVirtualClusterConfigAuxiliaryServicesConfig) validate() error { + if !dcl.IsEmptyValueIndirect(r.MetastoreConfig) { + if err := r.MetastoreConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.SparkHistoryServerConfig) { + if err := r.SparkHistoryServerConfig.validate(); err != nil { + return err + } + } + return nil +} +func (r *ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig) validate() error { + if err := dcl.Required(r, "dataprocMetastoreService"); err != nil { + return err + } + return nil +} +func (r *ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig) validate() error { + return nil +} +func (r *Cluster) basePath() string { + params := map[string]interface{}{} + return dcl.Nprintf("https://dataproc.googleapis.com/v1beta2/", params) +} + +func (r *Cluster) getURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/regions/{{ "{{" }}location{{ "}}" }}/clusters/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +func (r *Cluster) listURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/regions/{{ "{{" }}location{{ "}}" }}/clusters", nr.basePath(), userBasePath, params), nil + +} + +func (r *Cluster) createURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": 
dcl.ValueOrEmptyString(nr.Location), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/regions/{{ "{{" }}location{{ "}}" }}/clusters", nr.basePath(), userBasePath, params), nil + +} + +func (r *Cluster) deleteURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/regions/{{ "{{" }}location{{ "}}" }}/clusters/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +func (r *Cluster) SetPolicyURL(userBasePath string) string { + nr := r.urlNormalized() + fields := map[string]interface{}{ + "project": *nr.Project, + "location": *nr.Location, + "name": *nr.Name, + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/regions/{{ "{{" }}location{{ "}}" }}/clusters/{{ "{{" }}name{{ "}}" }}:setIamPolicy", nr.basePath(), userBasePath, fields) +} + +func (r *Cluster) SetPolicyVerb() string { + return "POST" +} + +func (r *Cluster) getPolicyURL(userBasePath string) string { + nr := r.urlNormalized() + fields := map[string]interface{}{ + "project": *nr.Project, + "location": *nr.Location, + "name": *nr.Name, + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/regions/{{ "{{" }}location{{ "}}" }}/clusters/{{ "{{" }}name{{ "}}" }}:getIamPolicy", nr.basePath(), userBasePath, fields) +} + +func (r *Cluster) IAMPolicyVersion() int { + return 3 +} + +// clusterApiOperation represents a mutable operation in the underlying REST +// API such as Create, Update, or Delete. +type clusterApiOperation interface { + do(context.Context, *Cluster, *Client) error +} + +// newUpdateClusterUpdateClusterRequest creates a request for an +// Cluster resource's UpdateCluster update type by filling in the update +// fields based on the intended state of the resource. 
+func newUpdateClusterUpdateClusterRequest(ctx context.Context, f *Cluster, c *Client) (map[string]interface{}, error) { + req := map[string]interface{}{} + res := f + _ = res + + if v := f.Labels; !dcl.IsEmptyValueIndirect(v) { + req["labels"] = v + } + return req, nil +} + +// marshalUpdateClusterUpdateClusterRequest converts the update into +// the final JSON request body. +func marshalUpdateClusterUpdateClusterRequest(c *Client, m map[string]interface{}) ([]byte, error) { + + return json.Marshal(m) +} + +type updateClusterUpdateClusterOperation struct { + // If the update operation has the REQUIRES_APPLY_OPTIONS trait, this will be populated. + // Usually it will be nil - this is to prevent us from accidentally depending on apply + // options, which should usually be unnecessary. + ApplyOptions []dcl.ApplyOption + FieldDiffs []*dcl.FieldDiff +} + +// do creates a request and sends it to the appropriate URL. In most operations, +// do will transcribe a subset of the resource into a request object and send a +// PUT request to a single URL. 
+ +func (op *updateClusterUpdateClusterOperation) do(ctx context.Context, r *Cluster, c *Client) error { + _, err := c.GetCluster(ctx, r) + if err != nil { + return err + } + + u, err := r.updateURL(c.Config.BasePath, "UpdateCluster") + if err != nil { + return err + } + mask := dcl.UpdateMask(op.FieldDiffs) + u, err = dcl.AddQueryParams(u, map[string]string{"updateMask": mask}) + if err != nil { + return err + } + + req, err := newUpdateClusterUpdateClusterRequest(ctx, r, c) + if err != nil { + return err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created update: %#v", req) + body, err := marshalUpdateClusterUpdateClusterRequest(c, req) + if err != nil { + return err + } + resp, err := dcl.SendRequest(ctx, c.Config, "PATCH", u, bytes.NewBuffer(body), c.Config.RetryProvider) + if err != nil { + return err + } + + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + err = o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET") + + if err != nil { + return err + } + + return nil +} + +func (c *Client) listClusterRaw(ctx context.Context, r *Cluster, pageToken string, pageSize int32) ([]byte, error) { + u, err := r.urlNormalized().listURL(c.Config.BasePath) + if err != nil { + return nil, err + } + + m := make(map[string]string) + if pageToken != "" { + m["pageToken"] = pageToken + } + + if pageSize != ClusterMaxPage { + m["pageSize"] = fmt.Sprintf("%v", pageSize) + } + + u, err = dcl.AddQueryParams(u, m) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + return ioutil.ReadAll(resp.Response.Body) +} + +type listClusterOperation struct { + Clusters []map[string]interface{} `json:"clusters"` + Token string `json:"nextPageToken"` +} + +func (c *Client) listCluster(ctx context.Context, r *Cluster, pageToken 
string, pageSize int32) ([]*Cluster, string, error) { + b, err := c.listClusterRaw(ctx, r, pageToken, pageSize) + if err != nil { + return nil, "", err + } + + var m listClusterOperation + if err := json.Unmarshal(b, &m); err != nil { + return nil, "", err + } + + var l []*Cluster + for _, v := range m.Clusters { + res, err := unmarshalMapCluster(v, c, r) + if err != nil { + return nil, m.Token, err + } + res.Project = r.Project + res.Location = r.Location + l = append(l, res) + } + + return l, m.Token, nil +} + +func (c *Client) deleteAllCluster(ctx context.Context, f func(*Cluster) bool, resources []*Cluster) error { + var errors []string + for _, res := range resources { + if f(res) { + // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. + err := c.DeleteCluster(ctx, res) + if err != nil { + errors = append(errors, err.Error()) + } + } + } + if len(errors) > 0 { + return fmt.Errorf("%v", strings.Join(errors, "\n")) + } else { + return nil + } +} + +type deleteClusterOperation struct{} + +func (op *deleteClusterOperation) do(ctx context.Context, r *Cluster, c *Client) error { + r, err := c.GetCluster(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + c.Config.Logger.InfoWithContextf(ctx, "Cluster not found, returning. Original error: %v", err) + return nil + } + c.Config.Logger.WarningWithContextf(ctx, "GetCluster checking for existence. error: %v", err) + return err + } + + u, err := r.deleteURL(c.Config.BasePath) + if err != nil { + return err + } + + // Delete should never have a body + body := &bytes.Buffer{} + resp, err := dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider) + if err != nil { + return err + } + + // wait for object to be deleted. 
+ var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { + return err + } + + // We saw a race condition where for some successful delete operation, the Get calls returned resources for a short duration. + // This is the reason we are adding retry to handle that case. + retriesRemaining := 10 + dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + _, err := c.GetCluster(ctx, r) + if dcl.IsNotFound(err) { + return nil, nil + } + if retriesRemaining > 0 { + retriesRemaining-- + return &dcl.RetryDetails{}, dcl.OperationNotDone{} + } + return nil, dcl.NotDeletedError{ExistingResource: r} + }, c.Config.RetryProvider) + return nil +} + +// Create operations are similar to Update operations, although they do not have +// specific request objects. The Create request object is the json encoding of +// the resource, which is modified by res.marshal to form the base request body. +type createClusterOperation struct { + response map[string]interface{} +} + +func (op *createClusterOperation) FirstResponse() (map[string]interface{}, bool) { + return op.response, len(op.response) > 0 +} + +func (op *createClusterOperation) do(ctx context.Context, r *Cluster, c *Client) error { + c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r) + u, err := r.createURL(c.Config.BasePath) + if err != nil { + return err + } + + req, err := r.marshal(c) + if err != nil { + return err + } + resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider) + if err != nil { + return err + } + // wait for object to be created. 
+ var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { + c.Config.Logger.Warningf("Creation failed after waiting for operation: %v", err) + return err + } + c.Config.Logger.InfoWithContextf(ctx, "Successfully waited for operation") + op.response, _ = o.FirstResponse() + + if _, err := c.GetCluster(ctx, r); err != nil { + c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) + return err + } + + return nil +} + +func (c *Client) getClusterRaw(ctx context.Context, r *Cluster) ([]byte, error) { + + u, err := r.getURL(c.Config.BasePath) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + b, err := ioutil.ReadAll(resp.Response.Body) + if err != nil { + return nil, err + } + + return b, nil +} + +func (c *Client) clusterDiffsForRawDesired(ctx context.Context, rawDesired *Cluster, opts ...dcl.ApplyOption) (initial, desired *Cluster, diffs []*dcl.FieldDiff, err error) { + c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") + // First, let us see if the user provided a state hint. If they did, we will start fetching based on that. 
+ var fetchState *Cluster + if sh := dcl.FetchStateHint(opts); sh != nil { + if r, ok := sh.(*Cluster); !ok { + c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected Cluster, got %T", sh) + } else { + fetchState = r + } + } + if fetchState == nil { + fetchState = rawDesired + } + + // 1.2: Retrieval of raw initial state from API + rawInitial, err := c.GetCluster(ctx, fetchState) + if rawInitial == nil { + if !dcl.IsNotFound(err) { + c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a Cluster resource already exists: %s", err) + return nil, nil, nil, fmt.Errorf("failed to retrieve Cluster resource: %v", err) + } + c.Config.Logger.InfoWithContext(ctx, "Found that Cluster resource did not exist.") + // Perform canonicalization to pick up defaults. + desired, err = canonicalizeClusterDesiredState(rawDesired, rawInitial) + return nil, desired, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Found initial state for Cluster: %v", rawInitial) + c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for Cluster: %v", rawDesired) + + // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. + if err := extractClusterFields(rawInitial); err != nil { + return nil, nil, nil, err + } + + // 1.3: Canonicalize raw initial state into initial state. + initial, err = canonicalizeClusterInitialState(rawInitial, rawDesired) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for Cluster: %v", initial) + + // 1.4: Canonicalize raw desired state into desired state. + desired, err = canonicalizeClusterDesiredState(rawDesired, rawInitial, opts...) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for Cluster: %v", desired) + + // 2.1: Comparison of initial and desired state. + diffs, err = diffCluster(c, desired, initial, opts...) 
+ return initial, desired, diffs, err +} + +func canonicalizeClusterInitialState(rawInitial, rawDesired *Cluster) (*Cluster, error) { + // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. + return rawInitial, nil +} + +/* +* Canonicalizers +* +* These are responsible for converting either a user-specified config or a +* GCP API response to a standard format that can be used for difference checking. +* */ + +func canonicalizeClusterDesiredState(rawDesired, rawInitial *Cluster, opts ...dcl.ApplyOption) (*Cluster, error) { + + if rawInitial == nil { + // Since the initial state is empty, the desired state is all we have. + // We canonicalize the remaining nested objects with nil to pick up defaults. + rawDesired.Config = canonicalizeClusterConfig(rawDesired.Config, nil, opts...) + rawDesired.Status = canonicalizeClusterStatus(rawDesired.Status, nil, opts...) + rawDesired.Metrics = canonicalizeClusterMetrics(rawDesired.Metrics, nil, opts...) + rawDesired.VirtualClusterConfig = canonicalizeClusterVirtualClusterConfig(rawDesired.VirtualClusterConfig, nil, opts...) + + return rawDesired, nil + } + canonicalDesired := &Cluster{} + if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) { + canonicalDesired.Project = rawInitial.Project + } else { + canonicalDesired.Project = rawDesired.Project + } + if dcl.StringCanonicalize(rawDesired.Name, rawInitial.Name) { + canonicalDesired.Name = rawInitial.Name + } else { + canonicalDesired.Name = rawDesired.Name + } + canonicalDesired.Config = canonicalizeClusterConfig(rawDesired.Config, rawInitial.Config, opts...) + if dcl.IsZeroValue(rawDesired.Labels) || (dcl.IsEmptyValueIndirect(rawDesired.Labels) && dcl.IsEmptyValueIndirect(rawInitial.Labels)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ canonicalDesired.Labels = rawInitial.Labels + } else { + canonicalDesired.Labels = rawDesired.Labels + } + if dcl.NameToSelfLink(rawDesired.Location, rawInitial.Location) { + canonicalDesired.Location = rawInitial.Location + } else { + canonicalDesired.Location = rawDesired.Location + } + canonicalDesired.VirtualClusterConfig = canonicalizeClusterVirtualClusterConfig(rawDesired.VirtualClusterConfig, rawInitial.VirtualClusterConfig, opts...) + return canonicalDesired, nil +} + +func canonicalizeClusterNewState(c *Client, rawNew, rawDesired *Cluster) (*Cluster, error) { + + rawNew.Project = rawDesired.Project + + if dcl.IsEmptyValueIndirect(rawNew.Name) && dcl.IsEmptyValueIndirect(rawDesired.Name) { + rawNew.Name = rawDesired.Name + } else { + if dcl.StringCanonicalize(rawDesired.Name, rawNew.Name) { + rawNew.Name = rawDesired.Name + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Config) && dcl.IsEmptyValueIndirect(rawDesired.Config) { + rawNew.Config = rawDesired.Config + } else { + rawNew.Config = canonicalizeNewClusterConfig(c, rawDesired.Config, rawNew.Config) + } + + if dcl.IsEmptyValueIndirect(rawNew.Labels) && dcl.IsEmptyValueIndirect(rawDesired.Labels) { + rawNew.Labels = rawDesired.Labels + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.Status) && dcl.IsEmptyValueIndirect(rawDesired.Status) { + rawNew.Status = rawDesired.Status + } else { + rawNew.Status = canonicalizeNewClusterStatus(c, rawDesired.Status, rawNew.Status) + } + + if dcl.IsEmptyValueIndirect(rawNew.StatusHistory) && dcl.IsEmptyValueIndirect(rawDesired.StatusHistory) { + rawNew.StatusHistory = rawDesired.StatusHistory + } else { + rawNew.StatusHistory = canonicalizeNewClusterStatusHistorySlice(c, rawDesired.StatusHistory, rawNew.StatusHistory) + } + + if dcl.IsEmptyValueIndirect(rawNew.ClusterUuid) && dcl.IsEmptyValueIndirect(rawDesired.ClusterUuid) { + rawNew.ClusterUuid = rawDesired.ClusterUuid + } else { + if dcl.StringCanonicalize(rawDesired.ClusterUuid, rawNew.ClusterUuid) { + 
rawNew.ClusterUuid = rawDesired.ClusterUuid + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Metrics) && dcl.IsEmptyValueIndirect(rawDesired.Metrics) { + rawNew.Metrics = rawDesired.Metrics + } else { + rawNew.Metrics = canonicalizeNewClusterMetrics(c, rawDesired.Metrics, rawNew.Metrics) + } + + rawNew.Location = rawDesired.Location + + if dcl.IsEmptyValueIndirect(rawNew.VirtualClusterConfig) && dcl.IsEmptyValueIndirect(rawDesired.VirtualClusterConfig) { + rawNew.VirtualClusterConfig = rawDesired.VirtualClusterConfig + } else { + rawNew.VirtualClusterConfig = canonicalizeNewClusterVirtualClusterConfig(c, rawDesired.VirtualClusterConfig, rawNew.VirtualClusterConfig) + } + + return rawNew, nil +} + +func canonicalizeClusterConfig(des, initial *ClusterConfig, opts ...dcl.ApplyOption) *ClusterConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterConfig{} + + if dcl.IsZeroValue(des.StagingBucket) || (dcl.IsEmptyValueIndirect(des.StagingBucket) && dcl.IsEmptyValueIndirect(initial.StagingBucket)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.StagingBucket = initial.StagingBucket + } else { + cDes.StagingBucket = des.StagingBucket + } + if dcl.IsZeroValue(des.TempBucket) || (dcl.IsEmptyValueIndirect(des.TempBucket) && dcl.IsEmptyValueIndirect(initial.TempBucket)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.TempBucket = initial.TempBucket + } else { + cDes.TempBucket = des.TempBucket + } + cDes.GceClusterConfig = canonicalizeClusterConfigGceClusterConfig(des.GceClusterConfig, initial.GceClusterConfig, opts...) + cDes.MasterConfig = canonicalizeClusterConfigMasterConfig(des.MasterConfig, initial.MasterConfig, opts...) + cDes.WorkerConfig = canonicalizeClusterConfigWorkerConfig(des.WorkerConfig, initial.WorkerConfig, opts...) 
+ cDes.SecondaryWorkerConfig = canonicalizeClusterConfigSecondaryWorkerConfig(des.SecondaryWorkerConfig, initial.SecondaryWorkerConfig, opts...) + cDes.SoftwareConfig = canonicalizeClusterConfigSoftwareConfig(des.SoftwareConfig, initial.SoftwareConfig, opts...) + cDes.InitializationActions = canonicalizeClusterConfigInitializationActionsSlice(des.InitializationActions, initial.InitializationActions, opts...) + cDes.EncryptionConfig = canonicalizeClusterConfigEncryptionConfig(des.EncryptionConfig, initial.EncryptionConfig, opts...) + cDes.AutoscalingConfig = canonicalizeClusterConfigAutoscalingConfig(des.AutoscalingConfig, initial.AutoscalingConfig, opts...) + cDes.SecurityConfig = canonicalizeClusterConfigSecurityConfig(des.SecurityConfig, initial.SecurityConfig, opts...) + cDes.LifecycleConfig = canonicalizeClusterConfigLifecycleConfig(des.LifecycleConfig, initial.LifecycleConfig, opts...) + cDes.EndpointConfig = canonicalizeClusterConfigEndpointConfig(des.EndpointConfig, initial.EndpointConfig, opts...) + cDes.GkeClusterConfig = canonicalizeClusterConfigGkeClusterConfig(des.GkeClusterConfig, initial.GkeClusterConfig, opts...) + cDes.MetastoreConfig = canonicalizeClusterConfigMetastoreConfig(des.MetastoreConfig, initial.MetastoreConfig, opts...) + cDes.DataprocMetricConfig = canonicalizeClusterConfigDataprocMetricConfig(des.DataprocMetricConfig, initial.DataprocMetricConfig, opts...) + + return cDes +} + +func canonicalizeClusterConfigSlice(des, initial []ClusterConfig, opts ...dcl.ApplyOption) []ClusterConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterConfig(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterConfig(c *Client, des, nw *ClusterConfig) *ClusterConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + nw.GceClusterConfig = canonicalizeNewClusterConfigGceClusterConfig(c, des.GceClusterConfig, nw.GceClusterConfig) + nw.MasterConfig = canonicalizeNewClusterConfigMasterConfig(c, des.MasterConfig, nw.MasterConfig) + nw.WorkerConfig = canonicalizeNewClusterConfigWorkerConfig(c, des.WorkerConfig, nw.WorkerConfig) + nw.SecondaryWorkerConfig = canonicalizeNewClusterConfigSecondaryWorkerConfig(c, des.SecondaryWorkerConfig, nw.SecondaryWorkerConfig) + nw.SoftwareConfig = canonicalizeNewClusterConfigSoftwareConfig(c, des.SoftwareConfig, nw.SoftwareConfig) + nw.InitializationActions = canonicalizeNewClusterConfigInitializationActionsSlice(c, des.InitializationActions, nw.InitializationActions) + nw.EncryptionConfig = canonicalizeNewClusterConfigEncryptionConfig(c, des.EncryptionConfig, nw.EncryptionConfig) + nw.AutoscalingConfig = canonicalizeNewClusterConfigAutoscalingConfig(c, des.AutoscalingConfig, nw.AutoscalingConfig) + nw.SecurityConfig = canonicalizeNewClusterConfigSecurityConfig(c, des.SecurityConfig, nw.SecurityConfig) + nw.LifecycleConfig = canonicalizeNewClusterConfigLifecycleConfig(c, des.LifecycleConfig, nw.LifecycleConfig) + nw.EndpointConfig = canonicalizeNewClusterConfigEndpointConfig(c, des.EndpointConfig, nw.EndpointConfig) + nw.GkeClusterConfig = canonicalizeNewClusterConfigGkeClusterConfig(c, des.GkeClusterConfig, nw.GkeClusterConfig) + nw.MetastoreConfig = canonicalizeNewClusterConfigMetastoreConfig(c, des.MetastoreConfig, nw.MetastoreConfig) + nw.DataprocMetricConfig = canonicalizeNewClusterConfigDataprocMetricConfig(c, 
des.DataprocMetricConfig, nw.DataprocMetricConfig) + + return nw +} + +func canonicalizeNewClusterConfigSet(c *Client, des, nw []ClusterConfig) []ClusterConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterConfigSlice(c *Client, des, nw []ClusterConfig) []ClusterConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []ClusterConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterConfigGceClusterConfig(des, initial *ClusterConfigGceClusterConfig, opts ...dcl.ApplyOption) *ClusterConfigGceClusterConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterConfigGceClusterConfig{} + + if dcl.StringCanonicalize(des.Zone, initial.Zone) || dcl.IsZeroValue(des.Zone) { + cDes.Zone = initial.Zone + } else { + cDes.Zone = des.Zone + } + if dcl.IsZeroValue(des.Network) || (dcl.IsEmptyValueIndirect(des.Network) && dcl.IsEmptyValueIndirect(initial.Network)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.Network = initial.Network + } else { + cDes.Network = des.Network + } + if dcl.IsZeroValue(des.Subnetwork) || (dcl.IsEmptyValueIndirect(des.Subnetwork) && dcl.IsEmptyValueIndirect(initial.Subnetwork)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Subnetwork = initial.Subnetwork + } else { + cDes.Subnetwork = des.Subnetwork + } + if dcl.BoolCanonicalize(des.InternalIPOnly, initial.InternalIPOnly) || dcl.IsZeroValue(des.InternalIPOnly) { + cDes.InternalIPOnly = initial.InternalIPOnly + } else { + cDes.InternalIPOnly = des.InternalIPOnly + } + if dcl.IsZeroValue(des.PrivateIPv6GoogleAccess) || (dcl.IsEmptyValueIndirect(des.PrivateIPv6GoogleAccess) && dcl.IsEmptyValueIndirect(initial.PrivateIPv6GoogleAccess)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.PrivateIPv6GoogleAccess = initial.PrivateIPv6GoogleAccess + } else { + cDes.PrivateIPv6GoogleAccess = des.PrivateIPv6GoogleAccess + } + if dcl.IsZeroValue(des.ServiceAccount) || (dcl.IsEmptyValueIndirect(des.ServiceAccount) && dcl.IsEmptyValueIndirect(initial.ServiceAccount)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.ServiceAccount = initial.ServiceAccount + } else { + cDes.ServiceAccount = des.ServiceAccount + } + if dcl.StringArrayCanonicalize(des.ServiceAccountScopes, initial.ServiceAccountScopes) { + cDes.ServiceAccountScopes = initial.ServiceAccountScopes + } else { + cDes.ServiceAccountScopes = des.ServiceAccountScopes + } + if dcl.StringArrayCanonicalize(des.Tags, initial.Tags) { + cDes.Tags = initial.Tags + } else { + cDes.Tags = des.Tags + } + if dcl.IsZeroValue(des.Metadata) || (dcl.IsEmptyValueIndirect(des.Metadata) && dcl.IsEmptyValueIndirect(initial.Metadata)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.Metadata = initial.Metadata + } else { + cDes.Metadata = des.Metadata + } + cDes.ReservationAffinity = canonicalizeClusterConfigGceClusterConfigReservationAffinity(des.ReservationAffinity, initial.ReservationAffinity, opts...) + cDes.NodeGroupAffinity = canonicalizeClusterConfigGceClusterConfigNodeGroupAffinity(des.NodeGroupAffinity, initial.NodeGroupAffinity, opts...) + cDes.ShieldedInstanceConfig = canonicalizeClusterConfigGceClusterConfigShieldedInstanceConfig(des.ShieldedInstanceConfig, initial.ShieldedInstanceConfig, opts...) + cDes.ConfidentialInstanceConfig = canonicalizeClusterConfigGceClusterConfigConfidentialInstanceConfig(des.ConfidentialInstanceConfig, initial.ConfidentialInstanceConfig, opts...) + + return cDes +} + +func canonicalizeClusterConfigGceClusterConfigSlice(des, initial []ClusterConfigGceClusterConfig, opts ...dcl.ApplyOption) []ClusterConfigGceClusterConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterConfigGceClusterConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterConfigGceClusterConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterConfigGceClusterConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterConfigGceClusterConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterConfigGceClusterConfig(c *Client, des, nw *ClusterConfigGceClusterConfig) *ClusterConfigGceClusterConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterConfigGceClusterConfig while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Zone, nw.Zone) { + nw.Zone = des.Zone + } + if dcl.BoolCanonicalize(des.InternalIPOnly, nw.InternalIPOnly) { + nw.InternalIPOnly = des.InternalIPOnly + } + if dcl.StringArrayCanonicalize(des.ServiceAccountScopes, nw.ServiceAccountScopes) { + nw.ServiceAccountScopes = des.ServiceAccountScopes + } + if dcl.StringArrayCanonicalize(des.Tags, nw.Tags) { + nw.Tags = des.Tags + } + nw.ReservationAffinity = canonicalizeNewClusterConfigGceClusterConfigReservationAffinity(c, des.ReservationAffinity, nw.ReservationAffinity) + nw.NodeGroupAffinity = canonicalizeNewClusterConfigGceClusterConfigNodeGroupAffinity(c, des.NodeGroupAffinity, nw.NodeGroupAffinity) + nw.ShieldedInstanceConfig = canonicalizeNewClusterConfigGceClusterConfigShieldedInstanceConfig(c, des.ShieldedInstanceConfig, nw.ShieldedInstanceConfig) + nw.ConfidentialInstanceConfig = canonicalizeNewClusterConfigGceClusterConfigConfidentialInstanceConfig(c, des.ConfidentialInstanceConfig, nw.ConfidentialInstanceConfig) + + return nw +} + +func canonicalizeNewClusterConfigGceClusterConfigSet(c *Client, des, nw []ClusterConfigGceClusterConfig) []ClusterConfigGceClusterConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterConfigGceClusterConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterConfigGceClusterConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterConfigGceClusterConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) 
+ + return items +} + +func canonicalizeNewClusterConfigGceClusterConfigSlice(c *Client, des, nw []ClusterConfigGceClusterConfig) []ClusterConfigGceClusterConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []ClusterConfigGceClusterConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterConfigGceClusterConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterConfigGceClusterConfigReservationAffinity(des, initial *ClusterConfigGceClusterConfigReservationAffinity, opts ...dcl.ApplyOption) *ClusterConfigGceClusterConfigReservationAffinity { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterConfigGceClusterConfigReservationAffinity{} + + if dcl.IsZeroValue(des.ConsumeReservationType) || (dcl.IsEmptyValueIndirect(des.ConsumeReservationType) && dcl.IsEmptyValueIndirect(initial.ConsumeReservationType)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.ConsumeReservationType = initial.ConsumeReservationType + } else { + cDes.ConsumeReservationType = des.ConsumeReservationType + } + if dcl.StringCanonicalize(des.Key, initial.Key) || dcl.IsZeroValue(des.Key) { + cDes.Key = initial.Key + } else { + cDes.Key = des.Key + } + if dcl.StringArrayCanonicalize(des.Values, initial.Values) { + cDes.Values = initial.Values + } else { + cDes.Values = des.Values + } + + return cDes +} + +func canonicalizeClusterConfigGceClusterConfigReservationAffinitySlice(des, initial []ClusterConfigGceClusterConfigReservationAffinity, opts ...dcl.ApplyOption) []ClusterConfigGceClusterConfigReservationAffinity { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterConfigGceClusterConfigReservationAffinity, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterConfigGceClusterConfigReservationAffinity(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterConfigGceClusterConfigReservationAffinity, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterConfigGceClusterConfigReservationAffinity(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterConfigGceClusterConfigReservationAffinity(c *Client, des, nw *ClusterConfigGceClusterConfigReservationAffinity) *ClusterConfigGceClusterConfigReservationAffinity { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterConfigGceClusterConfigReservationAffinity while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Key, nw.Key) { + nw.Key = des.Key + } + if dcl.StringArrayCanonicalize(des.Values, nw.Values) { + nw.Values = des.Values + } + + return nw +} + +func canonicalizeNewClusterConfigGceClusterConfigReservationAffinitySet(c *Client, des, nw []ClusterConfigGceClusterConfigReservationAffinity) []ClusterConfigGceClusterConfigReservationAffinity { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterConfigGceClusterConfigReservationAffinity + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterConfigGceClusterConfigReservationAffinityNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterConfigGceClusterConfigReservationAffinity(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterConfigGceClusterConfigReservationAffinitySlice(c *Client, des, nw []ClusterConfigGceClusterConfigReservationAffinity) []ClusterConfigGceClusterConfigReservationAffinity { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterConfigGceClusterConfigReservationAffinity + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterConfigGceClusterConfigReservationAffinity(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterConfigGceClusterConfigNodeGroupAffinity(des, initial *ClusterConfigGceClusterConfigNodeGroupAffinity, opts ...dcl.ApplyOption) *ClusterConfigGceClusterConfigNodeGroupAffinity { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterConfigGceClusterConfigNodeGroupAffinity{} + + if dcl.IsZeroValue(des.NodeGroup) || (dcl.IsEmptyValueIndirect(des.NodeGroup) && dcl.IsEmptyValueIndirect(initial.NodeGroup)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.NodeGroup = initial.NodeGroup + } else { + cDes.NodeGroup = des.NodeGroup + } + + return cDes +} + +func canonicalizeClusterConfigGceClusterConfigNodeGroupAffinitySlice(des, initial []ClusterConfigGceClusterConfigNodeGroupAffinity, opts ...dcl.ApplyOption) []ClusterConfigGceClusterConfigNodeGroupAffinity { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterConfigGceClusterConfigNodeGroupAffinity, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterConfigGceClusterConfigNodeGroupAffinity(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterConfigGceClusterConfigNodeGroupAffinity, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterConfigGceClusterConfigNodeGroupAffinity(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterConfigGceClusterConfigNodeGroupAffinity(c *Client, des, nw *ClusterConfigGceClusterConfigNodeGroupAffinity) *ClusterConfigGceClusterConfigNodeGroupAffinity { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterConfigGceClusterConfigNodeGroupAffinity while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewClusterConfigGceClusterConfigNodeGroupAffinitySet(c *Client, des, nw []ClusterConfigGceClusterConfigNodeGroupAffinity) []ClusterConfigGceClusterConfigNodeGroupAffinity { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterConfigGceClusterConfigNodeGroupAffinity + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterConfigGceClusterConfigNodeGroupAffinityNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterConfigGceClusterConfigNodeGroupAffinity(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterConfigGceClusterConfigNodeGroupAffinitySlice(c *Client, des, nw []ClusterConfigGceClusterConfigNodeGroupAffinity) []ClusterConfigGceClusterConfigNodeGroupAffinity { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterConfigGceClusterConfigNodeGroupAffinity + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterConfigGceClusterConfigNodeGroupAffinity(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterConfigGceClusterConfigShieldedInstanceConfig(des, initial *ClusterConfigGceClusterConfigShieldedInstanceConfig, opts ...dcl.ApplyOption) *ClusterConfigGceClusterConfigShieldedInstanceConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterConfigGceClusterConfigShieldedInstanceConfig{} + + if dcl.BoolCanonicalize(des.EnableSecureBoot, initial.EnableSecureBoot) || dcl.IsZeroValue(des.EnableSecureBoot) { + cDes.EnableSecureBoot = initial.EnableSecureBoot + } else { + cDes.EnableSecureBoot = des.EnableSecureBoot + } + if dcl.BoolCanonicalize(des.EnableVtpm, initial.EnableVtpm) || dcl.IsZeroValue(des.EnableVtpm) { + cDes.EnableVtpm = initial.EnableVtpm + } else { + cDes.EnableVtpm = des.EnableVtpm + } + if dcl.BoolCanonicalize(des.EnableIntegrityMonitoring, initial.EnableIntegrityMonitoring) || dcl.IsZeroValue(des.EnableIntegrityMonitoring) { + cDes.EnableIntegrityMonitoring = initial.EnableIntegrityMonitoring + } else { + cDes.EnableIntegrityMonitoring = des.EnableIntegrityMonitoring + } + + return cDes +} + +func canonicalizeClusterConfigGceClusterConfigShieldedInstanceConfigSlice(des, initial []ClusterConfigGceClusterConfigShieldedInstanceConfig, opts ...dcl.ApplyOption) []ClusterConfigGceClusterConfigShieldedInstanceConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterConfigGceClusterConfigShieldedInstanceConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterConfigGceClusterConfigShieldedInstanceConfig(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterConfigGceClusterConfigShieldedInstanceConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterConfigGceClusterConfigShieldedInstanceConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterConfigGceClusterConfigShieldedInstanceConfig(c *Client, des, nw *ClusterConfigGceClusterConfigShieldedInstanceConfig) *ClusterConfigGceClusterConfigShieldedInstanceConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterConfigGceClusterConfigShieldedInstanceConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.BoolCanonicalize(des.EnableSecureBoot, nw.EnableSecureBoot) { + nw.EnableSecureBoot = des.EnableSecureBoot + } + if dcl.BoolCanonicalize(des.EnableVtpm, nw.EnableVtpm) { + nw.EnableVtpm = des.EnableVtpm + } + if dcl.BoolCanonicalize(des.EnableIntegrityMonitoring, nw.EnableIntegrityMonitoring) { + nw.EnableIntegrityMonitoring = des.EnableIntegrityMonitoring + } + + return nw +} + +func canonicalizeNewClusterConfigGceClusterConfigShieldedInstanceConfigSet(c *Client, des, nw []ClusterConfigGceClusterConfigShieldedInstanceConfig) []ClusterConfigGceClusterConfigShieldedInstanceConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []ClusterConfigGceClusterConfigShieldedInstanceConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterConfigGceClusterConfigShieldedInstanceConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterConfigGceClusterConfigShieldedInstanceConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterConfigGceClusterConfigShieldedInstanceConfigSlice(c *Client, des, nw []ClusterConfigGceClusterConfigShieldedInstanceConfig) []ClusterConfigGceClusterConfigShieldedInstanceConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []ClusterConfigGceClusterConfigShieldedInstanceConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterConfigGceClusterConfigShieldedInstanceConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterConfigGceClusterConfigConfidentialInstanceConfig(des, initial *ClusterConfigGceClusterConfigConfidentialInstanceConfig, opts ...dcl.ApplyOption) *ClusterConfigGceClusterConfigConfidentialInstanceConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterConfigGceClusterConfigConfidentialInstanceConfig{} + + if dcl.BoolCanonicalize(des.EnableConfidentialCompute, initial.EnableConfidentialCompute) || dcl.IsZeroValue(des.EnableConfidentialCompute) { + cDes.EnableConfidentialCompute = initial.EnableConfidentialCompute + } else { + cDes.EnableConfidentialCompute = des.EnableConfidentialCompute + } + + return cDes +} + +func 
canonicalizeClusterConfigGceClusterConfigConfidentialInstanceConfigSlice(des, initial []ClusterConfigGceClusterConfigConfidentialInstanceConfig, opts ...dcl.ApplyOption) []ClusterConfigGceClusterConfigConfidentialInstanceConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterConfigGceClusterConfigConfidentialInstanceConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterConfigGceClusterConfigConfidentialInstanceConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterConfigGceClusterConfigConfidentialInstanceConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterConfigGceClusterConfigConfidentialInstanceConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterConfigGceClusterConfigConfidentialInstanceConfig(c *Client, des, nw *ClusterConfigGceClusterConfigConfidentialInstanceConfig) *ClusterConfigGceClusterConfigConfidentialInstanceConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterConfigGceClusterConfigConfidentialInstanceConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.BoolCanonicalize(des.EnableConfidentialCompute, nw.EnableConfidentialCompute) { + nw.EnableConfidentialCompute = des.EnableConfidentialCompute + } + + return nw +} + +func canonicalizeNewClusterConfigGceClusterConfigConfidentialInstanceConfigSet(c *Client, des, nw []ClusterConfigGceClusterConfigConfidentialInstanceConfig) []ClusterConfigGceClusterConfigConfidentialInstanceConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []ClusterConfigGceClusterConfigConfidentialInstanceConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterConfigGceClusterConfigConfidentialInstanceConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterConfigGceClusterConfigConfidentialInstanceConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterConfigGceClusterConfigConfidentialInstanceConfigSlice(c *Client, des, nw []ClusterConfigGceClusterConfigConfidentialInstanceConfig) []ClusterConfigGceClusterConfigConfidentialInstanceConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []ClusterConfigGceClusterConfigConfidentialInstanceConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterConfigGceClusterConfigConfidentialInstanceConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterConfigMasterConfig(des, initial *ClusterConfigMasterConfig, opts ...dcl.ApplyOption) *ClusterConfigMasterConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterConfigMasterConfig{} + + if dcl.IsZeroValue(des.NumInstances) || (dcl.IsEmptyValueIndirect(des.NumInstances) && dcl.IsEmptyValueIndirect(initial.NumInstances)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.NumInstances = initial.NumInstances + } else { + cDes.NumInstances = des.NumInstances + } + if dcl.IsZeroValue(des.Image) || (dcl.IsEmptyValueIndirect(des.Image) && dcl.IsEmptyValueIndirect(initial.Image)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Image = initial.Image + } else { + cDes.Image = des.Image + } + if dcl.StringCanonicalize(des.MachineType, initial.MachineType) || dcl.IsZeroValue(des.MachineType) { + cDes.MachineType = initial.MachineType + } else { + cDes.MachineType = des.MachineType + } + cDes.DiskConfig = canonicalizeClusterConfigMasterConfigDiskConfig(des.DiskConfig, initial.DiskConfig, opts...) + if dcl.IsZeroValue(des.Preemptibility) || (dcl.IsEmptyValueIndirect(des.Preemptibility) && dcl.IsEmptyValueIndirect(initial.Preemptibility)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Preemptibility = initial.Preemptibility + } else { + cDes.Preemptibility = des.Preemptibility + } + cDes.Accelerators = canonicalizeClusterConfigMasterConfigAcceleratorsSlice(des.Accelerators, initial.Accelerators, opts...) + if dcl.StringCanonicalize(des.MinCpuPlatform, initial.MinCpuPlatform) || dcl.IsZeroValue(des.MinCpuPlatform) { + cDes.MinCpuPlatform = initial.MinCpuPlatform + } else { + cDes.MinCpuPlatform = des.MinCpuPlatform + } + + return cDes +} + +func canonicalizeClusterConfigMasterConfigSlice(des, initial []ClusterConfigMasterConfig, opts ...dcl.ApplyOption) []ClusterConfigMasterConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterConfigMasterConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterConfigMasterConfig(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterConfigMasterConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterConfigMasterConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterConfigMasterConfig(c *Client, des, nw *ClusterConfigMasterConfig) *ClusterConfigMasterConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterConfigMasterConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringArrayCanonicalize(des.InstanceNames, nw.InstanceNames) { + nw.InstanceNames = des.InstanceNames + } + if dcl.StringCanonicalize(des.MachineType, nw.MachineType) { + nw.MachineType = des.MachineType + } + nw.DiskConfig = canonicalizeNewClusterConfigMasterConfigDiskConfig(c, des.DiskConfig, nw.DiskConfig) + if dcl.BoolCanonicalize(des.IsPreemptible, nw.IsPreemptible) { + nw.IsPreemptible = des.IsPreemptible + } + nw.ManagedGroupConfig = canonicalizeNewClusterConfigMasterConfigManagedGroupConfig(c, des.ManagedGroupConfig, nw.ManagedGroupConfig) + nw.Accelerators = canonicalizeNewClusterConfigMasterConfigAcceleratorsSlice(c, des.Accelerators, nw.Accelerators) + if dcl.StringCanonicalize(des.MinCpuPlatform, nw.MinCpuPlatform) { + nw.MinCpuPlatform = des.MinCpuPlatform + } + nw.InstanceReferences = canonicalizeNewClusterConfigMasterConfigInstanceReferencesSlice(c, des.InstanceReferences, nw.InstanceReferences) + + return nw +} + +func canonicalizeNewClusterConfigMasterConfigSet(c *Client, des, nw []ClusterConfigMasterConfig) []ClusterConfigMasterConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []ClusterConfigMasterConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterConfigMasterConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterConfigMasterConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterConfigMasterConfigSlice(c *Client, des, nw []ClusterConfigMasterConfig) []ClusterConfigMasterConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []ClusterConfigMasterConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterConfigMasterConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterConfigMasterConfigDiskConfig(des, initial *ClusterConfigMasterConfigDiskConfig, opts ...dcl.ApplyOption) *ClusterConfigMasterConfigDiskConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterConfigMasterConfigDiskConfig{} + + if dcl.StringCanonicalize(des.BootDiskType, initial.BootDiskType) || dcl.IsZeroValue(des.BootDiskType) { + cDes.BootDiskType = initial.BootDiskType + } else { + cDes.BootDiskType = des.BootDiskType + } + if dcl.IsZeroValue(des.BootDiskSizeGb) || (dcl.IsEmptyValueIndirect(des.BootDiskSizeGb) && dcl.IsEmptyValueIndirect(initial.BootDiskSizeGb)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.BootDiskSizeGb = initial.BootDiskSizeGb + } else { + cDes.BootDiskSizeGb = des.BootDiskSizeGb + } + if dcl.IsZeroValue(des.NumLocalSsds) || (dcl.IsEmptyValueIndirect(des.NumLocalSsds) && dcl.IsEmptyValueIndirect(initial.NumLocalSsds)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.NumLocalSsds = initial.NumLocalSsds + } else { + cDes.NumLocalSsds = des.NumLocalSsds + } + if dcl.StringCanonicalize(des.LocalSsdInterface, initial.LocalSsdInterface) || dcl.IsZeroValue(des.LocalSsdInterface) { + cDes.LocalSsdInterface = initial.LocalSsdInterface + } else { + cDes.LocalSsdInterface = des.LocalSsdInterface + } + + return cDes +} + +func canonicalizeClusterConfigMasterConfigDiskConfigSlice(des, initial []ClusterConfigMasterConfigDiskConfig, opts ...dcl.ApplyOption) []ClusterConfigMasterConfigDiskConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterConfigMasterConfigDiskConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterConfigMasterConfigDiskConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterConfigMasterConfigDiskConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterConfigMasterConfigDiskConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterConfigMasterConfigDiskConfig(c *Client, des, nw *ClusterConfigMasterConfigDiskConfig) *ClusterConfigMasterConfigDiskConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterConfigMasterConfigDiskConfig while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.BootDiskType, nw.BootDiskType) { + nw.BootDiskType = des.BootDiskType + } + if dcl.StringCanonicalize(des.LocalSsdInterface, nw.LocalSsdInterface) { + nw.LocalSsdInterface = des.LocalSsdInterface + } + + return nw +} + +func canonicalizeNewClusterConfigMasterConfigDiskConfigSet(c *Client, des, nw []ClusterConfigMasterConfigDiskConfig) []ClusterConfigMasterConfigDiskConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterConfigMasterConfigDiskConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterConfigMasterConfigDiskConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterConfigMasterConfigDiskConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterConfigMasterConfigDiskConfigSlice(c *Client, des, nw []ClusterConfigMasterConfigDiskConfig) []ClusterConfigMasterConfigDiskConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterConfigMasterConfigDiskConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterConfigMasterConfigDiskConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterConfigMasterConfigManagedGroupConfig(des, initial *ClusterConfigMasterConfigManagedGroupConfig, opts ...dcl.ApplyOption) *ClusterConfigMasterConfigManagedGroupConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterConfigMasterConfigManagedGroupConfig{} + + return cDes +} + +func canonicalizeClusterConfigMasterConfigManagedGroupConfigSlice(des, initial []ClusterConfigMasterConfigManagedGroupConfig, opts ...dcl.ApplyOption) []ClusterConfigMasterConfigManagedGroupConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterConfigMasterConfigManagedGroupConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterConfigMasterConfigManagedGroupConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterConfigMasterConfigManagedGroupConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterConfigMasterConfigManagedGroupConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterConfigMasterConfigManagedGroupConfig(c *Client, des, nw *ClusterConfigMasterConfigManagedGroupConfig) *ClusterConfigMasterConfigManagedGroupConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterConfigMasterConfigManagedGroupConfig while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.InstanceTemplateName, nw.InstanceTemplateName) { + nw.InstanceTemplateName = des.InstanceTemplateName + } + if dcl.StringCanonicalize(des.InstanceGroupManagerName, nw.InstanceGroupManagerName) { + nw.InstanceGroupManagerName = des.InstanceGroupManagerName + } + + return nw +} + +func canonicalizeNewClusterConfigMasterConfigManagedGroupConfigSet(c *Client, des, nw []ClusterConfigMasterConfigManagedGroupConfig) []ClusterConfigMasterConfigManagedGroupConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterConfigMasterConfigManagedGroupConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterConfigMasterConfigManagedGroupConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterConfigMasterConfigManagedGroupConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterConfigMasterConfigManagedGroupConfigSlice(c *Client, des, nw []ClusterConfigMasterConfigManagedGroupConfig) []ClusterConfigMasterConfigManagedGroupConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterConfigMasterConfigManagedGroupConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterConfigMasterConfigManagedGroupConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterConfigMasterConfigAccelerators(des, initial *ClusterConfigMasterConfigAccelerators, opts ...dcl.ApplyOption) *ClusterConfigMasterConfigAccelerators { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterConfigMasterConfigAccelerators{} + + if dcl.StringCanonicalize(des.AcceleratorType, initial.AcceleratorType) || dcl.IsZeroValue(des.AcceleratorType) { + cDes.AcceleratorType = initial.AcceleratorType + } else { + cDes.AcceleratorType = des.AcceleratorType + } + if dcl.IsZeroValue(des.AcceleratorCount) || (dcl.IsEmptyValueIndirect(des.AcceleratorCount) && dcl.IsEmptyValueIndirect(initial.AcceleratorCount)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.AcceleratorCount = initial.AcceleratorCount + } else { + cDes.AcceleratorCount = des.AcceleratorCount + } + + return cDes +} + +func canonicalizeClusterConfigMasterConfigAcceleratorsSlice(des, initial []ClusterConfigMasterConfigAccelerators, opts ...dcl.ApplyOption) []ClusterConfigMasterConfigAccelerators { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterConfigMasterConfigAccelerators, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterConfigMasterConfigAccelerators(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterConfigMasterConfigAccelerators, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterConfigMasterConfigAccelerators(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterConfigMasterConfigAccelerators(c *Client, des, nw *ClusterConfigMasterConfigAccelerators) *ClusterConfigMasterConfigAccelerators { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterConfigMasterConfigAccelerators while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.AcceleratorType, nw.AcceleratorType) { + nw.AcceleratorType = des.AcceleratorType + } + + return nw +} + +func canonicalizeNewClusterConfigMasterConfigAcceleratorsSet(c *Client, des, nw []ClusterConfigMasterConfigAccelerators) []ClusterConfigMasterConfigAccelerators { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterConfigMasterConfigAccelerators + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterConfigMasterConfigAcceleratorsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterConfigMasterConfigAccelerators(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterConfigMasterConfigAcceleratorsSlice(c *Client, des, nw []ClusterConfigMasterConfigAccelerators) []ClusterConfigMasterConfigAccelerators { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterConfigMasterConfigAccelerators + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterConfigMasterConfigAccelerators(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterConfigMasterConfigInstanceReferences(des, initial *ClusterConfigMasterConfigInstanceReferences, opts ...dcl.ApplyOption) *ClusterConfigMasterConfigInstanceReferences { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterConfigMasterConfigInstanceReferences{} + + if dcl.StringCanonicalize(des.InstanceName, initial.InstanceName) || dcl.IsZeroValue(des.InstanceName) { + cDes.InstanceName = initial.InstanceName + } else { + cDes.InstanceName = des.InstanceName + } + if dcl.StringCanonicalize(des.InstanceId, initial.InstanceId) || dcl.IsZeroValue(des.InstanceId) { + cDes.InstanceId = initial.InstanceId + } else { + cDes.InstanceId = des.InstanceId + } + if dcl.StringCanonicalize(des.PublicKey, initial.PublicKey) || dcl.IsZeroValue(des.PublicKey) { + cDes.PublicKey = initial.PublicKey + } else { + cDes.PublicKey = des.PublicKey + } + if dcl.StringCanonicalize(des.PublicEciesKey, initial.PublicEciesKey) || dcl.IsZeroValue(des.PublicEciesKey) { + cDes.PublicEciesKey = initial.PublicEciesKey + } else { + cDes.PublicEciesKey = des.PublicEciesKey + } + + return cDes +} + +func canonicalizeClusterConfigMasterConfigInstanceReferencesSlice(des, initial []ClusterConfigMasterConfigInstanceReferences, opts ...dcl.ApplyOption) []ClusterConfigMasterConfigInstanceReferences { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterConfigMasterConfigInstanceReferences, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterConfigMasterConfigInstanceReferences(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterConfigMasterConfigInstanceReferences, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterConfigMasterConfigInstanceReferences(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterConfigMasterConfigInstanceReferences(c *Client, des, nw *ClusterConfigMasterConfigInstanceReferences) *ClusterConfigMasterConfigInstanceReferences { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterConfigMasterConfigInstanceReferences while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.InstanceName, nw.InstanceName) { + nw.InstanceName = des.InstanceName + } + if dcl.StringCanonicalize(des.InstanceId, nw.InstanceId) { + nw.InstanceId = des.InstanceId + } + if dcl.StringCanonicalize(des.PublicKey, nw.PublicKey) { + nw.PublicKey = des.PublicKey + } + if dcl.StringCanonicalize(des.PublicEciesKey, nw.PublicEciesKey) { + nw.PublicEciesKey = des.PublicEciesKey + } + + return nw +} + +func canonicalizeNewClusterConfigMasterConfigInstanceReferencesSet(c *Client, des, nw []ClusterConfigMasterConfigInstanceReferences) []ClusterConfigMasterConfigInstanceReferences { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []ClusterConfigMasterConfigInstanceReferences + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterConfigMasterConfigInstanceReferencesNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterConfigMasterConfigInstanceReferences(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterConfigMasterConfigInstanceReferencesSlice(c *Client, des, nw []ClusterConfigMasterConfigInstanceReferences) []ClusterConfigMasterConfigInstanceReferences { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []ClusterConfigMasterConfigInstanceReferences + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterConfigMasterConfigInstanceReferences(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterConfigWorkerConfig(des, initial *ClusterConfigWorkerConfig, opts ...dcl.ApplyOption) *ClusterConfigWorkerConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterConfigWorkerConfig{} + + if dcl.IsZeroValue(des.NumInstances) || (dcl.IsEmptyValueIndirect(des.NumInstances) && dcl.IsEmptyValueIndirect(initial.NumInstances)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.NumInstances = initial.NumInstances + } else { + cDes.NumInstances = des.NumInstances + } + if dcl.IsZeroValue(des.Image) || (dcl.IsEmptyValueIndirect(des.Image) && dcl.IsEmptyValueIndirect(initial.Image)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Image = initial.Image + } else { + cDes.Image = des.Image + } + if dcl.StringCanonicalize(des.MachineType, initial.MachineType) || dcl.IsZeroValue(des.MachineType) { + cDes.MachineType = initial.MachineType + } else { + cDes.MachineType = des.MachineType + } + cDes.DiskConfig = canonicalizeClusterConfigWorkerConfigDiskConfig(des.DiskConfig, initial.DiskConfig, opts...) + if dcl.IsZeroValue(des.Preemptibility) || (dcl.IsEmptyValueIndirect(des.Preemptibility) && dcl.IsEmptyValueIndirect(initial.Preemptibility)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Preemptibility = initial.Preemptibility + } else { + cDes.Preemptibility = des.Preemptibility + } + cDes.Accelerators = canonicalizeClusterConfigWorkerConfigAcceleratorsSlice(des.Accelerators, initial.Accelerators, opts...) + if dcl.StringCanonicalize(des.MinCpuPlatform, initial.MinCpuPlatform) || dcl.IsZeroValue(des.MinCpuPlatform) { + cDes.MinCpuPlatform = initial.MinCpuPlatform + } else { + cDes.MinCpuPlatform = des.MinCpuPlatform + } + + return cDes +} + +func canonicalizeClusterConfigWorkerConfigSlice(des, initial []ClusterConfigWorkerConfig, opts ...dcl.ApplyOption) []ClusterConfigWorkerConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterConfigWorkerConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterConfigWorkerConfig(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterConfigWorkerConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterConfigWorkerConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterConfigWorkerConfig(c *Client, des, nw *ClusterConfigWorkerConfig) *ClusterConfigWorkerConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterConfigWorkerConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringArrayCanonicalize(des.InstanceNames, nw.InstanceNames) { + nw.InstanceNames = des.InstanceNames + } + if dcl.StringCanonicalize(des.MachineType, nw.MachineType) { + nw.MachineType = des.MachineType + } + nw.DiskConfig = canonicalizeNewClusterConfigWorkerConfigDiskConfig(c, des.DiskConfig, nw.DiskConfig) + if dcl.BoolCanonicalize(des.IsPreemptible, nw.IsPreemptible) { + nw.IsPreemptible = des.IsPreemptible + } + nw.ManagedGroupConfig = canonicalizeNewClusterConfigWorkerConfigManagedGroupConfig(c, des.ManagedGroupConfig, nw.ManagedGroupConfig) + nw.Accelerators = canonicalizeNewClusterConfigWorkerConfigAcceleratorsSlice(c, des.Accelerators, nw.Accelerators) + if dcl.StringCanonicalize(des.MinCpuPlatform, nw.MinCpuPlatform) { + nw.MinCpuPlatform = des.MinCpuPlatform + } + nw.InstanceReferences = canonicalizeNewClusterConfigWorkerConfigInstanceReferencesSlice(c, des.InstanceReferences, nw.InstanceReferences) + + return nw +} + +func canonicalizeNewClusterConfigWorkerConfigSet(c *Client, des, nw []ClusterConfigWorkerConfig) []ClusterConfigWorkerConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []ClusterConfigWorkerConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterConfigWorkerConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterConfigWorkerConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterConfigWorkerConfigSlice(c *Client, des, nw []ClusterConfigWorkerConfig) []ClusterConfigWorkerConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []ClusterConfigWorkerConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterConfigWorkerConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterConfigWorkerConfigDiskConfig(des, initial *ClusterConfigWorkerConfigDiskConfig, opts ...dcl.ApplyOption) *ClusterConfigWorkerConfigDiskConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterConfigWorkerConfigDiskConfig{} + + if dcl.StringCanonicalize(des.BootDiskType, initial.BootDiskType) || dcl.IsZeroValue(des.BootDiskType) { + cDes.BootDiskType = initial.BootDiskType + } else { + cDes.BootDiskType = des.BootDiskType + } + if dcl.IsZeroValue(des.BootDiskSizeGb) || (dcl.IsEmptyValueIndirect(des.BootDiskSizeGb) && dcl.IsEmptyValueIndirect(initial.BootDiskSizeGb)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.BootDiskSizeGb = initial.BootDiskSizeGb + } else { + cDes.BootDiskSizeGb = des.BootDiskSizeGb + } + if dcl.IsZeroValue(des.NumLocalSsds) || (dcl.IsEmptyValueIndirect(des.NumLocalSsds) && dcl.IsEmptyValueIndirect(initial.NumLocalSsds)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.NumLocalSsds = initial.NumLocalSsds + } else { + cDes.NumLocalSsds = des.NumLocalSsds + } + if dcl.StringCanonicalize(des.LocalSsdInterface, initial.LocalSsdInterface) || dcl.IsZeroValue(des.LocalSsdInterface) { + cDes.LocalSsdInterface = initial.LocalSsdInterface + } else { + cDes.LocalSsdInterface = des.LocalSsdInterface + } + + return cDes +} + +func canonicalizeClusterConfigWorkerConfigDiskConfigSlice(des, initial []ClusterConfigWorkerConfigDiskConfig, opts ...dcl.ApplyOption) []ClusterConfigWorkerConfigDiskConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterConfigWorkerConfigDiskConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterConfigWorkerConfigDiskConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterConfigWorkerConfigDiskConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterConfigWorkerConfigDiskConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterConfigWorkerConfigDiskConfig(c *Client, des, nw *ClusterConfigWorkerConfigDiskConfig) *ClusterConfigWorkerConfigDiskConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterConfigWorkerConfigDiskConfig while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.BootDiskType, nw.BootDiskType) { + nw.BootDiskType = des.BootDiskType + } + if dcl.StringCanonicalize(des.LocalSsdInterface, nw.LocalSsdInterface) { + nw.LocalSsdInterface = des.LocalSsdInterface + } + + return nw +} + +func canonicalizeNewClusterConfigWorkerConfigDiskConfigSet(c *Client, des, nw []ClusterConfigWorkerConfigDiskConfig) []ClusterConfigWorkerConfigDiskConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterConfigWorkerConfigDiskConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterConfigWorkerConfigDiskConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterConfigWorkerConfigDiskConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterConfigWorkerConfigDiskConfigSlice(c *Client, des, nw []ClusterConfigWorkerConfigDiskConfig) []ClusterConfigWorkerConfigDiskConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
	if len(des) != len(nw) {
		return nw
	}

	var items []ClusterConfigWorkerConfigDiskConfig
	for i, d := range des {
		n := nw[i]
		items = append(items, *canonicalizeNewClusterConfigWorkerConfigDiskConfig(c, &d, &n))
	}

	return items
}

// canonicalizeClusterConfigWorkerConfigManagedGroupConfig merges desired and initial values.
// The type has no user-settable fields here, so the canonical desired value is always empty.
func canonicalizeClusterConfigWorkerConfigManagedGroupConfig(des, initial *ClusterConfigWorkerConfigManagedGroupConfig, opts ...dcl.ApplyOption) *ClusterConfigWorkerConfigManagedGroupConfig {
	if des == nil {
		return initial
	}
	if des.empty {
		return des
	}

	if initial == nil {
		return des
	}

	cDes := &ClusterConfigWorkerConfigManagedGroupConfig{}

	return cDes
}

// canonicalizeClusterConfigWorkerConfigManagedGroupConfigSlice canonicalizes desired elements
// against initial element-wise; on length mismatch each desired element is canonicalized
// against nil.
func canonicalizeClusterConfigWorkerConfigManagedGroupConfigSlice(des, initial []ClusterConfigWorkerConfigManagedGroupConfig, opts ...dcl.ApplyOption) []ClusterConfigWorkerConfigManagedGroupConfig {
	if dcl.IsEmptyValueIndirect(des) {
		return initial
	}

	if len(des) != len(initial) {

		items := make([]ClusterConfigWorkerConfigManagedGroupConfig, 0, len(des))
		for _, d := range des {
			cd := canonicalizeClusterConfigWorkerConfigManagedGroupConfig(&d, nil, opts...)
			if cd != nil {
				items = append(items, *cd)
			}
		}
		return items
	}

	items := make([]ClusterConfigWorkerConfigManagedGroupConfig, 0, len(des))
	for i, d := range des {
		cd := canonicalizeClusterConfigWorkerConfigManagedGroupConfig(&d, &initial[i], opts...)
		if cd != nil {
			items = append(items, *cd)
		}
	}
	return items

}

// canonicalizeNewClusterConfigWorkerConfigManagedGroupConfig reconciles the server-returned
// value (nw) with the desired value (des), keeping desired string spellings where equivalent.
func canonicalizeNewClusterConfigWorkerConfigManagedGroupConfig(c *Client, des, nw *ClusterConfigWorkerConfigManagedGroupConfig) *ClusterConfigWorkerConfigManagedGroupConfig {

	if des == nil {
		return nw
	}

	if nw == nil {
		if dcl.IsEmptyValueIndirect(des) {
			c.Config.Logger.Info("Found explicitly empty value for ClusterConfigWorkerConfigManagedGroupConfig while comparing non-nil desired to nil actual. Returning desired object.")
			return des
		}
		return nil
	}

	if dcl.StringCanonicalize(des.InstanceTemplateName, nw.InstanceTemplateName) {
		nw.InstanceTemplateName = des.InstanceTemplateName
	}
	if dcl.StringCanonicalize(des.InstanceGroupManagerName, nw.InstanceGroupManagerName) {
		nw.InstanceGroupManagerName = des.InstanceGroupManagerName
	}

	return nw
}

// canonicalizeNewClusterConfigWorkerConfigManagedGroupConfigSet performs unordered matching:
// diff-free pairs are canonicalized and removed from nw; leftover actual elements are
// appended unchanged.
func canonicalizeNewClusterConfigWorkerConfigManagedGroupConfigSet(c *Client, des, nw []ClusterConfigWorkerConfigManagedGroupConfig) []ClusterConfigWorkerConfigManagedGroupConfig {
	if des == nil {
		return nw
	}

	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
	var items []ClusterConfigWorkerConfigManagedGroupConfig
	for _, d := range des {
		matchedIndex := -1
		for i, n := range nw {
			if diffs, _ := compareClusterConfigWorkerConfigManagedGroupConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
				matchedIndex = i
				break
			}
		}
		if matchedIndex != -1 {
			items = append(items, *canonicalizeNewClusterConfigWorkerConfigManagedGroupConfig(c, &d, &nw[matchedIndex]))
			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
		}
	}
	// Also include elements in nw that are not matched in des.
	items = append(items, nw...)

	return items
}

// canonicalizeNewClusterConfigWorkerConfigManagedGroupConfigSlice canonicalizes pairwise by
// index; unequal lengths return nw untouched so the later diff can surface.
func canonicalizeNewClusterConfigWorkerConfigManagedGroupConfigSlice(c *Client, des, nw []ClusterConfigWorkerConfigManagedGroupConfig) []ClusterConfigWorkerConfigManagedGroupConfig {
	if des == nil {
		return nw
	}

	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
	// Return the original array.
	if len(des) != len(nw) {
		return nw
	}

	var items []ClusterConfigWorkerConfigManagedGroupConfig
	for i, d := range des {
		n := nw[i]
		items = append(items, *canonicalizeNewClusterConfigWorkerConfigManagedGroupConfig(c, &d, &n))
	}

	return items
}

// canonicalizeClusterConfigWorkerConfigAccelerators merges desired and initial accelerator
// settings; fields that are equivalent collapse to the initial value so no spurious diff
// is produced.
func canonicalizeClusterConfigWorkerConfigAccelerators(des, initial *ClusterConfigWorkerConfigAccelerators, opts ...dcl.ApplyOption) *ClusterConfigWorkerConfigAccelerators {
	if des == nil {
		return initial
	}
	if des.empty {
		return des
	}

	if initial == nil {
		return des
	}

	cDes := &ClusterConfigWorkerConfigAccelerators{}

	if dcl.StringCanonicalize(des.AcceleratorType, initial.AcceleratorType) || dcl.IsZeroValue(des.AcceleratorType) {
		cDes.AcceleratorType = initial.AcceleratorType
	} else {
		cDes.AcceleratorType = des.AcceleratorType
	}
	if dcl.IsZeroValue(des.AcceleratorCount) || (dcl.IsEmptyValueIndirect(des.AcceleratorCount) && dcl.IsEmptyValueIndirect(initial.AcceleratorCount)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		cDes.AcceleratorCount = initial.AcceleratorCount
	} else {
		cDes.AcceleratorCount = des.AcceleratorCount
	}

	return cDes
}

// canonicalizeClusterConfigWorkerConfigAcceleratorsSlice canonicalizes desired elements
// against initial element-wise; on length mismatch each desired element is canonicalized
// against nil.
// NOTE(review): this guards with des == nil while sibling *Slice canonicalizers use
// dcl.IsEmptyValueIndirect(des) — generated as-is; confirm against the DCL generator
// before changing.
func canonicalizeClusterConfigWorkerConfigAcceleratorsSlice(des, initial []ClusterConfigWorkerConfigAccelerators, opts ...dcl.ApplyOption) []ClusterConfigWorkerConfigAccelerators {
	if des == nil {
		return initial
	}

	if len(des) != len(initial) {

		items := make([]ClusterConfigWorkerConfigAccelerators, 0, len(des))
		for _, d := range des {
			cd := canonicalizeClusterConfigWorkerConfigAccelerators(&d, nil, opts...)
			if cd != nil {
				items = append(items, *cd)
			}
		}
		return items
	}

	items := make([]ClusterConfigWorkerConfigAccelerators, 0, len(des))
	for i, d := range des {
		cd := canonicalizeClusterConfigWorkerConfigAccelerators(&d, &initial[i], opts...)
		if cd != nil {
			items = append(items, *cd)
		}
	}
	return items

}

// canonicalizeNewClusterConfigWorkerConfigAccelerators reconciles the server-returned value
// (nw) with the desired value (des), keeping the desired AcceleratorType spelling when the
// two are equivalent.
func canonicalizeNewClusterConfigWorkerConfigAccelerators(c *Client, des, nw *ClusterConfigWorkerConfigAccelerators) *ClusterConfigWorkerConfigAccelerators {

	if des == nil {
		return nw
	}

	if nw == nil {
		if dcl.IsEmptyValueIndirect(des) {
			c.Config.Logger.Info("Found explicitly empty value for ClusterConfigWorkerConfigAccelerators while comparing non-nil desired to nil actual. Returning desired object.")
			return des
		}
		return nil
	}

	if dcl.StringCanonicalize(des.AcceleratorType, nw.AcceleratorType) {
		nw.AcceleratorType = des.AcceleratorType
	}

	return nw
}

// canonicalizeNewClusterConfigWorkerConfigAcceleratorsSet performs unordered matching:
// diff-free pairs are canonicalized and removed from nw; leftover actual elements are
// appended unchanged.
func canonicalizeNewClusterConfigWorkerConfigAcceleratorsSet(c *Client, des, nw []ClusterConfigWorkerConfigAccelerators) []ClusterConfigWorkerConfigAccelerators {
	if des == nil {
		return nw
	}

	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
	var items []ClusterConfigWorkerConfigAccelerators
	for _, d := range des {
		matchedIndex := -1
		for i, n := range nw {
			if diffs, _ := compareClusterConfigWorkerConfigAcceleratorsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
				matchedIndex = i
				break
			}
		}
		if matchedIndex != -1 {
			items = append(items, *canonicalizeNewClusterConfigWorkerConfigAccelerators(c, &d, &nw[matchedIndex]))
			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
		}
	}
	// Also include elements in nw that are not matched in des.
	items = append(items, nw...)

	return items
}

// canonicalizeNewClusterConfigWorkerConfigAcceleratorsSlice canonicalizes pairwise by index;
// unequal lengths return nw untouched so the later diff can surface.
func canonicalizeNewClusterConfigWorkerConfigAcceleratorsSlice(c *Client, des, nw []ClusterConfigWorkerConfigAccelerators) []ClusterConfigWorkerConfigAccelerators {
	if des == nil {
		return nw
	}

	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
	// Return the original array.
	if len(des) != len(nw) {
		return nw
	}

	var items []ClusterConfigWorkerConfigAccelerators
	for i, d := range des {
		n := nw[i]
		items = append(items, *canonicalizeNewClusterConfigWorkerConfigAccelerators(c, &d, &n))
	}

	return items
}

// canonicalizeClusterConfigWorkerConfigInstanceReferences merges desired and initial values;
// each string field collapses to the initial value when the two are equivalent so no
// spurious diff is produced.
func canonicalizeClusterConfigWorkerConfigInstanceReferences(des, initial *ClusterConfigWorkerConfigInstanceReferences, opts ...dcl.ApplyOption) *ClusterConfigWorkerConfigInstanceReferences {
	if des == nil {
		return initial
	}
	if des.empty {
		return des
	}

	if initial == nil {
		return des
	}

	cDes := &ClusterConfigWorkerConfigInstanceReferences{}

	if dcl.StringCanonicalize(des.InstanceName, initial.InstanceName) || dcl.IsZeroValue(des.InstanceName) {
		cDes.InstanceName = initial.InstanceName
	} else {
		cDes.InstanceName = des.InstanceName
	}
	if dcl.StringCanonicalize(des.InstanceId, initial.InstanceId) || dcl.IsZeroValue(des.InstanceId) {
		cDes.InstanceId = initial.InstanceId
	} else {
		cDes.InstanceId = des.InstanceId
	}
	if dcl.StringCanonicalize(des.PublicKey, initial.PublicKey) || dcl.IsZeroValue(des.PublicKey) {
		cDes.PublicKey = initial.PublicKey
	} else {
		cDes.PublicKey = des.PublicKey
	}
	if dcl.StringCanonicalize(des.PublicEciesKey, initial.PublicEciesKey) || dcl.IsZeroValue(des.PublicEciesKey) {
		cDes.PublicEciesKey = initial.PublicEciesKey
	} else {
		cDes.PublicEciesKey = des.PublicEciesKey
	}

	return cDes
}

// canonicalizeClusterConfigWorkerConfigInstanceReferencesSlice canonicalizes desired elements
// against initial element-wise; on length mismatch each desired element is canonicalized
// against nil.
func canonicalizeClusterConfigWorkerConfigInstanceReferencesSlice(des, initial []ClusterConfigWorkerConfigInstanceReferences, opts ...dcl.ApplyOption) []ClusterConfigWorkerConfigInstanceReferences {
	if dcl.IsEmptyValueIndirect(des) {
		return initial
	}

	if len(des) != len(initial) {

		items := make([]ClusterConfigWorkerConfigInstanceReferences, 0, len(des))
		for _, d := range des {
			cd := canonicalizeClusterConfigWorkerConfigInstanceReferences(&d, nil, opts...)
			if cd != nil {
				items = append(items, *cd)
			}
		}
		return items
	}

	items := make([]ClusterConfigWorkerConfigInstanceReferences, 0, len(des))
	for i, d := range des {
		cd := canonicalizeClusterConfigWorkerConfigInstanceReferences(&d, &initial[i], opts...)
		if cd != nil {
			items = append(items, *cd)
		}
	}
	return items

}

// canonicalizeNewClusterConfigWorkerConfigInstanceReferences reconciles the server-returned
// value (nw) with the desired value (des), keeping desired string spellings where equivalent.
func canonicalizeNewClusterConfigWorkerConfigInstanceReferences(c *Client, des, nw *ClusterConfigWorkerConfigInstanceReferences) *ClusterConfigWorkerConfigInstanceReferences {

	if des == nil {
		return nw
	}

	if nw == nil {
		if dcl.IsEmptyValueIndirect(des) {
			c.Config.Logger.Info("Found explicitly empty value for ClusterConfigWorkerConfigInstanceReferences while comparing non-nil desired to nil actual. Returning desired object.")
			return des
		}
		return nil
	}

	if dcl.StringCanonicalize(des.InstanceName, nw.InstanceName) {
		nw.InstanceName = des.InstanceName
	}
	if dcl.StringCanonicalize(des.InstanceId, nw.InstanceId) {
		nw.InstanceId = des.InstanceId
	}
	if dcl.StringCanonicalize(des.PublicKey, nw.PublicKey) {
		nw.PublicKey = des.PublicKey
	}
	if dcl.StringCanonicalize(des.PublicEciesKey, nw.PublicEciesKey) {
		nw.PublicEciesKey = des.PublicEciesKey
	}

	return nw
}

// canonicalizeNewClusterConfigWorkerConfigInstanceReferencesSet performs unordered matching:
// diff-free pairs are canonicalized and removed from nw; leftover actual elements are
// appended unchanged.
func canonicalizeNewClusterConfigWorkerConfigInstanceReferencesSet(c *Client, des, nw []ClusterConfigWorkerConfigInstanceReferences) []ClusterConfigWorkerConfigInstanceReferences {
	if des == nil {
		return nw
	}

	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
	var items []ClusterConfigWorkerConfigInstanceReferences
	for _, d := range des {
		matchedIndex := -1
		for i, n := range nw {
			if diffs, _ := compareClusterConfigWorkerConfigInstanceReferencesNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
				matchedIndex = i
				break
			}
		}
		if matchedIndex != -1 {
			items = append(items, *canonicalizeNewClusterConfigWorkerConfigInstanceReferences(c, &d, &nw[matchedIndex]))
			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
		}
	}
	// Also include elements in nw that are not matched in des.
	items = append(items, nw...)

	return items
}

// canonicalizeNewClusterConfigWorkerConfigInstanceReferencesSlice canonicalizes pairwise by
// index; unequal lengths return nw untouched so the later diff can surface.
func canonicalizeNewClusterConfigWorkerConfigInstanceReferencesSlice(c *Client, des, nw []ClusterConfigWorkerConfigInstanceReferences) []ClusterConfigWorkerConfigInstanceReferences {
	if des == nil {
		return nw
	}

	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
	// Return the original array.
	if len(des) != len(nw) {
		return nw
	}

	var items []ClusterConfigWorkerConfigInstanceReferences
	for i, d := range des {
		n := nw[i]
		items = append(items, *canonicalizeNewClusterConfigWorkerConfigInstanceReferences(c, &d, &n))
	}

	return items
}

// canonicalizeClusterConfigSecondaryWorkerConfig merges the desired secondary-worker config
// with prior state: equivalent scalar fields collapse to the initial value and nested
// messages delegate to their own canonicalizers.
func canonicalizeClusterConfigSecondaryWorkerConfig(des, initial *ClusterConfigSecondaryWorkerConfig, opts ...dcl.ApplyOption) *ClusterConfigSecondaryWorkerConfig {
	if des == nil {
		return initial
	}
	if des.empty {
		return des
	}

	if initial == nil {
		return des
	}

	cDes := &ClusterConfigSecondaryWorkerConfig{}

	if dcl.IsZeroValue(des.NumInstances) || (dcl.IsEmptyValueIndirect(des.NumInstances) && dcl.IsEmptyValueIndirect(initial.NumInstances)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		cDes.NumInstances = initial.NumInstances
	} else {
		cDes.NumInstances = des.NumInstances
	}
	if dcl.IsZeroValue(des.Image) || (dcl.IsEmptyValueIndirect(des.Image) && dcl.IsEmptyValueIndirect(initial.Image)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		cDes.Image = initial.Image
	} else {
		cDes.Image = des.Image
	}
	if dcl.StringCanonicalize(des.MachineType, initial.MachineType) || dcl.IsZeroValue(des.MachineType) {
		cDes.MachineType = initial.MachineType
	} else {
		cDes.MachineType = des.MachineType
	}
	cDes.DiskConfig = canonicalizeClusterConfigSecondaryWorkerConfigDiskConfig(des.DiskConfig, initial.DiskConfig, opts...)
	if dcl.IsZeroValue(des.Preemptibility) || (dcl.IsEmptyValueIndirect(des.Preemptibility) && dcl.IsEmptyValueIndirect(initial.Preemptibility)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		cDes.Preemptibility = initial.Preemptibility
	} else {
		cDes.Preemptibility = des.Preemptibility
	}
	cDes.Accelerators = canonicalizeClusterConfigSecondaryWorkerConfigAcceleratorsSlice(des.Accelerators, initial.Accelerators, opts...)
	if dcl.StringCanonicalize(des.MinCpuPlatform, initial.MinCpuPlatform) || dcl.IsZeroValue(des.MinCpuPlatform) {
		cDes.MinCpuPlatform = initial.MinCpuPlatform
	} else {
		cDes.MinCpuPlatform = des.MinCpuPlatform
	}

	return cDes
}

// canonicalizeClusterConfigSecondaryWorkerConfigSlice canonicalizes desired elements against
// initial element-wise; on length mismatch each desired element is canonicalized against nil.
func canonicalizeClusterConfigSecondaryWorkerConfigSlice(des, initial []ClusterConfigSecondaryWorkerConfig, opts ...dcl.ApplyOption) []ClusterConfigSecondaryWorkerConfig {
	if dcl.IsEmptyValueIndirect(des) {
		return initial
	}

	if len(des) != len(initial) {

		items := make([]ClusterConfigSecondaryWorkerConfig, 0, len(des))
		for _, d := range des {
			cd := canonicalizeClusterConfigSecondaryWorkerConfig(&d, nil, opts...)
			if cd != nil {
				items = append(items, *cd)
			}
		}
		return items
	}

	items := make([]ClusterConfigSecondaryWorkerConfig, 0, len(des))
	for i, d := range des {
		cd := canonicalizeClusterConfigSecondaryWorkerConfig(&d, &initial[i], opts...)
		if cd != nil {
			items = append(items, *cd)
		}
	}
	return items

}

// canonicalizeNewClusterConfigSecondaryWorkerConfig reconciles the server-returned value (nw)
// with the desired value (des): equivalent scalars keep the desired spelling and nested
// messages delegate to their own New canonicalizers.
func canonicalizeNewClusterConfigSecondaryWorkerConfig(c *Client, des, nw *ClusterConfigSecondaryWorkerConfig) *ClusterConfigSecondaryWorkerConfig {

	if des == nil {
		return nw
	}

	if nw == nil {
		if dcl.IsEmptyValueIndirect(des) {
			c.Config.Logger.Info("Found explicitly empty value for ClusterConfigSecondaryWorkerConfig while comparing non-nil desired to nil actual. Returning desired object.")
			return des
		}
		return nil
	}

	if dcl.StringArrayCanonicalize(des.InstanceNames, nw.InstanceNames) {
		nw.InstanceNames = des.InstanceNames
	}
	if dcl.StringCanonicalize(des.MachineType, nw.MachineType) {
		nw.MachineType = des.MachineType
	}
	nw.DiskConfig = canonicalizeNewClusterConfigSecondaryWorkerConfigDiskConfig(c, des.DiskConfig, nw.DiskConfig)
	if dcl.BoolCanonicalize(des.IsPreemptible, nw.IsPreemptible) {
		nw.IsPreemptible = des.IsPreemptible
	}
	nw.ManagedGroupConfig = canonicalizeNewClusterConfigSecondaryWorkerConfigManagedGroupConfig(c, des.ManagedGroupConfig, nw.ManagedGroupConfig)
	nw.Accelerators = canonicalizeNewClusterConfigSecondaryWorkerConfigAcceleratorsSlice(c, des.Accelerators, nw.Accelerators)
	if dcl.StringCanonicalize(des.MinCpuPlatform, nw.MinCpuPlatform) {
		nw.MinCpuPlatform = des.MinCpuPlatform
	}
	nw.InstanceReferences = canonicalizeNewClusterConfigSecondaryWorkerConfigInstanceReferencesSlice(c, des.InstanceReferences, nw.InstanceReferences)

	return nw
}

// canonicalizeNewClusterConfigSecondaryWorkerConfigSet performs unordered matching:
// diff-free pairs are canonicalized and removed from nw; leftover actual elements are
// appended unchanged.
func canonicalizeNewClusterConfigSecondaryWorkerConfigSet(c *Client, des, nw []ClusterConfigSecondaryWorkerConfig) []ClusterConfigSecondaryWorkerConfig {
	if des == nil {
		return nw
	}

	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
	var items []ClusterConfigSecondaryWorkerConfig
	for _, d := range des {
		matchedIndex := -1
		for i, n := range nw {
			if diffs, _ := compareClusterConfigSecondaryWorkerConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
				matchedIndex = i
				break
			}
		}
		if matchedIndex != -1 {
			items = append(items, *canonicalizeNewClusterConfigSecondaryWorkerConfig(c, &d, &nw[matchedIndex]))
			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
		}
	}
	// Also include elements in nw that are not matched in des.
	items = append(items, nw...)

	return items
}

// canonicalizeNewClusterConfigSecondaryWorkerConfigSlice canonicalizes pairwise by index;
// unequal lengths return nw untouched so the later diff can surface.
func canonicalizeNewClusterConfigSecondaryWorkerConfigSlice(c *Client, des, nw []ClusterConfigSecondaryWorkerConfig) []ClusterConfigSecondaryWorkerConfig {
	if des == nil {
		return nw
	}

	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
	// Return the original array.
	if len(des) != len(nw) {
		return nw
	}

	var items []ClusterConfigSecondaryWorkerConfig
	for i, d := range des {
		n := nw[i]
		items = append(items, *canonicalizeNewClusterConfigSecondaryWorkerConfig(c, &d, &n))
	}

	return items
}

// canonicalizeClusterConfigSecondaryWorkerConfigDiskConfig merges desired and initial disk
// settings; equivalent fields collapse to the initial value so no spurious diff is produced.
func canonicalizeClusterConfigSecondaryWorkerConfigDiskConfig(des, initial *ClusterConfigSecondaryWorkerConfigDiskConfig, opts ...dcl.ApplyOption) *ClusterConfigSecondaryWorkerConfigDiskConfig {
	if des == nil {
		return initial
	}
	if des.empty {
		return des
	}

	if initial == nil {
		return des
	}

	cDes := &ClusterConfigSecondaryWorkerConfigDiskConfig{}

	if dcl.StringCanonicalize(des.BootDiskType, initial.BootDiskType) || dcl.IsZeroValue(des.BootDiskType) {
		cDes.BootDiskType = initial.BootDiskType
	} else {
		cDes.BootDiskType = des.BootDiskType
	}
	if dcl.IsZeroValue(des.BootDiskSizeGb) || (dcl.IsEmptyValueIndirect(des.BootDiskSizeGb) && dcl.IsEmptyValueIndirect(initial.BootDiskSizeGb)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		cDes.BootDiskSizeGb = initial.BootDiskSizeGb
	} else {
		cDes.BootDiskSizeGb = des.BootDiskSizeGb
	}
	if dcl.IsZeroValue(des.NumLocalSsds) || (dcl.IsEmptyValueIndirect(des.NumLocalSsds) && dcl.IsEmptyValueIndirect(initial.NumLocalSsds)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		cDes.NumLocalSsds = initial.NumLocalSsds
	} else {
		cDes.NumLocalSsds = des.NumLocalSsds
	}
	if dcl.StringCanonicalize(des.LocalSsdInterface, initial.LocalSsdInterface) || dcl.IsZeroValue(des.LocalSsdInterface) {
		cDes.LocalSsdInterface = initial.LocalSsdInterface
	} else {
		cDes.LocalSsdInterface = des.LocalSsdInterface
	}

	return cDes
}

// canonicalizeClusterConfigSecondaryWorkerConfigDiskConfigSlice canonicalizes desired elements
// against initial element-wise; on length mismatch each desired element is canonicalized
// against nil.
func canonicalizeClusterConfigSecondaryWorkerConfigDiskConfigSlice(des, initial []ClusterConfigSecondaryWorkerConfigDiskConfig, opts ...dcl.ApplyOption) []ClusterConfigSecondaryWorkerConfigDiskConfig {
	if dcl.IsEmptyValueIndirect(des) {
		return initial
	}

	if len(des) != len(initial) {

		items := make([]ClusterConfigSecondaryWorkerConfigDiskConfig, 0, len(des))
		for _, d := range des {
			cd := canonicalizeClusterConfigSecondaryWorkerConfigDiskConfig(&d, nil, opts...)
			if cd != nil {
				items = append(items, *cd)
			}
		}
		return items
	}

	items := make([]ClusterConfigSecondaryWorkerConfigDiskConfig, 0, len(des))
	for i, d := range des {
		cd := canonicalizeClusterConfigSecondaryWorkerConfigDiskConfig(&d, &initial[i], opts...)
		if cd != nil {
			items = append(items, *cd)
		}
	}
	return items

}

// canonicalizeNewClusterConfigSecondaryWorkerConfigDiskConfig reconciles the server-returned
// value (nw) with the desired value (des), keeping desired string spellings where equivalent.
func canonicalizeNewClusterConfigSecondaryWorkerConfigDiskConfig(c *Client, des, nw *ClusterConfigSecondaryWorkerConfigDiskConfig) *ClusterConfigSecondaryWorkerConfigDiskConfig {

	if des == nil {
		return nw
	}

	if nw == nil {
		if dcl.IsEmptyValueIndirect(des) {
			c.Config.Logger.Info("Found explicitly empty value for ClusterConfigSecondaryWorkerConfigDiskConfig while comparing non-nil desired to nil actual. Returning desired object.")
			return des
		}
		return nil
	}

	if dcl.StringCanonicalize(des.BootDiskType, nw.BootDiskType) {
		nw.BootDiskType = des.BootDiskType
	}
	if dcl.StringCanonicalize(des.LocalSsdInterface, nw.LocalSsdInterface) {
		nw.LocalSsdInterface = des.LocalSsdInterface
	}

	return nw
}

// canonicalizeNewClusterConfigSecondaryWorkerConfigDiskConfigSet performs unordered matching:
// diff-free pairs are canonicalized and removed from nw; leftover actual elements are
// appended unchanged.
func canonicalizeNewClusterConfigSecondaryWorkerConfigDiskConfigSet(c *Client, des, nw []ClusterConfigSecondaryWorkerConfigDiskConfig) []ClusterConfigSecondaryWorkerConfigDiskConfig {
	if des == nil {
		return nw
	}

	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
	var items []ClusterConfigSecondaryWorkerConfigDiskConfig
	for _, d := range des {
		matchedIndex := -1
		for i, n := range nw {
			if diffs, _ := compareClusterConfigSecondaryWorkerConfigDiskConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
				matchedIndex = i
				break
			}
		}
		if matchedIndex != -1 {
			items = append(items, *canonicalizeNewClusterConfigSecondaryWorkerConfigDiskConfig(c, &d, &nw[matchedIndex]))
			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
		}
	}
	// Also include elements in nw that are not matched in des.
	items = append(items, nw...)

	return items
}

// canonicalizeNewClusterConfigSecondaryWorkerConfigDiskConfigSlice canonicalizes pairwise by
// index; unequal lengths return nw untouched so the later diff can surface.
func canonicalizeNewClusterConfigSecondaryWorkerConfigDiskConfigSlice(c *Client, des, nw []ClusterConfigSecondaryWorkerConfigDiskConfig) []ClusterConfigSecondaryWorkerConfigDiskConfig {
	if des == nil {
		return nw
	}

	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
	// Return the original array.
	if len(des) != len(nw) {
		return nw
	}

	var items []ClusterConfigSecondaryWorkerConfigDiskConfig
	for i, d := range des {
		n := nw[i]
		items = append(items, *canonicalizeNewClusterConfigSecondaryWorkerConfigDiskConfig(c, &d, &n))
	}

	return items
}

// canonicalizeClusterConfigSecondaryWorkerConfigManagedGroupConfig merges desired and initial
// values. The type has no user-settable fields here, so the canonical desired value is
// always empty.
func canonicalizeClusterConfigSecondaryWorkerConfigManagedGroupConfig(des, initial *ClusterConfigSecondaryWorkerConfigManagedGroupConfig, opts ...dcl.ApplyOption) *ClusterConfigSecondaryWorkerConfigManagedGroupConfig {
	if des == nil {
		return initial
	}
	if des.empty {
		return des
	}

	if initial == nil {
		return des
	}

	cDes := &ClusterConfigSecondaryWorkerConfigManagedGroupConfig{}

	return cDes
}

// canonicalizeClusterConfigSecondaryWorkerConfigManagedGroupConfigSlice canonicalizes desired
// elements against initial element-wise; on length mismatch each desired element is
// canonicalized against nil.
func canonicalizeClusterConfigSecondaryWorkerConfigManagedGroupConfigSlice(des, initial []ClusterConfigSecondaryWorkerConfigManagedGroupConfig, opts ...dcl.ApplyOption) []ClusterConfigSecondaryWorkerConfigManagedGroupConfig {
	if dcl.IsEmptyValueIndirect(des) {
		return initial
	}

	if len(des) != len(initial) {

		items := make([]ClusterConfigSecondaryWorkerConfigManagedGroupConfig, 0, len(des))
		for _, d := range des {
			cd := canonicalizeClusterConfigSecondaryWorkerConfigManagedGroupConfig(&d, nil, opts...)
			if cd != nil {
				items = append(items, *cd)
			}
		}
		return items
	}

	items := make([]ClusterConfigSecondaryWorkerConfigManagedGroupConfig, 0, len(des))
	for i, d := range des {
		cd := canonicalizeClusterConfigSecondaryWorkerConfigManagedGroupConfig(&d, &initial[i], opts...)
		if cd != nil {
			items = append(items, *cd)
		}
	}
	return items

}

// canonicalizeNewClusterConfigSecondaryWorkerConfigManagedGroupConfig reconciles the
// server-returned value (nw) with the desired value (des), keeping desired string spellings
// where equivalent.
func canonicalizeNewClusterConfigSecondaryWorkerConfigManagedGroupConfig(c *Client, des, nw *ClusterConfigSecondaryWorkerConfigManagedGroupConfig) *ClusterConfigSecondaryWorkerConfigManagedGroupConfig {

	if des == nil {
		return nw
	}

	if nw == nil {
		if dcl.IsEmptyValueIndirect(des) {
			c.Config.Logger.Info("Found explicitly empty value for ClusterConfigSecondaryWorkerConfigManagedGroupConfig while comparing non-nil desired to nil actual. Returning desired object.")
			return des
		}
		return nil
	}

	if dcl.StringCanonicalize(des.InstanceTemplateName, nw.InstanceTemplateName) {
		nw.InstanceTemplateName = des.InstanceTemplateName
	}
	if dcl.StringCanonicalize(des.InstanceGroupManagerName, nw.InstanceGroupManagerName) {
		nw.InstanceGroupManagerName = des.InstanceGroupManagerName
	}

	return nw
}

// canonicalizeNewClusterConfigSecondaryWorkerConfigManagedGroupConfigSet performs unordered
// matching: diff-free pairs are canonicalized and removed from nw; leftover actual elements
// are appended unchanged.
func canonicalizeNewClusterConfigSecondaryWorkerConfigManagedGroupConfigSet(c *Client, des, nw []ClusterConfigSecondaryWorkerConfigManagedGroupConfig) []ClusterConfigSecondaryWorkerConfigManagedGroupConfig {
	if des == nil {
		return nw
	}

	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
	var items []ClusterConfigSecondaryWorkerConfigManagedGroupConfig
	for _, d := range des {
		matchedIndex := -1
		for i, n := range nw {
			if diffs, _ := compareClusterConfigSecondaryWorkerConfigManagedGroupConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
				matchedIndex = i
				break
			}
		}
		if matchedIndex != -1 {
			items = append(items, *canonicalizeNewClusterConfigSecondaryWorkerConfigManagedGroupConfig(c, &d, &nw[matchedIndex]))
			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
		}
	}
	// Also include elements in nw that are not matched in des.
	items = append(items, nw...)

	return items
}

// canonicalizeNewClusterConfigSecondaryWorkerConfigManagedGroupConfigSlice canonicalizes
// pairwise by index; unequal lengths return nw untouched so the later diff can surface.
func canonicalizeNewClusterConfigSecondaryWorkerConfigManagedGroupConfigSlice(c *Client, des, nw []ClusterConfigSecondaryWorkerConfigManagedGroupConfig) []ClusterConfigSecondaryWorkerConfigManagedGroupConfig {
	if des == nil {
		return nw
	}

	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
	// Return the original array.
	if len(des) != len(nw) {
		return nw
	}

	var items []ClusterConfigSecondaryWorkerConfigManagedGroupConfig
	for i, d := range des {
		n := nw[i]
		items = append(items, *canonicalizeNewClusterConfigSecondaryWorkerConfigManagedGroupConfig(c, &d, &n))
	}

	return items
}

// canonicalizeClusterConfigSecondaryWorkerConfigAccelerators merges desired and initial
// accelerator settings; equivalent fields collapse to the initial value so no spurious diff
// is produced.
func canonicalizeClusterConfigSecondaryWorkerConfigAccelerators(des, initial *ClusterConfigSecondaryWorkerConfigAccelerators, opts ...dcl.ApplyOption) *ClusterConfigSecondaryWorkerConfigAccelerators {
	if des == nil {
		return initial
	}
	if des.empty {
		return des
	}

	if initial == nil {
		return des
	}

	cDes := &ClusterConfigSecondaryWorkerConfigAccelerators{}

	if dcl.StringCanonicalize(des.AcceleratorType, initial.AcceleratorType) || dcl.IsZeroValue(des.AcceleratorType) {
		cDes.AcceleratorType = initial.AcceleratorType
	} else {
		cDes.AcceleratorType = des.AcceleratorType
	}
	if dcl.IsZeroValue(des.AcceleratorCount) || (dcl.IsEmptyValueIndirect(des.AcceleratorCount) && dcl.IsEmptyValueIndirect(initial.AcceleratorCount)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		cDes.AcceleratorCount = initial.AcceleratorCount
	} else {
		cDes.AcceleratorCount = des.AcceleratorCount
	}

	return cDes
}

// canonicalizeClusterConfigSecondaryWorkerConfigAcceleratorsSlice canonicalizes desired
// elements against initial element-wise; on length mismatch each desired element is
// canonicalized against nil.
// NOTE(review): this guards with des == nil while sibling *Slice canonicalizers use
// dcl.IsEmptyValueIndirect(des) — generated as-is; confirm against the DCL generator
// before changing.
func canonicalizeClusterConfigSecondaryWorkerConfigAcceleratorsSlice(des, initial []ClusterConfigSecondaryWorkerConfigAccelerators, opts ...dcl.ApplyOption) []ClusterConfigSecondaryWorkerConfigAccelerators {
	if des == nil {
		return initial
	}

	if len(des) != len(initial) {

		items := make([]ClusterConfigSecondaryWorkerConfigAccelerators, 0, len(des))
		for _, d := range des {
			cd := canonicalizeClusterConfigSecondaryWorkerConfigAccelerators(&d, nil, opts...)
			if cd != nil {
				items = append(items, *cd)
			}
		}
		return items
	}

	items := make([]ClusterConfigSecondaryWorkerConfigAccelerators, 0, len(des))
	for i, d := range des {
		cd := canonicalizeClusterConfigSecondaryWorkerConfigAccelerators(&d, &initial[i], opts...)
		if cd != nil {
			items = append(items, *cd)
		}
	}
	return items

}

// canonicalizeNewClusterConfigSecondaryWorkerConfigAccelerators reconciles the
// server-returned value (nw) with the desired value (des), keeping the desired
// AcceleratorType spelling when the two are equivalent.
func canonicalizeNewClusterConfigSecondaryWorkerConfigAccelerators(c *Client, des, nw *ClusterConfigSecondaryWorkerConfigAccelerators) *ClusterConfigSecondaryWorkerConfigAccelerators {

	if des == nil {
		return nw
	}

	if nw == nil {
		if dcl.IsEmptyValueIndirect(des) {
			c.Config.Logger.Info("Found explicitly empty value for ClusterConfigSecondaryWorkerConfigAccelerators while comparing non-nil desired to nil actual. Returning desired object.")
			return des
		}
		return nil
	}

	if dcl.StringCanonicalize(des.AcceleratorType, nw.AcceleratorType) {
		nw.AcceleratorType = des.AcceleratorType
	}

	return nw
}

// canonicalizeNewClusterConfigSecondaryWorkerConfigAcceleratorsSet performs unordered
// matching: diff-free pairs are canonicalized and removed from nw; leftover actual elements
// are appended unchanged.
func canonicalizeNewClusterConfigSecondaryWorkerConfigAcceleratorsSet(c *Client, des, nw []ClusterConfigSecondaryWorkerConfigAccelerators) []ClusterConfigSecondaryWorkerConfigAccelerators {
	if des == nil {
		return nw
	}

	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
	var items []ClusterConfigSecondaryWorkerConfigAccelerators
	for _, d := range des {
		matchedIndex := -1
		for i, n := range nw {
			if diffs, _ := compareClusterConfigSecondaryWorkerConfigAcceleratorsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
				matchedIndex = i
				break
			}
		}
		if matchedIndex != -1 {
			items = append(items, *canonicalizeNewClusterConfigSecondaryWorkerConfigAccelerators(c, &d, &nw[matchedIndex]))
			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
		}
	}
	// Also include elements in nw that are not matched in des.
	items = append(items, nw...)

	return items
}

// canonicalizeNewClusterConfigSecondaryWorkerConfigAcceleratorsSlice canonicalizes pairwise
// by index; unequal lengths return nw untouched so the later diff can surface.
func canonicalizeNewClusterConfigSecondaryWorkerConfigAcceleratorsSlice(c *Client, des, nw []ClusterConfigSecondaryWorkerConfigAccelerators) []ClusterConfigSecondaryWorkerConfigAccelerators {
	if des == nil {
		return nw
	}

	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
	// Return the original array.
	if len(des) != len(nw) {
		return nw
	}

	var items []ClusterConfigSecondaryWorkerConfigAccelerators
	for i, d := range des {
		n := nw[i]
		items = append(items, *canonicalizeNewClusterConfigSecondaryWorkerConfigAccelerators(c, &d, &n))
	}

	return items
}

// canonicalizeClusterConfigSecondaryWorkerConfigInstanceReferences merges desired and initial
// values; each string field collapses to the initial value when the two are equivalent so no
// spurious diff is produced.
func canonicalizeClusterConfigSecondaryWorkerConfigInstanceReferences(des, initial *ClusterConfigSecondaryWorkerConfigInstanceReferences, opts ...dcl.ApplyOption) *ClusterConfigSecondaryWorkerConfigInstanceReferences {
	if des == nil {
		return initial
	}
	if des.empty {
		return des
	}

	if initial == nil {
		return des
	}

	cDes := &ClusterConfigSecondaryWorkerConfigInstanceReferences{}

	if dcl.StringCanonicalize(des.InstanceName, initial.InstanceName) || dcl.IsZeroValue(des.InstanceName) {
		cDes.InstanceName = initial.InstanceName
	} else {
		cDes.InstanceName = des.InstanceName
	}
	if dcl.StringCanonicalize(des.InstanceId, initial.InstanceId) || dcl.IsZeroValue(des.InstanceId) {
		cDes.InstanceId = initial.InstanceId
	} else {
		cDes.InstanceId = des.InstanceId
	}
	if dcl.StringCanonicalize(des.PublicKey, initial.PublicKey) || dcl.IsZeroValue(des.PublicKey) {
		cDes.PublicKey = initial.PublicKey
	} else {
		cDes.PublicKey = des.PublicKey
	}
	if dcl.StringCanonicalize(des.PublicEciesKey, initial.PublicEciesKey) || dcl.IsZeroValue(des.PublicEciesKey) {
		cDes.PublicEciesKey = initial.PublicEciesKey
	} else {
		cDes.PublicEciesKey = des.PublicEciesKey
	}

	return cDes
}

// canonicalizeClusterConfigSecondaryWorkerConfigInstanceReferencesSlice canonicalizes desired
// elements against initial element-wise; on length mismatch each desired element is
// canonicalized against nil.
func canonicalizeClusterConfigSecondaryWorkerConfigInstanceReferencesSlice(des, initial []ClusterConfigSecondaryWorkerConfigInstanceReferences, opts ...dcl.ApplyOption) []ClusterConfigSecondaryWorkerConfigInstanceReferences {
	if dcl.IsEmptyValueIndirect(des) {
		return initial
	}

	if len(des) != len(initial) {

		items := make([]ClusterConfigSecondaryWorkerConfigInstanceReferences, 0, len(des))
		for _, d := range des {
			cd := canonicalizeClusterConfigSecondaryWorkerConfigInstanceReferences(&d, nil, opts...)
			if cd != nil {
				items = append(items, *cd)
			}
		}
		return items
	}

	items := make([]ClusterConfigSecondaryWorkerConfigInstanceReferences, 0, len(des))
	for i, d := range des {
		cd := canonicalizeClusterConfigSecondaryWorkerConfigInstanceReferences(&d, &initial[i], opts...)
		if cd != nil {
			items = append(items, *cd)
		}
	}
	return items

}

// canonicalizeNewClusterConfigSecondaryWorkerConfigInstanceReferences reconciles the
// server-returned value (nw) with the desired value (des), keeping desired string spellings
// where equivalent.
func canonicalizeNewClusterConfigSecondaryWorkerConfigInstanceReferences(c *Client, des, nw *ClusterConfigSecondaryWorkerConfigInstanceReferences) *ClusterConfigSecondaryWorkerConfigInstanceReferences {

	if des == nil {
		return nw
	}

	if nw == nil {
		if dcl.IsEmptyValueIndirect(des) {
			c.Config.Logger.Info("Found explicitly empty value for ClusterConfigSecondaryWorkerConfigInstanceReferences while comparing non-nil desired to nil actual. Returning desired object.")
			return des
		}
		return nil
	}

	if dcl.StringCanonicalize(des.InstanceName, nw.InstanceName) {
		nw.InstanceName = des.InstanceName
	}
	if dcl.StringCanonicalize(des.InstanceId, nw.InstanceId) {
		nw.InstanceId = des.InstanceId
	}
	if dcl.StringCanonicalize(des.PublicKey, nw.PublicKey) {
		nw.PublicKey = des.PublicKey
	}
	if dcl.StringCanonicalize(des.PublicEciesKey, nw.PublicEciesKey) {
		nw.PublicEciesKey = des.PublicEciesKey
	}

	return nw
}

// canonicalizeNewClusterConfigSecondaryWorkerConfigInstanceReferencesSet performs unordered
// matching: diff-free pairs are canonicalized and removed from nw; leftover actual elements
// are appended unchanged.
func canonicalizeNewClusterConfigSecondaryWorkerConfigInstanceReferencesSet(c *Client, des, nw []ClusterConfigSecondaryWorkerConfigInstanceReferences) []ClusterConfigSecondaryWorkerConfigInstanceReferences {
	if des == nil {
		return nw
	}

	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
	var items []ClusterConfigSecondaryWorkerConfigInstanceReferences
	for _, d := range des {
		matchedIndex := -1
		for i, n := range nw {
			if diffs, _ := compareClusterConfigSecondaryWorkerConfigInstanceReferencesNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
				matchedIndex = i
				break
			}
		}
		if matchedIndex != -1 {
			items = append(items, *canonicalizeNewClusterConfigSecondaryWorkerConfigInstanceReferences(c, &d, &nw[matchedIndex]))
			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
		}
	}
	// Also include elements in nw that are not matched in des.
	items = append(items, nw...)

	return items
}

// canonicalizeNewClusterConfigSecondaryWorkerConfigInstanceReferencesSlice canonicalizes
// pairwise by index; unequal lengths return nw untouched so the later diff can surface.
func canonicalizeNewClusterConfigSecondaryWorkerConfigInstanceReferencesSlice(c *Client, des, nw []ClusterConfigSecondaryWorkerConfigInstanceReferences) []ClusterConfigSecondaryWorkerConfigInstanceReferences {
	if des == nil {
		return nw
	}

	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
	// Return the original array.
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterConfigSecondaryWorkerConfigInstanceReferences + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterConfigSecondaryWorkerConfigInstanceReferences(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterConfigSoftwareConfig(des, initial *ClusterConfigSoftwareConfig, opts ...dcl.ApplyOption) *ClusterConfigSoftwareConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterConfigSoftwareConfig{} + + if dcl.StringCanonicalize(des.ImageVersion, initial.ImageVersion) || dcl.IsZeroValue(des.ImageVersion) { + cDes.ImageVersion = initial.ImageVersion + } else { + cDes.ImageVersion = des.ImageVersion + } + if canonicalizeSoftwareConfigProperties(des.Properties, initial.Properties) || dcl.IsZeroValue(des.Properties) { + cDes.Properties = initial.Properties + } else { + cDes.Properties = des.Properties + } + if dcl.IsZeroValue(des.OptionalComponents) || (dcl.IsEmptyValueIndirect(des.OptionalComponents) && dcl.IsEmptyValueIndirect(initial.OptionalComponents)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.OptionalComponents = initial.OptionalComponents + } else { + cDes.OptionalComponents = des.OptionalComponents + } + + return cDes +} + +func canonicalizeClusterConfigSoftwareConfigSlice(des, initial []ClusterConfigSoftwareConfig, opts ...dcl.ApplyOption) []ClusterConfigSoftwareConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterConfigSoftwareConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterConfigSoftwareConfig(&d, nil, opts...) 
            if cd != nil {
                items = append(items, *cd)
            }
        }
        return items
    }

    items := make([]ClusterConfigSoftwareConfig, 0, len(des))
    for i, d := range des {
        cd := canonicalizeClusterConfigSoftwareConfig(&d, &initial[i], opts...)
        if cd != nil {
            items = append(items, *cd)
        }
    }
    return items

}

// canonicalizeNewClusterConfigSoftwareConfig reconciles desired (des) with the post-apply actual (nw),
// copying desired values into nw wherever the two are canonically equal.
func canonicalizeNewClusterConfigSoftwareConfig(c *Client, des, nw *ClusterConfigSoftwareConfig) *ClusterConfigSoftwareConfig {

    if des == nil {
        return nw
    }

    if nw == nil {
        if dcl.IsEmptyValueIndirect(des) {
            c.Config.Logger.Info("Found explicitly empty value for ClusterConfigSoftwareConfig while comparing non-nil desired to nil actual. Returning desired object.")
            return des
        }
        return nil
    }

    if dcl.StringCanonicalize(des.ImageVersion, nw.ImageVersion) {
        nw.ImageVersion = des.ImageVersion
    }
    if canonicalizeSoftwareConfigProperties(des.Properties, nw.Properties) {
        nw.Properties = des.Properties
    }

    return nw
}

// canonicalizeNewClusterConfigSoftwareConfigSet treats des and nw as unordered sets: diff-equal pairs are
// canonicalized, and the unmatched remainder of nw is appended at the end.
func canonicalizeNewClusterConfigSoftwareConfigSet(c *Client, des, nw []ClusterConfigSoftwareConfig) []ClusterConfigSoftwareConfig {
    if des == nil {
        return nw
    }

    // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
    var items []ClusterConfigSoftwareConfig
    for _, d := range des {
        matchedIndex := -1
        for i, n := range nw {
            if diffs, _ := compareClusterConfigSoftwareConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
                matchedIndex = i
                break
            }
        }
        if matchedIndex != -1 {
            items = append(items, *canonicalizeNewClusterConfigSoftwareConfig(c, &d, &nw[matchedIndex]))
            nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
        }
    }
    // Also include elements in nw that are not matched in des.
    items = append(items, nw...)

    return items
}

// canonicalizeNewClusterConfigSoftwareConfigSlice canonicalizes pairwise by index; unequal lengths are
// returned untouched so the later diff reports the mismatch.
func canonicalizeNewClusterConfigSoftwareConfigSlice(c *Client, des, nw []ClusterConfigSoftwareConfig) []ClusterConfigSoftwareConfig {
    if des == nil {
        return nw
    }

    // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
    // Return the original array.
    if len(des) != len(nw) {
        return nw
    }

    var items []ClusterConfigSoftwareConfig
    for i, d := range des {
        n := nw[i]
        items = append(items, *canonicalizeNewClusterConfigSoftwareConfig(c, &d, &n))
    }

    return items
}

// canonicalizeClusterConfigInitializationActions merges a desired initialization action with the initial
// (server) state, preferring initial values wherever the desired value is equivalent or unset.
func canonicalizeClusterConfigInitializationActions(des, initial *ClusterConfigInitializationActions, opts ...dcl.ApplyOption) *ClusterConfigInitializationActions {
    if des == nil {
        return initial
    }
    // An explicitly-empty desired object is passed through unchanged.
    if des.empty {
        return des
    }

    if initial == nil {
        return des
    }

    cDes := &ClusterConfigInitializationActions{}

    if dcl.StringCanonicalize(des.ExecutableFile, initial.ExecutableFile) || dcl.IsZeroValue(des.ExecutableFile) {
        cDes.ExecutableFile = initial.ExecutableFile
    } else {
        cDes.ExecutableFile = des.ExecutableFile
    }
    if dcl.StringCanonicalize(des.ExecutionTimeout, initial.ExecutionTimeout) || dcl.IsZeroValue(des.ExecutionTimeout) {
        cDes.ExecutionTimeout = initial.ExecutionTimeout
    } else {
        cDes.ExecutionTimeout = des.ExecutionTimeout
    }

    return cDes
}

// canonicalizeClusterConfigInitializationActionsSlice canonicalizes des element-wise against initial.
// Unlike the sibling Slice helpers this checks `des == nil` (not IsEmptyValueIndirect): this is a true
// repeated field, so an empty-but-non-nil desired list is meaningful and must not fall back to initial.
func canonicalizeClusterConfigInitializationActionsSlice(des, initial []ClusterConfigInitializationActions, opts ...dcl.ApplyOption) []ClusterConfigInitializationActions {
    if des == nil {
        return initial
    }

    if len(des) != len(initial) {

        items := make([]ClusterConfigInitializationActions, 0, len(des))
        for _, d := range des {
            cd := canonicalizeClusterConfigInitializationActions(&d, nil, opts...)
            if cd != nil {
                items = append(items, *cd)
            }
        }
        return items
    }

    items := make([]ClusterConfigInitializationActions, 0, len(des))
    for i, d := range des {
        cd := canonicalizeClusterConfigInitializationActions(&d, &initial[i], opts...)
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterConfigInitializationActions(c *Client, des, nw *ClusterConfigInitializationActions) *ClusterConfigInitializationActions { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterConfigInitializationActions while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.ExecutableFile, nw.ExecutableFile) { + nw.ExecutableFile = des.ExecutableFile + } + if dcl.StringCanonicalize(des.ExecutionTimeout, nw.ExecutionTimeout) { + nw.ExecutionTimeout = des.ExecutionTimeout + } + + return nw +} + +func canonicalizeNewClusterConfigInitializationActionsSet(c *Client, des, nw []ClusterConfigInitializationActions) []ClusterConfigInitializationActions { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterConfigInitializationActions + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterConfigInitializationActionsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterConfigInitializationActions(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterConfigInitializationActionsSlice(c *Client, des, nw []ClusterConfigInitializationActions) []ClusterConfigInitializationActions { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterConfigInitializationActions + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterConfigInitializationActions(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterConfigEncryptionConfig(des, initial *ClusterConfigEncryptionConfig, opts ...dcl.ApplyOption) *ClusterConfigEncryptionConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterConfigEncryptionConfig{} + + if dcl.IsZeroValue(des.GcePdKmsKeyName) || (dcl.IsEmptyValueIndirect(des.GcePdKmsKeyName) && dcl.IsEmptyValueIndirect(initial.GcePdKmsKeyName)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.GcePdKmsKeyName = initial.GcePdKmsKeyName + } else { + cDes.GcePdKmsKeyName = des.GcePdKmsKeyName + } + + return cDes +} + +func canonicalizeClusterConfigEncryptionConfigSlice(des, initial []ClusterConfigEncryptionConfig, opts ...dcl.ApplyOption) []ClusterConfigEncryptionConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterConfigEncryptionConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterConfigEncryptionConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterConfigEncryptionConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterConfigEncryptionConfig(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterConfigEncryptionConfig(c *Client, des, nw *ClusterConfigEncryptionConfig) *ClusterConfigEncryptionConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterConfigEncryptionConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewClusterConfigEncryptionConfigSet(c *Client, des, nw []ClusterConfigEncryptionConfig) []ClusterConfigEncryptionConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterConfigEncryptionConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterConfigEncryptionConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterConfigEncryptionConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterConfigEncryptionConfigSlice(c *Client, des, nw []ClusterConfigEncryptionConfig) []ClusterConfigEncryptionConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
    if len(des) != len(nw) {
        return nw
    }

    var items []ClusterConfigEncryptionConfig
    for i, d := range des {
        n := nw[i]
        items = append(items, *canonicalizeNewClusterConfigEncryptionConfig(c, &d, &n))
    }

    return items
}

// canonicalizeClusterConfigAutoscalingConfig merges the desired autoscaling config with the initial
// (server) state, preferring the initial value when the desired value is equivalent or unset.
func canonicalizeClusterConfigAutoscalingConfig(des, initial *ClusterConfigAutoscalingConfig, opts ...dcl.ApplyOption) *ClusterConfigAutoscalingConfig {
    if des == nil {
        return initial
    }
    // An explicitly-empty desired object is passed through unchanged.
    if des.empty {
        return des
    }

    if initial == nil {
        return des
    }

    cDes := &ClusterConfigAutoscalingConfig{}

    if dcl.IsZeroValue(des.Policy) || (dcl.IsEmptyValueIndirect(des.Policy) && dcl.IsEmptyValueIndirect(initial.Policy)) {
        // Desired and initial values are equivalent, so set canonical desired value to initial value.
        cDes.Policy = initial.Policy
    } else {
        cDes.Policy = des.Policy
    }

    return cDes
}

// canonicalizeClusterConfigAutoscalingConfigSlice canonicalizes des element-wise against initial; when
// the lengths differ, every desired element is canonicalized with no initial counterpart.
func canonicalizeClusterConfigAutoscalingConfigSlice(des, initial []ClusterConfigAutoscalingConfig, opts ...dcl.ApplyOption) []ClusterConfigAutoscalingConfig {
    if dcl.IsEmptyValueIndirect(des) {
        return initial
    }

    if len(des) != len(initial) {

        items := make([]ClusterConfigAutoscalingConfig, 0, len(des))
        for _, d := range des {
            cd := canonicalizeClusterConfigAutoscalingConfig(&d, nil, opts...)
            if cd != nil {
                items = append(items, *cd)
            }
        }
        return items
    }

    items := make([]ClusterConfigAutoscalingConfig, 0, len(des))
    for i, d := range des {
        cd := canonicalizeClusterConfigAutoscalingConfig(&d, &initial[i], opts...)
        if cd != nil {
            items = append(items, *cd)
        }
    }
    return items

}

// canonicalizeNewClusterConfigAutoscalingConfig reconciles desired (des) with the post-apply actual (nw).
// Policy is a server-canonical resource reference, so no field-level copying is needed here.
func canonicalizeNewClusterConfigAutoscalingConfig(c *Client, des, nw *ClusterConfigAutoscalingConfig) *ClusterConfigAutoscalingConfig {

    if des == nil {
        return nw
    }

    if nw == nil {
        if dcl.IsEmptyValueIndirect(des) {
            c.Config.Logger.Info("Found explicitly empty value for ClusterConfigAutoscalingConfig while comparing non-nil desired to nil actual. Returning desired object.")
            return des
        }
        return nil
    }

    return nw
}

// canonicalizeNewClusterConfigAutoscalingConfigSet treats des and nw as unordered sets: diff-equal pairs
// are canonicalized, and the unmatched remainder of nw is appended at the end.
func canonicalizeNewClusterConfigAutoscalingConfigSet(c *Client, des, nw []ClusterConfigAutoscalingConfig) []ClusterConfigAutoscalingConfig {
    if des == nil {
        return nw
    }

    // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
    var items []ClusterConfigAutoscalingConfig
    for _, d := range des {
        matchedIndex := -1
        for i, n := range nw {
            if diffs, _ := compareClusterConfigAutoscalingConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
                matchedIndex = i
                break
            }
        }
        if matchedIndex != -1 {
            items = append(items, *canonicalizeNewClusterConfigAutoscalingConfig(c, &d, &nw[matchedIndex]))
            nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
        }
    }
    // Also include elements in nw that are not matched in des.
    items = append(items, nw...)

    return items
}

// canonicalizeNewClusterConfigAutoscalingConfigSlice canonicalizes pairwise by index; unequal lengths are
// returned untouched so the later diff reports the mismatch.
func canonicalizeNewClusterConfigAutoscalingConfigSlice(c *Client, des, nw []ClusterConfigAutoscalingConfig) []ClusterConfigAutoscalingConfig {
    if des == nil {
        return nw
    }

    // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
    // Return the original array.
    if len(des) != len(nw) {
        return nw
    }

    var items []ClusterConfigAutoscalingConfig
    for i, d := range des {
        n := nw[i]
        items = append(items, *canonicalizeNewClusterConfigAutoscalingConfig(c, &d, &n))
    }

    return items
}

// canonicalizeClusterConfigSecurityConfig merges the desired security config with the initial (server)
// state by delegating to the nested Kerberos and identity canonicalizers.
func canonicalizeClusterConfigSecurityConfig(des, initial *ClusterConfigSecurityConfig, opts ...dcl.ApplyOption) *ClusterConfigSecurityConfig {
    if des == nil {
        return initial
    }
    // An explicitly-empty desired object is passed through unchanged.
    if des.empty {
        return des
    }

    if initial == nil {
        return des
    }

    cDes := &ClusterConfigSecurityConfig{}

    cDes.KerberosConfig = canonicalizeClusterConfigSecurityConfigKerberosConfig(des.KerberosConfig, initial.KerberosConfig, opts...)
+ cDes.IdentityConfig = canonicalizeClusterConfigSecurityConfigIdentityConfig(des.IdentityConfig, initial.IdentityConfig, opts...) + + return cDes +} + +func canonicalizeClusterConfigSecurityConfigSlice(des, initial []ClusterConfigSecurityConfig, opts ...dcl.ApplyOption) []ClusterConfigSecurityConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterConfigSecurityConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterConfigSecurityConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterConfigSecurityConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterConfigSecurityConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterConfigSecurityConfig(c *Client, des, nw *ClusterConfigSecurityConfig) *ClusterConfigSecurityConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterConfigSecurityConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + nw.KerberosConfig = canonicalizeNewClusterConfigSecurityConfigKerberosConfig(c, des.KerberosConfig, nw.KerberosConfig) + nw.IdentityConfig = canonicalizeNewClusterConfigSecurityConfigIdentityConfig(c, des.IdentityConfig, nw.IdentityConfig) + + return nw +} + +func canonicalizeNewClusterConfigSecurityConfigSet(c *Client, des, nw []ClusterConfigSecurityConfig) []ClusterConfigSecurityConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
    var items []ClusterConfigSecurityConfig
    for _, d := range des {
        matchedIndex := -1
        for i, n := range nw {
            if diffs, _ := compareClusterConfigSecurityConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
                matchedIndex = i
                break
            }
        }
        if matchedIndex != -1 {
            items = append(items, *canonicalizeNewClusterConfigSecurityConfig(c, &d, &nw[matchedIndex]))
            nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
        }
    }
    // Also include elements in nw that are not matched in des.
    items = append(items, nw...)

    return items
}

// canonicalizeNewClusterConfigSecurityConfigSlice canonicalizes pairwise by index; unequal lengths are
// returned untouched so the later diff reports the mismatch.
func canonicalizeNewClusterConfigSecurityConfigSlice(c *Client, des, nw []ClusterConfigSecurityConfig) []ClusterConfigSecurityConfig {
    if des == nil {
        return nw
    }

    // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
    // Return the original array.
    if len(des) != len(nw) {
        return nw
    }

    var items []ClusterConfigSecurityConfig
    for i, d := range des {
        n := nw[i]
        items = append(items, *canonicalizeNewClusterConfigSecurityConfig(c, &d, &n))
    }

    return items
}

// canonicalizeClusterConfigSecurityConfigKerberosConfig merges the desired Kerberos config with the
// initial (server) state field-by-field, preferring initial values wherever the desired value is
// equivalent or unset.
func canonicalizeClusterConfigSecurityConfigKerberosConfig(des, initial *ClusterConfigSecurityConfigKerberosConfig, opts ...dcl.ApplyOption) *ClusterConfigSecurityConfigKerberosConfig {
    if des == nil {
        return initial
    }
    // An explicitly-empty desired object is passed through unchanged.
    if des.empty {
        return des
    }

    if initial == nil {
        return des
    }

    cDes := &ClusterConfigSecurityConfigKerberosConfig{}

    if dcl.BoolCanonicalize(des.EnableKerberos, initial.EnableKerberos) || dcl.IsZeroValue(des.EnableKerberos) {
        cDes.EnableKerberos = initial.EnableKerberos
    } else {
        cDes.EnableKerberos = des.EnableKerberos
    }
    if dcl.StringCanonicalize(des.RootPrincipalPassword, initial.RootPrincipalPassword) || dcl.IsZeroValue(des.RootPrincipalPassword) {
        cDes.RootPrincipalPassword = initial.RootPrincipalPassword
    } else {
        cDes.RootPrincipalPassword = des.RootPrincipalPassword
    }
    if dcl.IsZeroValue(des.KmsKey) || (dcl.IsEmptyValueIndirect(des.KmsKey) && dcl.IsEmptyValueIndirect(initial.KmsKey)) {
        // Desired and initial values are equivalent, so set canonical desired value to initial value.
        cDes.KmsKey = initial.KmsKey
    } else {
        cDes.KmsKey = des.KmsKey
    }
    if dcl.StringCanonicalize(des.Keystore, initial.Keystore) || dcl.IsZeroValue(des.Keystore) {
        cDes.Keystore = initial.Keystore
    } else {
        cDes.Keystore = des.Keystore
    }
    if dcl.StringCanonicalize(des.Truststore, initial.Truststore) || dcl.IsZeroValue(des.Truststore) {
        cDes.Truststore = initial.Truststore
    } else {
        cDes.Truststore = des.Truststore
    }
    if dcl.StringCanonicalize(des.KeystorePassword, initial.KeystorePassword) || dcl.IsZeroValue(des.KeystorePassword) {
        cDes.KeystorePassword = initial.KeystorePassword
    } else {
        cDes.KeystorePassword = des.KeystorePassword
    }
    if dcl.StringCanonicalize(des.KeyPassword, initial.KeyPassword) || dcl.IsZeroValue(des.KeyPassword) {
        cDes.KeyPassword = initial.KeyPassword
    } else {
        cDes.KeyPassword = des.KeyPassword
    }
    if dcl.StringCanonicalize(des.TruststorePassword, initial.TruststorePassword) || dcl.IsZeroValue(des.TruststorePassword) {
        cDes.TruststorePassword = initial.TruststorePassword
    } else {
        cDes.TruststorePassword = des.TruststorePassword
    }
    if dcl.StringCanonicalize(des.CrossRealmTrustRealm, initial.CrossRealmTrustRealm) || dcl.IsZeroValue(des.CrossRealmTrustRealm) {
        cDes.CrossRealmTrustRealm = initial.CrossRealmTrustRealm
    } else {
        cDes.CrossRealmTrustRealm = des.CrossRealmTrustRealm
    }
    if dcl.StringCanonicalize(des.CrossRealmTrustKdc, initial.CrossRealmTrustKdc) || dcl.IsZeroValue(des.CrossRealmTrustKdc) {
        cDes.CrossRealmTrustKdc = initial.CrossRealmTrustKdc
    } else {
        cDes.CrossRealmTrustKdc = des.CrossRealmTrustKdc
    }
    if dcl.StringCanonicalize(des.CrossRealmTrustAdminServer, initial.CrossRealmTrustAdminServer) || dcl.IsZeroValue(des.CrossRealmTrustAdminServer) {
        cDes.CrossRealmTrustAdminServer = initial.CrossRealmTrustAdminServer
    } else {
cDes.CrossRealmTrustAdminServer = des.CrossRealmTrustAdminServer + } + if dcl.StringCanonicalize(des.CrossRealmTrustSharedPassword, initial.CrossRealmTrustSharedPassword) || dcl.IsZeroValue(des.CrossRealmTrustSharedPassword) { + cDes.CrossRealmTrustSharedPassword = initial.CrossRealmTrustSharedPassword + } else { + cDes.CrossRealmTrustSharedPassword = des.CrossRealmTrustSharedPassword + } + if dcl.StringCanonicalize(des.KdcDbKey, initial.KdcDbKey) || dcl.IsZeroValue(des.KdcDbKey) { + cDes.KdcDbKey = initial.KdcDbKey + } else { + cDes.KdcDbKey = des.KdcDbKey + } + if dcl.IsZeroValue(des.TgtLifetimeHours) || (dcl.IsEmptyValueIndirect(des.TgtLifetimeHours) && dcl.IsEmptyValueIndirect(initial.TgtLifetimeHours)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.TgtLifetimeHours = initial.TgtLifetimeHours + } else { + cDes.TgtLifetimeHours = des.TgtLifetimeHours + } + if dcl.StringCanonicalize(des.Realm, initial.Realm) || dcl.IsZeroValue(des.Realm) { + cDes.Realm = initial.Realm + } else { + cDes.Realm = des.Realm + } + + return cDes +} + +func canonicalizeClusterConfigSecurityConfigKerberosConfigSlice(des, initial []ClusterConfigSecurityConfigKerberosConfig, opts ...dcl.ApplyOption) []ClusterConfigSecurityConfigKerberosConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterConfigSecurityConfigKerberosConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterConfigSecurityConfigKerberosConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterConfigSecurityConfigKerberosConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterConfigSecurityConfigKerberosConfig(&d, &initial[i], opts...) 
        if cd != nil {
            items = append(items, *cd)
        }
    }
    return items

}

// canonicalizeNewClusterConfigSecurityConfigKerberosConfig reconciles desired (des) with the post-apply
// actual (nw), copying desired values into nw wherever the two are canonically equal. KmsKey and
// TgtLifetimeHours are server-canonical and are intentionally not copied here.
func canonicalizeNewClusterConfigSecurityConfigKerberosConfig(c *Client, des, nw *ClusterConfigSecurityConfigKerberosConfig) *ClusterConfigSecurityConfigKerberosConfig {

    if des == nil {
        return nw
    }

    if nw == nil {
        if dcl.IsEmptyValueIndirect(des) {
            c.Config.Logger.Info("Found explicitly empty value for ClusterConfigSecurityConfigKerberosConfig while comparing non-nil desired to nil actual. Returning desired object.")
            return des
        }
        return nil
    }

    if dcl.BoolCanonicalize(des.EnableKerberos, nw.EnableKerberos) {
        nw.EnableKerberos = des.EnableKerberos
    }
    if dcl.StringCanonicalize(des.RootPrincipalPassword, nw.RootPrincipalPassword) {
        nw.RootPrincipalPassword = des.RootPrincipalPassword
    }
    if dcl.StringCanonicalize(des.Keystore, nw.Keystore) {
        nw.Keystore = des.Keystore
    }
    if dcl.StringCanonicalize(des.Truststore, nw.Truststore) {
        nw.Truststore = des.Truststore
    }
    if dcl.StringCanonicalize(des.KeystorePassword, nw.KeystorePassword) {
        nw.KeystorePassword = des.KeystorePassword
    }
    if dcl.StringCanonicalize(des.KeyPassword, nw.KeyPassword) {
        nw.KeyPassword = des.KeyPassword
    }
    if dcl.StringCanonicalize(des.TruststorePassword, nw.TruststorePassword) {
        nw.TruststorePassword = des.TruststorePassword
    }
    if dcl.StringCanonicalize(des.CrossRealmTrustRealm, nw.CrossRealmTrustRealm) {
        nw.CrossRealmTrustRealm = des.CrossRealmTrustRealm
    }
    if dcl.StringCanonicalize(des.CrossRealmTrustKdc, nw.CrossRealmTrustKdc) {
        nw.CrossRealmTrustKdc = des.CrossRealmTrustKdc
    }
    if dcl.StringCanonicalize(des.CrossRealmTrustAdminServer, nw.CrossRealmTrustAdminServer) {
        nw.CrossRealmTrustAdminServer = des.CrossRealmTrustAdminServer
    }
    if dcl.StringCanonicalize(des.CrossRealmTrustSharedPassword, nw.CrossRealmTrustSharedPassword) {
        nw.CrossRealmTrustSharedPassword = des.CrossRealmTrustSharedPassword
    }
    if dcl.StringCanonicalize(des.KdcDbKey, nw.KdcDbKey) {
        nw.KdcDbKey = des.KdcDbKey
    }
    if dcl.StringCanonicalize(des.Realm, nw.Realm) {
        nw.Realm = des.Realm
    }

    return nw
}

// canonicalizeNewClusterConfigSecurityConfigKerberosConfigSet treats des and nw as unordered sets:
// diff-equal pairs are canonicalized, and the unmatched remainder of nw is appended at the end.
func canonicalizeNewClusterConfigSecurityConfigKerberosConfigSet(c *Client, des, nw []ClusterConfigSecurityConfigKerberosConfig) []ClusterConfigSecurityConfigKerberosConfig {
    if des == nil {
        return nw
    }

    // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
    var items []ClusterConfigSecurityConfigKerberosConfig
    for _, d := range des {
        matchedIndex := -1
        for i, n := range nw {
            if diffs, _ := compareClusterConfigSecurityConfigKerberosConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
                matchedIndex = i
                break
            }
        }
        if matchedIndex != -1 {
            items = append(items, *canonicalizeNewClusterConfigSecurityConfigKerberosConfig(c, &d, &nw[matchedIndex]))
            nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
        }
    }
    // Also include elements in nw that are not matched in des.
    items = append(items, nw...)

    return items
}

// canonicalizeNewClusterConfigSecurityConfigKerberosConfigSlice canonicalizes pairwise by index; unequal
// lengths are returned untouched so the later diff reports the mismatch.
func canonicalizeNewClusterConfigSecurityConfigKerberosConfigSlice(c *Client, des, nw []ClusterConfigSecurityConfigKerberosConfig) []ClusterConfigSecurityConfigKerberosConfig {
    if des == nil {
        return nw
    }

    // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
    // Return the original array.
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterConfigSecurityConfigKerberosConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterConfigSecurityConfigKerberosConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterConfigSecurityConfigIdentityConfig(des, initial *ClusterConfigSecurityConfigIdentityConfig, opts ...dcl.ApplyOption) *ClusterConfigSecurityConfigIdentityConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterConfigSecurityConfigIdentityConfig{} + + if dcl.IsZeroValue(des.UserServiceAccountMapping) || (dcl.IsEmptyValueIndirect(des.UserServiceAccountMapping) && dcl.IsEmptyValueIndirect(initial.UserServiceAccountMapping)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.UserServiceAccountMapping = initial.UserServiceAccountMapping + } else { + cDes.UserServiceAccountMapping = des.UserServiceAccountMapping + } + + return cDes +} + +func canonicalizeClusterConfigSecurityConfigIdentityConfigSlice(des, initial []ClusterConfigSecurityConfigIdentityConfig, opts ...dcl.ApplyOption) []ClusterConfigSecurityConfigIdentityConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterConfigSecurityConfigIdentityConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterConfigSecurityConfigIdentityConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterConfigSecurityConfigIdentityConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterConfigSecurityConfigIdentityConfig(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterConfigSecurityConfigIdentityConfig(c *Client, des, nw *ClusterConfigSecurityConfigIdentityConfig) *ClusterConfigSecurityConfigIdentityConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterConfigSecurityConfigIdentityConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewClusterConfigSecurityConfigIdentityConfigSet(c *Client, des, nw []ClusterConfigSecurityConfigIdentityConfig) []ClusterConfigSecurityConfigIdentityConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterConfigSecurityConfigIdentityConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterConfigSecurityConfigIdentityConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterConfigSecurityConfigIdentityConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterConfigSecurityConfigIdentityConfigSlice(c *Client, des, nw []ClusterConfigSecurityConfigIdentityConfig) []ClusterConfigSecurityConfigIdentityConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterConfigSecurityConfigIdentityConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterConfigSecurityConfigIdentityConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterConfigLifecycleConfig(des, initial *ClusterConfigLifecycleConfig, opts ...dcl.ApplyOption) *ClusterConfigLifecycleConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterConfigLifecycleConfig{} + + if dcl.StringCanonicalize(des.IdleDeleteTtl, initial.IdleDeleteTtl) || dcl.IsZeroValue(des.IdleDeleteTtl) { + cDes.IdleDeleteTtl = initial.IdleDeleteTtl + } else { + cDes.IdleDeleteTtl = des.IdleDeleteTtl + } + if dcl.IsZeroValue(des.AutoDeleteTime) || (dcl.IsEmptyValueIndirect(des.AutoDeleteTime) && dcl.IsEmptyValueIndirect(initial.AutoDeleteTime)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.AutoDeleteTime = initial.AutoDeleteTime + } else { + cDes.AutoDeleteTime = des.AutoDeleteTime + } + if dcl.StringCanonicalize(des.AutoDeleteTtl, initial.AutoDeleteTtl) || dcl.IsZeroValue(des.AutoDeleteTtl) { + cDes.AutoDeleteTtl = initial.AutoDeleteTtl + } else { + cDes.AutoDeleteTtl = des.AutoDeleteTtl + } + + return cDes +} + +func canonicalizeClusterConfigLifecycleConfigSlice(des, initial []ClusterConfigLifecycleConfig, opts ...dcl.ApplyOption) []ClusterConfigLifecycleConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterConfigLifecycleConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterConfigLifecycleConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterConfigLifecycleConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterConfigLifecycleConfig(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterConfigLifecycleConfig(c *Client, des, nw *ClusterConfigLifecycleConfig) *ClusterConfigLifecycleConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterConfigLifecycleConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.IdleDeleteTtl, nw.IdleDeleteTtl) { + nw.IdleDeleteTtl = des.IdleDeleteTtl + } + if dcl.StringCanonicalize(des.AutoDeleteTtl, nw.AutoDeleteTtl) { + nw.AutoDeleteTtl = des.AutoDeleteTtl + } + + return nw +} + +func canonicalizeNewClusterConfigLifecycleConfigSet(c *Client, des, nw []ClusterConfigLifecycleConfig) []ClusterConfigLifecycleConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterConfigLifecycleConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterConfigLifecycleConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterConfigLifecycleConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterConfigLifecycleConfigSlice(c *Client, des, nw []ClusterConfigLifecycleConfig) []ClusterConfigLifecycleConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterConfigLifecycleConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterConfigLifecycleConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterConfigEndpointConfig(des, initial *ClusterConfigEndpointConfig, opts ...dcl.ApplyOption) *ClusterConfigEndpointConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterConfigEndpointConfig{} + + if dcl.BoolCanonicalize(des.EnableHttpPortAccess, initial.EnableHttpPortAccess) || dcl.IsZeroValue(des.EnableHttpPortAccess) { + cDes.EnableHttpPortAccess = initial.EnableHttpPortAccess + } else { + cDes.EnableHttpPortAccess = des.EnableHttpPortAccess + } + + return cDes +} + +func canonicalizeClusterConfigEndpointConfigSlice(des, initial []ClusterConfigEndpointConfig, opts ...dcl.ApplyOption) []ClusterConfigEndpointConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterConfigEndpointConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterConfigEndpointConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterConfigEndpointConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterConfigEndpointConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterConfigEndpointConfig(c *Client, des, nw *ClusterConfigEndpointConfig) *ClusterConfigEndpointConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterConfigEndpointConfig while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.BoolCanonicalize(des.EnableHttpPortAccess, nw.EnableHttpPortAccess) { + nw.EnableHttpPortAccess = des.EnableHttpPortAccess + } + + return nw +} + +func canonicalizeNewClusterConfigEndpointConfigSet(c *Client, des, nw []ClusterConfigEndpointConfig) []ClusterConfigEndpointConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterConfigEndpointConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterConfigEndpointConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterConfigEndpointConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterConfigEndpointConfigSlice(c *Client, des, nw []ClusterConfigEndpointConfig) []ClusterConfigEndpointConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterConfigEndpointConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterConfigEndpointConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterConfigGkeClusterConfig(des, initial *ClusterConfigGkeClusterConfig, opts ...dcl.ApplyOption) *ClusterConfigGkeClusterConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterConfigGkeClusterConfig{} + + cDes.NamespacedGkeDeploymentTarget = canonicalizeClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(des.NamespacedGkeDeploymentTarget, initial.NamespacedGkeDeploymentTarget, opts...) + + return cDes +} + +func canonicalizeClusterConfigGkeClusterConfigSlice(des, initial []ClusterConfigGkeClusterConfig, opts ...dcl.ApplyOption) []ClusterConfigGkeClusterConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterConfigGkeClusterConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterConfigGkeClusterConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterConfigGkeClusterConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterConfigGkeClusterConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterConfigGkeClusterConfig(c *Client, des, nw *ClusterConfigGkeClusterConfig) *ClusterConfigGkeClusterConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterConfigGkeClusterConfig while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + nw.NamespacedGkeDeploymentTarget = canonicalizeNewClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(c, des.NamespacedGkeDeploymentTarget, nw.NamespacedGkeDeploymentTarget) + + return nw +} + +func canonicalizeNewClusterConfigGkeClusterConfigSet(c *Client, des, nw []ClusterConfigGkeClusterConfig) []ClusterConfigGkeClusterConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterConfigGkeClusterConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterConfigGkeClusterConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterConfigGkeClusterConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterConfigGkeClusterConfigSlice(c *Client, des, nw []ClusterConfigGkeClusterConfig) []ClusterConfigGkeClusterConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterConfigGkeClusterConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterConfigGkeClusterConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(des, initial *ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget, opts ...dcl.ApplyOption) *ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget{} + + if dcl.IsZeroValue(des.TargetGkeCluster) || (dcl.IsEmptyValueIndirect(des.TargetGkeCluster) && dcl.IsEmptyValueIndirect(initial.TargetGkeCluster)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.TargetGkeCluster = initial.TargetGkeCluster + } else { + cDes.TargetGkeCluster = des.TargetGkeCluster + } + if dcl.StringCanonicalize(des.ClusterNamespace, initial.ClusterNamespace) || dcl.IsZeroValue(des.ClusterNamespace) { + cDes.ClusterNamespace = initial.ClusterNamespace + } else { + cDes.ClusterNamespace = des.ClusterNamespace + } + + return cDes +} + +func canonicalizeClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetSlice(des, initial []ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget, opts ...dcl.ApplyOption) []ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(c *Client, des, nw *ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) *ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.ClusterNamespace, nw.ClusterNamespace) { + nw.ClusterNamespace = des.ClusterNamespace + } + + return nw +} + +func canonicalizeNewClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetSet(c *Client, des, nw []ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) []ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) 
+ } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetSlice(c *Client, des, nw []ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) []ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterConfigMetastoreConfig(des, initial *ClusterConfigMetastoreConfig, opts ...dcl.ApplyOption) *ClusterConfigMetastoreConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterConfigMetastoreConfig{} + + if dcl.IsZeroValue(des.DataprocMetastoreService) || (dcl.IsEmptyValueIndirect(des.DataprocMetastoreService) && dcl.IsEmptyValueIndirect(initial.DataprocMetastoreService)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.DataprocMetastoreService = initial.DataprocMetastoreService + } else { + cDes.DataprocMetastoreService = des.DataprocMetastoreService + } + + return cDes +} + +func canonicalizeClusterConfigMetastoreConfigSlice(des, initial []ClusterConfigMetastoreConfig, opts ...dcl.ApplyOption) []ClusterConfigMetastoreConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterConfigMetastoreConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterConfigMetastoreConfig(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterConfigMetastoreConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterConfigMetastoreConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterConfigMetastoreConfig(c *Client, des, nw *ClusterConfigMetastoreConfig) *ClusterConfigMetastoreConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterConfigMetastoreConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewClusterConfigMetastoreConfigSet(c *Client, des, nw []ClusterConfigMetastoreConfig) []ClusterConfigMetastoreConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterConfigMetastoreConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterConfigMetastoreConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterConfigMetastoreConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterConfigMetastoreConfigSlice(c *Client, des, nw []ClusterConfigMetastoreConfig) []ClusterConfigMetastoreConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterConfigMetastoreConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterConfigMetastoreConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterConfigDataprocMetricConfig(des, initial *ClusterConfigDataprocMetricConfig, opts ...dcl.ApplyOption) *ClusterConfigDataprocMetricConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterConfigDataprocMetricConfig{} + + cDes.Metrics = canonicalizeClusterConfigDataprocMetricConfigMetricsSlice(des.Metrics, initial.Metrics, opts...) + + return cDes +} + +func canonicalizeClusterConfigDataprocMetricConfigSlice(des, initial []ClusterConfigDataprocMetricConfig, opts ...dcl.ApplyOption) []ClusterConfigDataprocMetricConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterConfigDataprocMetricConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterConfigDataprocMetricConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterConfigDataprocMetricConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterConfigDataprocMetricConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterConfigDataprocMetricConfig(c *Client, des, nw *ClusterConfigDataprocMetricConfig) *ClusterConfigDataprocMetricConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterConfigDataprocMetricConfig while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + nw.Metrics = canonicalizeNewClusterConfigDataprocMetricConfigMetricsSlice(c, des.Metrics, nw.Metrics) + + return nw +} + +func canonicalizeNewClusterConfigDataprocMetricConfigSet(c *Client, des, nw []ClusterConfigDataprocMetricConfig) []ClusterConfigDataprocMetricConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterConfigDataprocMetricConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterConfigDataprocMetricConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterConfigDataprocMetricConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterConfigDataprocMetricConfigSlice(c *Client, des, nw []ClusterConfigDataprocMetricConfig) []ClusterConfigDataprocMetricConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterConfigDataprocMetricConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterConfigDataprocMetricConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterConfigDataprocMetricConfigMetrics(des, initial *ClusterConfigDataprocMetricConfigMetrics, opts ...dcl.ApplyOption) *ClusterConfigDataprocMetricConfigMetrics { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterConfigDataprocMetricConfigMetrics{} + + if dcl.IsZeroValue(des.MetricSource) || (dcl.IsEmptyValueIndirect(des.MetricSource) && dcl.IsEmptyValueIndirect(initial.MetricSource)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.MetricSource = initial.MetricSource + } else { + cDes.MetricSource = des.MetricSource + } + if dcl.StringArrayCanonicalize(des.MetricOverrides, initial.MetricOverrides) { + cDes.MetricOverrides = initial.MetricOverrides + } else { + cDes.MetricOverrides = des.MetricOverrides + } + + return cDes +} + +func canonicalizeClusterConfigDataprocMetricConfigMetricsSlice(des, initial []ClusterConfigDataprocMetricConfigMetrics, opts ...dcl.ApplyOption) []ClusterConfigDataprocMetricConfigMetrics { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterConfigDataprocMetricConfigMetrics, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterConfigDataprocMetricConfigMetrics(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterConfigDataprocMetricConfigMetrics, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterConfigDataprocMetricConfigMetrics(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterConfigDataprocMetricConfigMetrics(c *Client, des, nw *ClusterConfigDataprocMetricConfigMetrics) *ClusterConfigDataprocMetricConfigMetrics { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterConfigDataprocMetricConfigMetrics while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringArrayCanonicalize(des.MetricOverrides, nw.MetricOverrides) { + nw.MetricOverrides = des.MetricOverrides + } + + return nw +} + +func canonicalizeNewClusterConfigDataprocMetricConfigMetricsSet(c *Client, des, nw []ClusterConfigDataprocMetricConfigMetrics) []ClusterConfigDataprocMetricConfigMetrics { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterConfigDataprocMetricConfigMetrics + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterConfigDataprocMetricConfigMetricsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterConfigDataprocMetricConfigMetrics(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterConfigDataprocMetricConfigMetricsSlice(c *Client, des, nw []ClusterConfigDataprocMetricConfigMetrics) []ClusterConfigDataprocMetricConfigMetrics { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterConfigDataprocMetricConfigMetrics + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterConfigDataprocMetricConfigMetrics(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterStatus(des, initial *ClusterStatus, opts ...dcl.ApplyOption) *ClusterStatus { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterStatus{} + + return cDes +} + +func canonicalizeClusterStatusSlice(des, initial []ClusterStatus, opts ...dcl.ApplyOption) []ClusterStatus { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterStatus, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterStatus(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterStatus, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterStatus(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterStatus(c *Client, des, nw *ClusterStatus) *ClusterStatus { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterStatus while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Detail, nw.Detail) { + nw.Detail = des.Detail + } + + return nw +} + +func canonicalizeNewClusterStatusSet(c *Client, des, nw []ClusterStatus) []ClusterStatus { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []ClusterStatus + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterStatusNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterStatus(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterStatusSlice(c *Client, des, nw []ClusterStatus) []ClusterStatus { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []ClusterStatus + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterStatus(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterStatusHistory(des, initial *ClusterStatusHistory, opts ...dcl.ApplyOption) *ClusterStatusHistory { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterStatusHistory{} + + return cDes +} + +func canonicalizeClusterStatusHistorySlice(des, initial []ClusterStatusHistory, opts ...dcl.ApplyOption) []ClusterStatusHistory { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterStatusHistory, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterStatusHistory(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterStatusHistory, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterStatusHistory(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterStatusHistory(c *Client, des, nw *ClusterStatusHistory) *ClusterStatusHistory { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterStatusHistory while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Detail, nw.Detail) { + nw.Detail = des.Detail + } + + return nw +} + +func canonicalizeNewClusterStatusHistorySet(c *Client, des, nw []ClusterStatusHistory) []ClusterStatusHistory { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterStatusHistory + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterStatusHistoryNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterStatusHistory(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterStatusHistorySlice(c *Client, des, nw []ClusterStatusHistory) []ClusterStatusHistory { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterStatusHistory + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterStatusHistory(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterMetrics(des, initial *ClusterMetrics, opts ...dcl.ApplyOption) *ClusterMetrics { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterMetrics{} + + if dcl.IsZeroValue(des.HdfsMetrics) || (dcl.IsEmptyValueIndirect(des.HdfsMetrics) && dcl.IsEmptyValueIndirect(initial.HdfsMetrics)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.HdfsMetrics = initial.HdfsMetrics + } else { + cDes.HdfsMetrics = des.HdfsMetrics + } + if dcl.IsZeroValue(des.YarnMetrics) || (dcl.IsEmptyValueIndirect(des.YarnMetrics) && dcl.IsEmptyValueIndirect(initial.YarnMetrics)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.YarnMetrics = initial.YarnMetrics + } else { + cDes.YarnMetrics = des.YarnMetrics + } + + return cDes +} + +func canonicalizeClusterMetricsSlice(des, initial []ClusterMetrics, opts ...dcl.ApplyOption) []ClusterMetrics { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterMetrics, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterMetrics(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterMetrics, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterMetrics(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterMetrics(c *Client, des, nw *ClusterMetrics) *ClusterMetrics { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterMetrics while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewClusterMetricsSet(c *Client, des, nw []ClusterMetrics) []ClusterMetrics { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterMetrics + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterMetricsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterMetrics(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterMetricsSlice(c *Client, des, nw []ClusterMetrics) []ClusterMetrics { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterMetrics + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterMetrics(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterVirtualClusterConfig(des, initial *ClusterVirtualClusterConfig, opts ...dcl.ApplyOption) *ClusterVirtualClusterConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterVirtualClusterConfig{} + + if dcl.IsZeroValue(des.StagingBucket) || (dcl.IsEmptyValueIndirect(des.StagingBucket) && dcl.IsEmptyValueIndirect(initial.StagingBucket)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.StagingBucket = initial.StagingBucket + } else { + cDes.StagingBucket = des.StagingBucket + } + cDes.KubernetesClusterConfig = canonicalizeClusterVirtualClusterConfigKubernetesClusterConfig(des.KubernetesClusterConfig, initial.KubernetesClusterConfig, opts...) + cDes.AuxiliaryServicesConfig = canonicalizeClusterVirtualClusterConfigAuxiliaryServicesConfig(des.AuxiliaryServicesConfig, initial.AuxiliaryServicesConfig, opts...) + + return cDes +} + +func canonicalizeClusterVirtualClusterConfigSlice(des, initial []ClusterVirtualClusterConfig, opts ...dcl.ApplyOption) []ClusterVirtualClusterConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterVirtualClusterConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterVirtualClusterConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterVirtualClusterConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterVirtualClusterConfig(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterVirtualClusterConfig(c *Client, des, nw *ClusterVirtualClusterConfig) *ClusterVirtualClusterConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterVirtualClusterConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + nw.KubernetesClusterConfig = canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfig(c, des.KubernetesClusterConfig, nw.KubernetesClusterConfig) + nw.AuxiliaryServicesConfig = canonicalizeNewClusterVirtualClusterConfigAuxiliaryServicesConfig(c, des.AuxiliaryServicesConfig, nw.AuxiliaryServicesConfig) + + return nw +} + +func canonicalizeNewClusterVirtualClusterConfigSet(c *Client, des, nw []ClusterVirtualClusterConfig) []ClusterVirtualClusterConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterVirtualClusterConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterVirtualClusterConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterVirtualClusterConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterVirtualClusterConfigSlice(c *Client, des, nw []ClusterVirtualClusterConfig) []ClusterVirtualClusterConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterVirtualClusterConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterVirtualClusterConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterVirtualClusterConfigKubernetesClusterConfig(des, initial *ClusterVirtualClusterConfigKubernetesClusterConfig, opts ...dcl.ApplyOption) *ClusterVirtualClusterConfigKubernetesClusterConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterVirtualClusterConfigKubernetesClusterConfig{} + + if dcl.StringCanonicalize(des.KubernetesNamespace, initial.KubernetesNamespace) || dcl.IsZeroValue(des.KubernetesNamespace) { + cDes.KubernetesNamespace = initial.KubernetesNamespace + } else { + cDes.KubernetesNamespace = des.KubernetesNamespace + } + cDes.GkeClusterConfig = canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig(des.GkeClusterConfig, initial.GkeClusterConfig, opts...) + cDes.KubernetesSoftwareConfig = canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig(des.KubernetesSoftwareConfig, initial.KubernetesSoftwareConfig, opts...) + + return cDes +} + +func canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigSlice(des, initial []ClusterVirtualClusterConfigKubernetesClusterConfig, opts ...dcl.ApplyOption) []ClusterVirtualClusterConfigKubernetesClusterConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterVirtualClusterConfigKubernetesClusterConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterVirtualClusterConfigKubernetesClusterConfig(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterVirtualClusterConfigKubernetesClusterConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterVirtualClusterConfigKubernetesClusterConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfig(c *Client, des, nw *ClusterVirtualClusterConfigKubernetesClusterConfig) *ClusterVirtualClusterConfigKubernetesClusterConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterVirtualClusterConfigKubernetesClusterConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.KubernetesNamespace, nw.KubernetesNamespace) { + nw.KubernetesNamespace = des.KubernetesNamespace + } + nw.GkeClusterConfig = canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig(c, des.GkeClusterConfig, nw.GkeClusterConfig) + nw.KubernetesSoftwareConfig = canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig(c, des.KubernetesSoftwareConfig, nw.KubernetesSoftwareConfig) + + return nw +} + +func canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigSet(c *Client, des, nw []ClusterVirtualClusterConfigKubernetesClusterConfig) []ClusterVirtualClusterConfigKubernetesClusterConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []ClusterVirtualClusterConfigKubernetesClusterConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterVirtualClusterConfigKubernetesClusterConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigSlice(c *Client, des, nw []ClusterVirtualClusterConfigKubernetesClusterConfig) []ClusterVirtualClusterConfigKubernetesClusterConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []ClusterVirtualClusterConfigKubernetesClusterConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig(des, initial *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig, opts ...dcl.ApplyOption) *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig{} + + if dcl.IsZeroValue(des.GkeClusterTarget) || (dcl.IsEmptyValueIndirect(des.GkeClusterTarget) && dcl.IsEmptyValueIndirect(initial.GkeClusterTarget)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.GkeClusterTarget = initial.GkeClusterTarget + } else { + cDes.GkeClusterTarget = des.GkeClusterTarget + } + cDes.NodePoolTarget = canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetSlice(des.NodePoolTarget, initial.NodePoolTarget, opts...) + + return cDes +} + +func canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigSlice(des, initial []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig, opts ...dcl.ApplyOption) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig(c *Client, des, nw *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig) *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + nw.NodePoolTarget = canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetSlice(c, des.NodePoolTarget, nw.NodePoolTarget) + + return nw +} + +func canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigSet(c *Client, des, nw []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigSlice(c *Client, des, nw []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget(des, initial *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget, opts ...dcl.ApplyOption) *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget{} + + if dcl.IsZeroValue(des.NodePool) || (dcl.IsEmptyValueIndirect(des.NodePool) && dcl.IsEmptyValueIndirect(initial.NodePool)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.NodePool = initial.NodePool + } else { + cDes.NodePool = des.NodePool + } + if dcl.IsZeroValue(des.Roles) || (dcl.IsEmptyValueIndirect(des.Roles) && dcl.IsEmptyValueIndirect(initial.Roles)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Roles = initial.Roles + } else { + cDes.Roles = des.Roles + } + cDes.NodePoolConfig = canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig(des.NodePoolConfig, initial.NodePoolConfig, opts...) 
+ + return cDes +} + +func canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetSlice(des, initial []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget, opts ...dcl.ApplyOption) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget(c *Client, des, nw *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget) *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + nw.NodePoolConfig = des.NodePoolConfig + + return nw +} + +func canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetSet(c *Client, des, nw []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetSlice(c *Client, des, nw []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig(des, initial *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig, opts ...dcl.ApplyOption) *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig{} + + cDes.Config = canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig(des.Config, initial.Config, opts...) + if dcl.StringArrayCanonicalize(des.Locations, initial.Locations) { + cDes.Locations = initial.Locations + } else { + cDes.Locations = des.Locations + } + cDes.Autoscaling = canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling(des.Autoscaling, initial.Autoscaling, opts...) 
+ + return cDes +} + +func canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigSlice(des, initial []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig, opts ...dcl.ApplyOption) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig(c *Client, des, nw *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig) *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + nw.Config = canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig(c, des.Config, nw.Config) + if dcl.StringArrayCanonicalize(des.Locations, nw.Locations) { + nw.Locations = des.Locations + } + nw.Autoscaling = canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling(c, des.Autoscaling, nw.Autoscaling) + + return nw +} + +func canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigSet(c *Client, des, nw []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) 
+ + return items +} + +func canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigSlice(c *Client, des, nw []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig(des, initial *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig, opts ...dcl.ApplyOption) *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig{} + + if dcl.StringCanonicalize(des.MachineType, initial.MachineType) || dcl.IsZeroValue(des.MachineType) { + cDes.MachineType = initial.MachineType + } else { + cDes.MachineType = des.MachineType + } + if dcl.IsZeroValue(des.LocalSsdCount) || (dcl.IsEmptyValueIndirect(des.LocalSsdCount) && dcl.IsEmptyValueIndirect(initial.LocalSsdCount)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.LocalSsdCount = initial.LocalSsdCount + } else { + cDes.LocalSsdCount = des.LocalSsdCount + } + if dcl.BoolCanonicalize(des.Preemptible, initial.Preemptible) || dcl.IsZeroValue(des.Preemptible) { + cDes.Preemptible = initial.Preemptible + } else { + cDes.Preemptible = des.Preemptible + } + cDes.Accelerators = canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAcceleratorsSlice(des.Accelerators, initial.Accelerators, opts...) + if dcl.StringCanonicalize(des.MinCpuPlatform, initial.MinCpuPlatform) || dcl.IsZeroValue(des.MinCpuPlatform) { + cDes.MinCpuPlatform = initial.MinCpuPlatform + } else { + cDes.MinCpuPlatform = des.MinCpuPlatform + } + if dcl.StringCanonicalize(des.BootDiskKmsKey, initial.BootDiskKmsKey) || dcl.IsZeroValue(des.BootDiskKmsKey) { + cDes.BootDiskKmsKey = initial.BootDiskKmsKey + } else { + cDes.BootDiskKmsKey = des.BootDiskKmsKey + } + cDes.EphemeralStorageConfig = canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig(des.EphemeralStorageConfig, initial.EphemeralStorageConfig, opts...) 
+ if dcl.BoolCanonicalize(des.Spot, initial.Spot) || dcl.IsZeroValue(des.Spot) { + cDes.Spot = initial.Spot + } else { + cDes.Spot = des.Spot + } + + return cDes +} + +func canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigSlice(des, initial []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig, opts ...dcl.ApplyOption) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig(c *Client, des, nw *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig) *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.MachineType, nw.MachineType) { + nw.MachineType = des.MachineType + } + if dcl.BoolCanonicalize(des.Preemptible, nw.Preemptible) { + nw.Preemptible = des.Preemptible + } + nw.Accelerators = canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAcceleratorsSlice(c, des.Accelerators, nw.Accelerators) + if dcl.StringCanonicalize(des.MinCpuPlatform, nw.MinCpuPlatform) { + nw.MinCpuPlatform = des.MinCpuPlatform + } + if dcl.StringCanonicalize(des.BootDiskKmsKey, nw.BootDiskKmsKey) { + nw.BootDiskKmsKey = des.BootDiskKmsKey + } + nw.EphemeralStorageConfig = canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig(c, des.EphemeralStorageConfig, nw.EphemeralStorageConfig) + if dcl.BoolCanonicalize(des.Spot, nw.Spot) { + nw.Spot = des.Spot + } + + return nw +} + +func canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigSet(c *Client, des, nw []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig) 
[]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigSlice(c *Client, des, nw []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators(des, initial *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators, opts ...dcl.ApplyOption) *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators{} + + if dcl.IsZeroValue(des.AcceleratorCount) || (dcl.IsEmptyValueIndirect(des.AcceleratorCount) && dcl.IsEmptyValueIndirect(initial.AcceleratorCount)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.AcceleratorCount = initial.AcceleratorCount + } else { + cDes.AcceleratorCount = des.AcceleratorCount + } + if dcl.StringCanonicalize(des.AcceleratorType, initial.AcceleratorType) || dcl.IsZeroValue(des.AcceleratorType) { + cDes.AcceleratorType = initial.AcceleratorType + } else { + cDes.AcceleratorType = des.AcceleratorType + } + if dcl.StringCanonicalize(des.GpuPartitionSize, initial.GpuPartitionSize) || dcl.IsZeroValue(des.GpuPartitionSize) { + cDes.GpuPartitionSize = initial.GpuPartitionSize + } else { + cDes.GpuPartitionSize = des.GpuPartitionSize + } + + return cDes +} + +func canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAcceleratorsSlice(des, initial []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators, opts ...dcl.ApplyOption) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators, 0, len(des)) + for _, d := range des { + cd := canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators, 0, len(des)) + for i, d := range des { + cd := canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators(&d, &initial[i], opts...) 
		if cd != nil {
			items = append(items, *cd)
		}
	}
	return items

}

// canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators
// reconciles a desired accelerator config (des) with the value returned by the
// server (nw) after an apply. String fields that are canonically equal keep the
// desired spelling so no spurious diff is reported.
func canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators(c *Client, des, nw *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators) *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators {

	if des == nil {
		return nw
	}

	if nw == nil {
		// des is non-nil but the server returned nothing: an explicitly empty
		// desired object is preserved, otherwise the nil actual wins.
		if dcl.IsEmptyValueIndirect(des) {
			c.Config.Logger.Info("Found explicitly empty value for ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators while comparing non-nil desired to nil actual. Returning desired object.")
			return des
		}
		return nil
	}

	if dcl.StringCanonicalize(des.AcceleratorType, nw.AcceleratorType) {
		nw.AcceleratorType = des.AcceleratorType
	}
	if dcl.StringCanonicalize(des.GpuPartitionSize, nw.GpuPartitionSize) {
		nw.GpuPartitionSize = des.GpuPartitionSize
	}

	return nw
}

// canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAcceleratorsSet
// treats des and nw as sets: each desired element is matched (by a diff-free
// comparison) to an actual element, canonicalized against it, and the match is
// removed from nw; unmatched actual elements are appended at the end.
func canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAcceleratorsSet(c *Client, des, nw []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators {
	if des == nil {
		return nw
	}

	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
	var items []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators
	for _, d := range des {
		matchedIndex := -1
		for i, n := range nw {
			if diffs, _ := compareClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAcceleratorsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
				matchedIndex = i
				break
			}
		}
		if matchedIndex != -1 {
			items = append(items, *canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators(c, &d, &nw[matchedIndex]))
			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
		}
	}
	// Also include elements in nw that are not matched in des.
	items = append(items, nw...)

	return items
}

// canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAcceleratorsSlice
// canonicalizes des and nw element-by-element; if the lengths differ the actual
// slice is returned untouched so the differ reports the mismatch later.
func canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAcceleratorsSlice(c *Client, des, nw []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators {
	if des == nil {
		return nw
	}

	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
	// Return the original array.
	if len(des) != len(nw) {
		return nw
	}

	var items []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators
	for i, d := range des {
		n := nw[i]
		items = append(items, *canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators(c, &d, &n))
	}

	return items
}

// canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig
// computes the canonical desired state for the ephemeral-storage config by
// filling unset desired fields from the initial (prior) state.
func canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig(des, initial *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig, opts ...dcl.ApplyOption) *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig {
	if des == nil {
		return initial
	}
	if des.empty {
		return des
	}

	if initial == nil {
		return des
	}

	cDes := &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig{}

	if dcl.IsZeroValue(des.LocalSsdCount) || (dcl.IsEmptyValueIndirect(des.LocalSsdCount) && dcl.IsEmptyValueIndirect(initial.LocalSsdCount)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		cDes.LocalSsdCount = initial.LocalSsdCount
	} else {
		cDes.LocalSsdCount = des.LocalSsdCount
	}

	return cDes
}

// canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfigSlice
// applies the element canonicalizer pairwise; when lengths differ each desired
// element is canonicalized against a nil initial instead.
func canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfigSlice(des, initial []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig, opts ...dcl.ApplyOption) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig {
	if dcl.IsEmptyValueIndirect(des) {
		return initial
	}

	if len(des) != len(initial) {

		items := make([]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig, 0, len(des))
		for _, d := range des {
			cd := canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig(&d, nil, opts...)
			if cd != nil {
				items = append(items, *cd)
			}
		}
		return items
	}

	items := make([]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig, 0, len(des))
	for i, d := range des {
		cd := canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig(&d, &initial[i], opts...)
		if cd != nil {
			items = append(items, *cd)
		}
	}
	return items

}

// canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig
// reconciles the desired ephemeral-storage config with the server-returned one;
// this object has no string fields to canonicalize, so the actual value is
// returned as-is once the nil cases are handled.
func canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig(c *Client, des, nw *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig) *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig {

	if des == nil {
		return nw
	}

	if nw == nil {
		if dcl.IsEmptyValueIndirect(des) {
			c.Config.Logger.Info("Found explicitly empty value for ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig while comparing non-nil desired to nil actual. Returning desired object.")
			return des
		}
		return nil
	}

	return nw
}

// canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfigSet
// performs set-style matching between desired and actual slices, canonicalizing
// matched pairs and appending unmatched actual elements.
func canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfigSet(c *Client, des, nw []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig {
	if des == nil {
		return nw
	}

	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
	var items []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig
	for _, d := range des {
		matchedIndex := -1
		for i, n := range nw {
			if diffs, _ := compareClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
				matchedIndex = i
				break
			}
		}
		if matchedIndex != -1 {
			items = append(items, *canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig(c, &d, &nw[matchedIndex]))
			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
		}
	}
	// Also include elements in nw that are not matched in des.
	items = append(items, nw...)

	return items
}

// canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfigSlice
// canonicalizes corresponding elements pairwise; unequal lengths are left to
// the differ.
func canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfigSlice(c *Client, des, nw []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig {
	if des == nil {
		return nw
	}

	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
	// Return the original array.
	if len(des) != len(nw) {
		return nw
	}

	var items []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig
	for i, d := range des {
		n := nw[i]
		items = append(items, *canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig(c, &d, &n))
	}

	return items
}

// canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling
// computes the canonical desired node-pool autoscaling config, preferring the
// initial (prior) value whenever the desired value is unset or equivalent.
func canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling(des, initial *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling, opts ...dcl.ApplyOption) *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling {
	if des == nil {
		return initial
	}
	if des.empty {
		return des
	}

	if initial == nil {
		return des
	}

	cDes := &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling{}

	if dcl.IsZeroValue(des.MinNodeCount) || (dcl.IsEmptyValueIndirect(des.MinNodeCount) && dcl.IsEmptyValueIndirect(initial.MinNodeCount)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		cDes.MinNodeCount = initial.MinNodeCount
	} else {
		cDes.MinNodeCount = des.MinNodeCount
	}
	if dcl.IsZeroValue(des.MaxNodeCount) || (dcl.IsEmptyValueIndirect(des.MaxNodeCount) && dcl.IsEmptyValueIndirect(initial.MaxNodeCount)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		cDes.MaxNodeCount = initial.MaxNodeCount
	} else {
		cDes.MaxNodeCount = des.MaxNodeCount
	}

	return cDes
}

// canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscalingSlice
// applies the element canonicalizer pairwise; when lengths differ each desired
// element is canonicalized against a nil initial instead.
func canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscalingSlice(des, initial []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling, opts ...dcl.ApplyOption) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling {
	if dcl.IsEmptyValueIndirect(des) {
		return initial
	}

	if len(des) != len(initial) {

		items := make([]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling, 0, len(des))
		for _, d := range des {
			cd := canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling(&d, nil, opts...)
			if cd != nil {
				items = append(items, *cd)
			}
		}
		return items
	}

	items := make([]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling, 0, len(des))
	for i, d := range des {
		cd := canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling(&d, &initial[i], opts...)
		if cd != nil {
			items = append(items, *cd)
		}
	}
	return items

}

// canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling
// reconciles the desired autoscaling config with the server-returned one; no
// string fields exist, so the actual value is returned once nil cases pass.
func canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling(c *Client, des, nw *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling) *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling {

	if des == nil {
		return nw
	}

	if nw == nil {
		if dcl.IsEmptyValueIndirect(des) {
			c.Config.Logger.Info("Found explicitly empty value for ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling while comparing non-nil desired to nil actual. Returning desired object.")
			return des
		}
		return nil
	}

	return nw
}

// canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscalingSet
// performs set-style matching between desired and actual slices, canonicalizing
// matched pairs and appending unmatched actual elements.
func canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscalingSet(c *Client, des, nw []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling {
	if des == nil {
		return nw
	}

	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
	var items []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling
	for _, d := range des {
		matchedIndex := -1
		for i, n := range nw {
			if diffs, _ := compareClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscalingNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
				matchedIndex = i
				break
			}
		}
		if matchedIndex != -1 {
			items = append(items, *canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling(c, &d, &nw[matchedIndex]))
			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
		}
	}
	// Also include elements in nw that are not matched in des.
	items = append(items, nw...)

	return items
}

// canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscalingSlice
// canonicalizes corresponding elements pairwise; unequal lengths are left to
// the differ.
func canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscalingSlice(c *Client, des, nw []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling {
	if des == nil {
		return nw
	}

	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
	// Return the original array.
	if len(des) != len(nw) {
		return nw
	}

	var items []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling
	for i, d := range des {
		n := nw[i]
		items = append(items, *canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling(c, &d, &n))
	}

	return items
}

// canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig
// computes the canonical desired Kubernetes software config, preferring the
// initial (prior) value whenever the desired value is unset or equivalent.
func canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig(des, initial *ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig, opts ...dcl.ApplyOption) *ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig {
	if des == nil {
		return initial
	}
	if des.empty {
		return des
	}

	if initial == nil {
		return des
	}

	cDes := &ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig{}

	if dcl.IsZeroValue(des.ComponentVersion) || (dcl.IsEmptyValueIndirect(des.ComponentVersion) && dcl.IsEmptyValueIndirect(initial.ComponentVersion)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		cDes.ComponentVersion = initial.ComponentVersion
	} else {
		cDes.ComponentVersion = des.ComponentVersion
	}
	if dcl.IsZeroValue(des.Properties) || (dcl.IsEmptyValueIndirect(des.Properties) && dcl.IsEmptyValueIndirect(initial.Properties)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		cDes.Properties = initial.Properties
	} else {
		cDes.Properties = des.Properties
	}

	return cDes
}

// canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfigSlice
// applies the element canonicalizer pairwise; when lengths differ each desired
// element is canonicalized against a nil initial instead.
func canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfigSlice(des, initial []ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig, opts ...dcl.ApplyOption) []ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig {
	if dcl.IsEmptyValueIndirect(des) {
		return initial
	}

	if len(des) != len(initial) {

		items := make([]ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig, 0, len(des))
		for _, d := range des {
			cd := canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig(&d, nil, opts...)
			if cd != nil {
				items = append(items, *cd)
			}
		}
		return items
	}

	items := make([]ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig, 0, len(des))
	for i, d := range des {
		cd := canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig(&d, &initial[i], opts...)
		if cd != nil {
			items = append(items, *cd)
		}
	}
	return items

}

// canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig
// reconciles the desired Kubernetes software config with the server-returned
// one; no string fields exist, so the actual value is returned once the nil
// cases are handled.
func canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig(c *Client, des, nw *ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig) *ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig {

	if des == nil {
		return nw
	}

	if nw == nil {
		if dcl.IsEmptyValueIndirect(des) {
			c.Config.Logger.Info("Found explicitly empty value for ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig while comparing non-nil desired to nil actual. Returning desired object.")
			return des
		}
		return nil
	}

	return nw
}

// canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfigSet
// performs set-style matching between desired and actual slices, canonicalizing
// matched pairs and appending unmatched actual elements.
func canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfigSet(c *Client, des, nw []ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig) []ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig {
	if des == nil {
		return nw
	}

	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
	var items []ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig
	for _, d := range des {
		matchedIndex := -1
		for i, n := range nw {
			if diffs, _ := compareClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
				matchedIndex = i
				break
			}
		}
		if matchedIndex != -1 {
			items = append(items, *canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig(c, &d, &nw[matchedIndex]))
			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
		}
	}
	// Also include elements in nw that are not matched in des.
	items = append(items, nw...)

	return items
}

// canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfigSlice
// canonicalizes corresponding elements pairwise; unequal lengths are left to
// the differ.
func canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfigSlice(c *Client, des, nw []ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig) []ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig {
	if des == nil {
		return nw
	}

	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
	// Return the original array.
	if len(des) != len(nw) {
		return nw
	}

	var items []ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig
	for i, d := range des {
		n := nw[i]
		items = append(items, *canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig(c, &d, &n))
	}

	return items
}

// canonicalizeClusterVirtualClusterConfigAuxiliaryServicesConfig computes the
// canonical desired auxiliary-services config by recursively canonicalizing its
// nested metastore and Spark history server configs against the initial state.
func canonicalizeClusterVirtualClusterConfigAuxiliaryServicesConfig(des, initial *ClusterVirtualClusterConfigAuxiliaryServicesConfig, opts ...dcl.ApplyOption) *ClusterVirtualClusterConfigAuxiliaryServicesConfig {
	if des == nil {
		return initial
	}
	if des.empty {
		return des
	}

	if initial == nil {
		return des
	}

	cDes := &ClusterVirtualClusterConfigAuxiliaryServicesConfig{}

	cDes.MetastoreConfig = canonicalizeClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig(des.MetastoreConfig, initial.MetastoreConfig, opts...)
	cDes.SparkHistoryServerConfig = canonicalizeClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig(des.SparkHistoryServerConfig, initial.SparkHistoryServerConfig, opts...)

	return cDes
}

// canonicalizeClusterVirtualClusterConfigAuxiliaryServicesConfigSlice applies
// the element canonicalizer pairwise; when lengths differ each desired element
// is canonicalized against a nil initial instead.
func canonicalizeClusterVirtualClusterConfigAuxiliaryServicesConfigSlice(des, initial []ClusterVirtualClusterConfigAuxiliaryServicesConfig, opts ...dcl.ApplyOption) []ClusterVirtualClusterConfigAuxiliaryServicesConfig {
	if dcl.IsEmptyValueIndirect(des) {
		return initial
	}

	if len(des) != len(initial) {

		items := make([]ClusterVirtualClusterConfigAuxiliaryServicesConfig, 0, len(des))
		for _, d := range des {
			cd := canonicalizeClusterVirtualClusterConfigAuxiliaryServicesConfig(&d, nil, opts...)
			if cd != nil {
				items = append(items, *cd)
			}
		}
		return items
	}

	items := make([]ClusterVirtualClusterConfigAuxiliaryServicesConfig, 0, len(des))
	for i, d := range des {
		cd := canonicalizeClusterVirtualClusterConfigAuxiliaryServicesConfig(&d, &initial[i], opts...)
		if cd != nil {
			items = append(items, *cd)
		}
	}
	return items

}

// canonicalizeNewClusterVirtualClusterConfigAuxiliaryServicesConfig reconciles
// the desired auxiliary-services config with the server-returned one,
// recursing into the nested metastore and Spark history server configs.
func canonicalizeNewClusterVirtualClusterConfigAuxiliaryServicesConfig(c *Client, des, nw *ClusterVirtualClusterConfigAuxiliaryServicesConfig) *ClusterVirtualClusterConfigAuxiliaryServicesConfig {

	if des == nil {
		return nw
	}

	if nw == nil {
		if dcl.IsEmptyValueIndirect(des) {
			c.Config.Logger.Info("Found explicitly empty value for ClusterVirtualClusterConfigAuxiliaryServicesConfig while comparing non-nil desired to nil actual. Returning desired object.")
			return des
		}
		return nil
	}

	nw.MetastoreConfig = canonicalizeNewClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig(c, des.MetastoreConfig, nw.MetastoreConfig)
	nw.SparkHistoryServerConfig = canonicalizeNewClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig(c, des.SparkHistoryServerConfig, nw.SparkHistoryServerConfig)

	return nw
}

// canonicalizeNewClusterVirtualClusterConfigAuxiliaryServicesConfigSet performs
// set-style matching between desired and actual slices, canonicalizing matched
// pairs and appending unmatched actual elements.
func canonicalizeNewClusterVirtualClusterConfigAuxiliaryServicesConfigSet(c *Client, des, nw []ClusterVirtualClusterConfigAuxiliaryServicesConfig) []ClusterVirtualClusterConfigAuxiliaryServicesConfig {
	if des == nil {
		return nw
	}

	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
	var items []ClusterVirtualClusterConfigAuxiliaryServicesConfig
	for _, d := range des {
		matchedIndex := -1
		for i, n := range nw {
			if diffs, _ := compareClusterVirtualClusterConfigAuxiliaryServicesConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
				matchedIndex = i
				break
			}
		}
		if matchedIndex != -1 {
			items = append(items, *canonicalizeNewClusterVirtualClusterConfigAuxiliaryServicesConfig(c, &d, &nw[matchedIndex]))
			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
		}
	}
	// Also include elements in nw that are not matched in des.
	items = append(items, nw...)

	return items
}

// canonicalizeNewClusterVirtualClusterConfigAuxiliaryServicesConfigSlice
// canonicalizes corresponding elements pairwise; unequal lengths are left to
// the differ.
func canonicalizeNewClusterVirtualClusterConfigAuxiliaryServicesConfigSlice(c *Client, des, nw []ClusterVirtualClusterConfigAuxiliaryServicesConfig) []ClusterVirtualClusterConfigAuxiliaryServicesConfig {
	if des == nil {
		return nw
	}

	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
	// Return the original array.
	if len(des) != len(nw) {
		return nw
	}

	var items []ClusterVirtualClusterConfigAuxiliaryServicesConfig
	for i, d := range des {
		n := nw[i]
		items = append(items, *canonicalizeNewClusterVirtualClusterConfigAuxiliaryServicesConfig(c, &d, &n))
	}

	return items
}

// canonicalizeClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig
// computes the canonical desired metastore config, preferring the initial
// (prior) value whenever the desired value is unset or equivalent.
func canonicalizeClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig(des, initial *ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig, opts ...dcl.ApplyOption) *ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig {
	if des == nil {
		return initial
	}
	if des.empty {
		return des
	}

	if initial == nil {
		return des
	}

	cDes := &ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig{}

	if dcl.IsZeroValue(des.DataprocMetastoreService) || (dcl.IsEmptyValueIndirect(des.DataprocMetastoreService) && dcl.IsEmptyValueIndirect(initial.DataprocMetastoreService)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		cDes.DataprocMetastoreService = initial.DataprocMetastoreService
	} else {
		cDes.DataprocMetastoreService = des.DataprocMetastoreService
	}

	return cDes
}

// canonicalizeClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfigSlice
// applies the element canonicalizer pairwise; when lengths differ each desired
// element is canonicalized against a nil initial instead.
func canonicalizeClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfigSlice(des, initial []ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig, opts ...dcl.ApplyOption) []ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig {
	if dcl.IsEmptyValueIndirect(des) {
		return initial
	}

	if len(des) != len(initial) {

		items := make([]ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig, 0, len(des))
		for _, d := range des {
			cd := canonicalizeClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig(&d, nil, opts...)
			if cd != nil {
				items = append(items, *cd)
			}
		}
		return items
	}

	items := make([]ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig, 0, len(des))
	for i, d := range des {
		cd := canonicalizeClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig(&d, &initial[i], opts...)
		if cd != nil {
			items = append(items, *cd)
		}
	}
	return items

}

// canonicalizeNewClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig
// reconciles the desired metastore config with the server-returned one; no
// string fields exist, so the actual value is returned once nil cases pass.
func canonicalizeNewClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig(c *Client, des, nw *ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig) *ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig {

	if des == nil {
		return nw
	}

	if nw == nil {
		if dcl.IsEmptyValueIndirect(des) {
			c.Config.Logger.Info("Found explicitly empty value for ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig while comparing non-nil desired to nil actual. Returning desired object.")
			return des
		}
		return nil
	}

	return nw
}

// canonicalizeNewClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfigSet
// performs set-style matching between desired and actual slices, canonicalizing
// matched pairs and appending unmatched actual elements.
func canonicalizeNewClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfigSet(c *Client, des, nw []ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig) []ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig {
	if des == nil {
		return nw
	}

	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
	var items []ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig
	for _, d := range des {
		matchedIndex := -1
		for i, n := range nw {
			if diffs, _ := compareClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
				matchedIndex = i
				break
			}
		}
		if matchedIndex != -1 {
			items = append(items, *canonicalizeNewClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig(c, &d, &nw[matchedIndex]))
			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
		}
	}
	// Also include elements in nw that are not matched in des.
	items = append(items, nw...)

	return items
}

// canonicalizeNewClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfigSlice
// canonicalizes corresponding elements pairwise; unequal lengths are left to
// the differ.
func canonicalizeNewClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfigSlice(c *Client, des, nw []ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig) []ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig {
	if des == nil {
		return nw
	}

	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
	// Return the original array.
	if len(des) != len(nw) {
		return nw
	}

	var items []ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig
	for i, d := range des {
		n := nw[i]
		items = append(items, *canonicalizeNewClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig(c, &d, &n))
	}

	return items
}

// canonicalizeClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig
// computes the canonical desired Spark history server config, preferring the
// initial (prior) value whenever the desired value is unset or equivalent.
func canonicalizeClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig(des, initial *ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig, opts ...dcl.ApplyOption) *ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig {
	if des == nil {
		return initial
	}
	if des.empty {
		return des
	}

	if initial == nil {
		return des
	}

	cDes := &ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig{}

	if dcl.IsZeroValue(des.DataprocCluster) || (dcl.IsEmptyValueIndirect(des.DataprocCluster) && dcl.IsEmptyValueIndirect(initial.DataprocCluster)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		cDes.DataprocCluster = initial.DataprocCluster
	} else {
		cDes.DataprocCluster = des.DataprocCluster
	}

	return cDes
}

// canonicalizeClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfigSlice
// applies the element canonicalizer pairwise; when lengths differ each desired
// element is canonicalized against a nil initial instead.
func canonicalizeClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfigSlice(des, initial []ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig, opts ...dcl.ApplyOption) []ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig {
	if dcl.IsEmptyValueIndirect(des) {
		return initial
	}

	if len(des) != len(initial) {

		items := make([]ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig, 0, len(des))
		for _, d := range des {
			cd := canonicalizeClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig(&d, nil, opts...)
			if cd != nil {
				items = append(items, *cd)
			}
		}
		return items
	}

	items := make([]ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig, 0, len(des))
	for i, d := range des {
		cd := canonicalizeClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig(&d, &initial[i], opts...)
		if cd != nil {
			items = append(items, *cd)
		}
	}
	return items

}

// canonicalizeNewClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig
// reconciles the desired Spark history server config with the server-returned
// one; no string fields exist, so the actual value is returned once the nil
// cases are handled.
func canonicalizeNewClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig(c *Client, des, nw *ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig) *ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig {

	if des == nil {
		return nw
	}

	if nw == nil {
		if dcl.IsEmptyValueIndirect(des) {
			c.Config.Logger.Info("Found explicitly empty value for ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig while comparing non-nil desired to nil actual. Returning desired object.")
			return des
		}
		return nil
	}

	return nw
}

// canonicalizeNewClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfigSet
// performs set-style matching between desired and actual slices, canonicalizing
// matched pairs and appending unmatched actual elements.
func canonicalizeNewClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfigSet(c *Client, des, nw []ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig) []ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig {
	if des == nil {
		return nw
	}

	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
	var items []ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig
	for _, d := range des {
		matchedIndex := -1
		for i, n := range nw {
			if diffs, _ := compareClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
				matchedIndex = i
				break
			}
		}
		if matchedIndex != -1 {
			items = append(items, *canonicalizeNewClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig(c, &d, &nw[matchedIndex]))
			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
		}
	}
	// Also include elements in nw that are not matched in des.
	items = append(items, nw...)

	return items
}

// canonicalizeNewClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfigSlice
// canonicalizes corresponding elements pairwise; unequal lengths are left to
// the differ.
func canonicalizeNewClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfigSlice(c *Client, des, nw []ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig) []ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig {
	if des == nil {
		return nw
	}

	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
	// Return the original array.
	if len(des) != len(nw) {
		return nw
	}

	var items []ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig
	for i, d := range des {
		n := nw[i]
		items = append(items, *canonicalizeNewClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig(c, &d, &n))
	}

	return items
}

// The differ returns a list of diffs, along with a list of operations that should be taken
// to remedy them. Right now, it does not attempt to consolidate operations - if several
// fields can be fixed with a patch update, it will perform the patch several times.
// Diffs on some fields will be ignored if the `desired` state has an empty (nil)
// value. This empty value indicates that the user does not care about the state for
// the field. Empty fields on the actual object will cause diffs.
// TODO(magic-modules-eng): for efficiency in some resources, add batching.
func diffCluster(c *Client, desired, actual *Cluster, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) {
	if desired == nil || actual == nil {
		return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual)
	}

	c.Config.Logger.Infof("Diff function called with desired state: %v", desired)
	c.Config.Logger.Infof("Diff function called with actual state: %v", actual)

	var fn dcl.FieldName
	var newDiffs []*dcl.FieldDiff
	// New style diffs.
	if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ProjectId")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ClusterName")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Config, actual.Config, dcl.DiffInfo{ObjectFunction: compareClusterConfigNewStyle, EmptyObject: EmptyClusterConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Config")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	// Labels are the only field updatable in place; server-managed
	// "goog-dataproc-" labels are ignored rather than triggering a recreate.
	if ds, err := dcl.Diff(desired.Labels, actual.Labels, dcl.DiffInfo{IgnoredPrefixes: []string{"goog-dataproc-"}, OperationSelector: dcl.TriggersOperation("updateClusterUpdateClusterOperation")}, fn.AddNest("Labels")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Status, actual.Status, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareClusterStatusNewStyle, EmptyObject: EmptyClusterStatus, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Status")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.StatusHistory, actual.StatusHistory, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareClusterStatusHistoryNewStyle, EmptyObject: EmptyClusterStatusHistory, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("StatusHistory")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.ClusterUuid, actual.ClusterUuid, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ClusterUuid")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Metrics, actual.Metrics, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareClusterMetricsNewStyle, EmptyObject: EmptyClusterMetrics, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Metrics")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Location, actual.Location, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Location")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.VirtualClusterConfig, actual.VirtualClusterConfig, dcl.DiffInfo{ObjectFunction: compareClusterVirtualClusterConfigNewStyle, EmptyObject: EmptyClusterVirtualClusterConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("VirtualClusterConfig")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if len(newDiffs) > 0 {
		c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs)
	}
	return newDiffs, nil
}

// compareClusterConfigNewStyle diffs two ClusterConfig values supplied as
// interface{} (pointer or value form), returning per-field diffs.
func compareClusterConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*ClusterConfig)
	if !ok {
		desiredNotPointer, ok := d.(ClusterConfig)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a ClusterConfig or *ClusterConfig", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*ClusterConfig)
	if !ok {
		actualNotPointer, ok := a.(ClusterConfig)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a ClusterConfig", a)
		}
		actual = &actualNotPointer
	}

	// Note: StagingBucket maps to the API field "ConfigBucket".
	if ds, err := dcl.Diff(desired.StagingBucket, actual.StagingBucket, dcl.DiffInfo{ServerDefault: true, Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ConfigBucket")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.TempBucket, actual.TempBucket, dcl.DiffInfo{ServerDefault: true, Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("TempBucket")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.GceClusterConfig, actual.GceClusterConfig, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareClusterConfigGceClusterConfigNewStyle, EmptyObject: EmptyClusterConfigGceClusterConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("GceClusterConfig")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.MasterConfig, actual.MasterConfig, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareClusterConfigMasterConfigNewStyle, EmptyObject: EmptyClusterConfigMasterConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MasterConfig")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.WorkerConfig, actual.WorkerConfig, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareClusterConfigWorkerConfigNewStyle, EmptyObject: EmptyClusterConfigWorkerConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("WorkerConfig")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.SecondaryWorkerConfig, actual.SecondaryWorkerConfig, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareClusterConfigSecondaryWorkerConfigNewStyle, EmptyObject: EmptyClusterConfigSecondaryWorkerConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SecondaryWorkerConfig")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
+ } + + if ds, err := dcl.Diff(desired.SoftwareConfig, actual.SoftwareConfig, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareClusterConfigSoftwareConfigNewStyle, EmptyObject: EmptyClusterConfigSoftwareConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SoftwareConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.InitializationActions, actual.InitializationActions, dcl.DiffInfo{ObjectFunction: compareClusterConfigInitializationActionsNewStyle, EmptyObject: EmptyClusterConfigInitializationActions, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InitializationActions")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.EncryptionConfig, actual.EncryptionConfig, dcl.DiffInfo{ObjectFunction: compareClusterConfigEncryptionConfigNewStyle, EmptyObject: EmptyClusterConfigEncryptionConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("EncryptionConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.AutoscalingConfig, actual.AutoscalingConfig, dcl.DiffInfo{ObjectFunction: compareClusterConfigAutoscalingConfigNewStyle, EmptyObject: EmptyClusterConfigAutoscalingConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AutoscalingConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.SecurityConfig, actual.SecurityConfig, dcl.DiffInfo{ObjectFunction: compareClusterConfigSecurityConfigNewStyle, EmptyObject: EmptyClusterConfigSecurityConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SecurityConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.LifecycleConfig, actual.LifecycleConfig, dcl.DiffInfo{ObjectFunction: compareClusterConfigLifecycleConfigNewStyle, EmptyObject: EmptyClusterConfigLifecycleConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("LifecycleConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.EndpointConfig, actual.EndpointConfig, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareClusterConfigEndpointConfigNewStyle, EmptyObject: EmptyClusterConfigEndpointConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("EndpointConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.GkeClusterConfig, actual.GkeClusterConfig, dcl.DiffInfo{ObjectFunction: compareClusterConfigGkeClusterConfigNewStyle, EmptyObject: EmptyClusterConfigGkeClusterConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("GkeClusterConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.MetastoreConfig, actual.MetastoreConfig, dcl.DiffInfo{ObjectFunction: compareClusterConfigMetastoreConfigNewStyle, EmptyObject: EmptyClusterConfigMetastoreConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MetastoreConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.DataprocMetricConfig, actual.DataprocMetricConfig, dcl.DiffInfo{ObjectFunction: compareClusterConfigDataprocMetricConfigNewStyle, EmptyObject: EmptyClusterConfigDataprocMetricConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DataprocMetricConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterConfigGceClusterConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterConfigGceClusterConfig) + if !ok { + desiredNotPointer, ok := d.(ClusterConfigGceClusterConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterConfigGceClusterConfig or *ClusterConfigGceClusterConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterConfigGceClusterConfig) + if !ok { + actualNotPointer, ok := a.(ClusterConfigGceClusterConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterConfigGceClusterConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Zone, actual.Zone, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ZoneUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Network, actual.Network, dcl.DiffInfo{ServerDefault: true, Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NetworkUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Subnetwork, actual.Subnetwork, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SubnetworkUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.InternalIPOnly, actual.InternalIPOnly, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InternalIpOnly")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.PrivateIPv6GoogleAccess, actual.PrivateIPv6GoogleAccess, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PrivateIpv6GoogleAccess")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ServiceAccount, actual.ServiceAccount, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ServiceAccount")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ServiceAccountScopes, actual.ServiceAccountScopes, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ServiceAccountScopes")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Tags, actual.Tags, dcl.DiffInfo{Type: "Set", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Tags")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Metadata, actual.Metadata, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Metadata")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ReservationAffinity, actual.ReservationAffinity, dcl.DiffInfo{ObjectFunction: compareClusterConfigGceClusterConfigReservationAffinityNewStyle, EmptyObject: EmptyClusterConfigGceClusterConfigReservationAffinity, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ReservationAffinity")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.NodeGroupAffinity, actual.NodeGroupAffinity, dcl.DiffInfo{ObjectFunction: compareClusterConfigGceClusterConfigNodeGroupAffinityNewStyle, EmptyObject: EmptyClusterConfigGceClusterConfigNodeGroupAffinity, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NodeGroupAffinity")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ShieldedInstanceConfig, actual.ShieldedInstanceConfig, dcl.DiffInfo{ObjectFunction: compareClusterConfigGceClusterConfigShieldedInstanceConfigNewStyle, EmptyObject: EmptyClusterConfigGceClusterConfigShieldedInstanceConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ShieldedInstanceConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ConfidentialInstanceConfig, actual.ConfidentialInstanceConfig, dcl.DiffInfo{ObjectFunction: compareClusterConfigGceClusterConfigConfidentialInstanceConfigNewStyle, EmptyObject: EmptyClusterConfigGceClusterConfigConfidentialInstanceConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ConfidentialInstanceConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterConfigGceClusterConfigReservationAffinityNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterConfigGceClusterConfigReservationAffinity) + if !ok { + desiredNotPointer, ok := d.(ClusterConfigGceClusterConfigReservationAffinity) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterConfigGceClusterConfigReservationAffinity or *ClusterConfigGceClusterConfigReservationAffinity", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterConfigGceClusterConfigReservationAffinity) + if !ok { + actualNotPointer, ok := a.(ClusterConfigGceClusterConfigReservationAffinity) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterConfigGceClusterConfigReservationAffinity", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.ConsumeReservationType, actual.ConsumeReservationType, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ConsumeReservationType")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Key, actual.Key, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Key")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Values, actual.Values, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Values")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterConfigGceClusterConfigNodeGroupAffinityNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterConfigGceClusterConfigNodeGroupAffinity) + if !ok { + desiredNotPointer, ok := d.(ClusterConfigGceClusterConfigNodeGroupAffinity) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterConfigGceClusterConfigNodeGroupAffinity or *ClusterConfigGceClusterConfigNodeGroupAffinity", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterConfigGceClusterConfigNodeGroupAffinity) + if !ok { + actualNotPointer, ok := a.(ClusterConfigGceClusterConfigNodeGroupAffinity) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterConfigGceClusterConfigNodeGroupAffinity", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.NodeGroup, actual.NodeGroup, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NodeGroupUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterConfigGceClusterConfigShieldedInstanceConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterConfigGceClusterConfigShieldedInstanceConfig) + if !ok { + desiredNotPointer, ok := d.(ClusterConfigGceClusterConfigShieldedInstanceConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterConfigGceClusterConfigShieldedInstanceConfig or *ClusterConfigGceClusterConfigShieldedInstanceConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterConfigGceClusterConfigShieldedInstanceConfig) + if !ok { + actualNotPointer, ok := a.(ClusterConfigGceClusterConfigShieldedInstanceConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterConfigGceClusterConfigShieldedInstanceConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.EnableSecureBoot, actual.EnableSecureBoot, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("EnableSecureBoot")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.EnableVtpm, actual.EnableVtpm, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("EnableVtpm")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.EnableIntegrityMonitoring, actual.EnableIntegrityMonitoring, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("EnableIntegrityMonitoring")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterConfigGceClusterConfigConfidentialInstanceConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterConfigGceClusterConfigConfidentialInstanceConfig) + if !ok { + desiredNotPointer, ok := d.(ClusterConfigGceClusterConfigConfidentialInstanceConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterConfigGceClusterConfigConfidentialInstanceConfig or *ClusterConfigGceClusterConfigConfidentialInstanceConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterConfigGceClusterConfigConfidentialInstanceConfig) + if !ok { + actualNotPointer, ok := a.(ClusterConfigGceClusterConfigConfidentialInstanceConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterConfigGceClusterConfigConfidentialInstanceConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.EnableConfidentialCompute, actual.EnableConfidentialCompute, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("EnableConfidentialCompute")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterConfigMasterConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterConfigMasterConfig) + if !ok { + desiredNotPointer, ok := d.(ClusterConfigMasterConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterConfigMasterConfig or *ClusterConfigMasterConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterConfigMasterConfig) + if !ok { + actualNotPointer, ok := a.(ClusterConfigMasterConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterConfigMasterConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.NumInstances, actual.NumInstances, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NumInstances")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.InstanceNames, actual.InstanceNames, dcl.DiffInfo{OutputOnly: true, ServerDefault: true, Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstanceNames")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Image, actual.Image, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ImageUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.MachineType, actual.MachineType, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MachineTypeUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.DiskConfig, actual.DiskConfig, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareClusterConfigMasterConfigDiskConfigNewStyle, EmptyObject: EmptyClusterConfigMasterConfigDiskConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DiskConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.IsPreemptible, actual.IsPreemptible, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("IsPreemptible")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Preemptibility, actual.Preemptibility, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Preemptibility")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ManagedGroupConfig, actual.ManagedGroupConfig, dcl.DiffInfo{OutputOnly: true, ServerDefault: true, ObjectFunction: compareClusterConfigMasterConfigManagedGroupConfigNewStyle, EmptyObject: EmptyClusterConfigMasterConfigManagedGroupConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ManagedGroupConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Accelerators, actual.Accelerators, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareClusterConfigMasterConfigAcceleratorsNewStyle, EmptyObject: EmptyClusterConfigMasterConfigAccelerators, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Accelerators")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.MinCpuPlatform, actual.MinCpuPlatform, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MinCpuPlatform")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.InstanceReferences, actual.InstanceReferences, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareClusterConfigMasterConfigInstanceReferencesNewStyle, EmptyObject: EmptyClusterConfigMasterConfigInstanceReferences, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstanceReferences")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareClusterConfigMasterConfigDiskConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterConfigMasterConfigDiskConfig) + if !ok { + desiredNotPointer, ok := d.(ClusterConfigMasterConfigDiskConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterConfigMasterConfigDiskConfig or *ClusterConfigMasterConfigDiskConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterConfigMasterConfigDiskConfig) + if !ok { + actualNotPointer, ok := a.(ClusterConfigMasterConfigDiskConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterConfigMasterConfigDiskConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.BootDiskType, actual.BootDiskType, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("BootDiskType")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.BootDiskSizeGb, actual.BootDiskSizeGb, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("BootDiskSizeGb")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.NumLocalSsds, actual.NumLocalSsds, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NumLocalSsds")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.LocalSsdInterface, actual.LocalSsdInterface, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("LocalSsdInterface")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareClusterConfigMasterConfigManagedGroupConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterConfigMasterConfigManagedGroupConfig) + if !ok { + desiredNotPointer, ok := d.(ClusterConfigMasterConfigManagedGroupConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterConfigMasterConfigManagedGroupConfig or *ClusterConfigMasterConfigManagedGroupConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterConfigMasterConfigManagedGroupConfig) + if !ok { + actualNotPointer, ok := a.(ClusterConfigMasterConfigManagedGroupConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterConfigMasterConfigManagedGroupConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.InstanceTemplateName, actual.InstanceTemplateName, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstanceTemplateName")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.InstanceGroupManagerName, actual.InstanceGroupManagerName, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstanceGroupManagerName")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterConfigMasterConfigAcceleratorsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterConfigMasterConfigAccelerators) + if !ok { + desiredNotPointer, ok := d.(ClusterConfigMasterConfigAccelerators) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterConfigMasterConfigAccelerators or *ClusterConfigMasterConfigAccelerators", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterConfigMasterConfigAccelerators) + if !ok { + actualNotPointer, ok := a.(ClusterConfigMasterConfigAccelerators) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterConfigMasterConfigAccelerators", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.AcceleratorType, actual.AcceleratorType, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AcceleratorTypeUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.AcceleratorCount, actual.AcceleratorCount, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AcceleratorCount")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterConfigMasterConfigInstanceReferencesNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterConfigMasterConfigInstanceReferences) + if !ok { + desiredNotPointer, ok := d.(ClusterConfigMasterConfigInstanceReferences) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterConfigMasterConfigInstanceReferences or *ClusterConfigMasterConfigInstanceReferences", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterConfigMasterConfigInstanceReferences) + if !ok { + actualNotPointer, ok := a.(ClusterConfigMasterConfigInstanceReferences) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterConfigMasterConfigInstanceReferences", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.InstanceName, actual.InstanceName, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstanceName")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.InstanceId, actual.InstanceId, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstanceId")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.PublicKey, actual.PublicKey, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PublicKey")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.PublicEciesKey, actual.PublicEciesKey, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PublicEciesKey")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterConfigWorkerConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterConfigWorkerConfig) + if !ok { + desiredNotPointer, ok := d.(ClusterConfigWorkerConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterConfigWorkerConfig or *ClusterConfigWorkerConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterConfigWorkerConfig) + if !ok { + actualNotPointer, ok := a.(ClusterConfigWorkerConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterConfigWorkerConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.NumInstances, actual.NumInstances, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NumInstances")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.InstanceNames, actual.InstanceNames, dcl.DiffInfo{OutputOnly: true, ServerDefault: true, Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstanceNames")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Image, actual.Image, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ImageUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.MachineType, actual.MachineType, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MachineTypeUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.DiskConfig, actual.DiskConfig, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareClusterConfigWorkerConfigDiskConfigNewStyle, EmptyObject: EmptyClusterConfigWorkerConfigDiskConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DiskConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.IsPreemptible, actual.IsPreemptible, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("IsPreemptible")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Preemptibility, actual.Preemptibility, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Preemptibility")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ManagedGroupConfig, actual.ManagedGroupConfig, dcl.DiffInfo{OutputOnly: true, ServerDefault: true, ObjectFunction: compareClusterConfigWorkerConfigManagedGroupConfigNewStyle, EmptyObject: EmptyClusterConfigWorkerConfigManagedGroupConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ManagedGroupConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Accelerators, actual.Accelerators, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareClusterConfigWorkerConfigAcceleratorsNewStyle, EmptyObject: EmptyClusterConfigWorkerConfigAccelerators, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Accelerators")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.MinCpuPlatform, actual.MinCpuPlatform, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MinCpuPlatform")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.InstanceReferences, actual.InstanceReferences, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareClusterConfigWorkerConfigInstanceReferencesNewStyle, EmptyObject: EmptyClusterConfigWorkerConfigInstanceReferences, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstanceReferences")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareClusterConfigWorkerConfigDiskConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterConfigWorkerConfigDiskConfig) + if !ok { + desiredNotPointer, ok := d.(ClusterConfigWorkerConfigDiskConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterConfigWorkerConfigDiskConfig or *ClusterConfigWorkerConfigDiskConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterConfigWorkerConfigDiskConfig) + if !ok { + actualNotPointer, ok := a.(ClusterConfigWorkerConfigDiskConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterConfigWorkerConfigDiskConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.BootDiskType, actual.BootDiskType, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("BootDiskType")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.BootDiskSizeGb, actual.BootDiskSizeGb, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("BootDiskSizeGb")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.NumLocalSsds, actual.NumLocalSsds, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NumLocalSsds")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.LocalSsdInterface, actual.LocalSsdInterface, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("LocalSsdInterface")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareClusterConfigWorkerConfigManagedGroupConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterConfigWorkerConfigManagedGroupConfig) + if !ok { + desiredNotPointer, ok := d.(ClusterConfigWorkerConfigManagedGroupConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterConfigWorkerConfigManagedGroupConfig or *ClusterConfigWorkerConfigManagedGroupConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterConfigWorkerConfigManagedGroupConfig) + if !ok { + actualNotPointer, ok := a.(ClusterConfigWorkerConfigManagedGroupConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterConfigWorkerConfigManagedGroupConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.InstanceTemplateName, actual.InstanceTemplateName, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstanceTemplateName")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.InstanceGroupManagerName, actual.InstanceGroupManagerName, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstanceGroupManagerName")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterConfigWorkerConfigAcceleratorsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterConfigWorkerConfigAccelerators) + if !ok { + desiredNotPointer, ok := d.(ClusterConfigWorkerConfigAccelerators) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterConfigWorkerConfigAccelerators or *ClusterConfigWorkerConfigAccelerators", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterConfigWorkerConfigAccelerators) + if !ok { + actualNotPointer, ok := a.(ClusterConfigWorkerConfigAccelerators) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterConfigWorkerConfigAccelerators", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.AcceleratorType, actual.AcceleratorType, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AcceleratorTypeUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.AcceleratorCount, actual.AcceleratorCount, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AcceleratorCount")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterConfigWorkerConfigInstanceReferencesNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterConfigWorkerConfigInstanceReferences) + if !ok { + desiredNotPointer, ok := d.(ClusterConfigWorkerConfigInstanceReferences) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterConfigWorkerConfigInstanceReferences or *ClusterConfigWorkerConfigInstanceReferences", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterConfigWorkerConfigInstanceReferences) + if !ok { + actualNotPointer, ok := a.(ClusterConfigWorkerConfigInstanceReferences) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterConfigWorkerConfigInstanceReferences", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.InstanceName, actual.InstanceName, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstanceName")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.InstanceId, actual.InstanceId, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstanceId")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.PublicKey, actual.PublicKey, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PublicKey")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.PublicEciesKey, actual.PublicEciesKey, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PublicEciesKey")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+	}
+	return diffs, nil
+}
+
+// compareClusterConfigSecondaryWorkerConfigNewStyle computes field-level diffs between a desired
+// and an actual ClusterConfigSecondaryWorkerConfig (pointer or value accepted). Mirrors the primary
+// worker-config comparator; every field requires recreate on change. DCL-generated code.
+func compareClusterConfigSecondaryWorkerConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
+	var diffs []*dcl.FieldDiff
+
+	desired, ok := d.(*ClusterConfigSecondaryWorkerConfig)
+	if !ok {
+		desiredNotPointer, ok := d.(ClusterConfigSecondaryWorkerConfig)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a ClusterConfigSecondaryWorkerConfig or *ClusterConfigSecondaryWorkerConfig", d)
+		}
+		desired = &desiredNotPointer
+	}
+	actual, ok := a.(*ClusterConfigSecondaryWorkerConfig)
+	if !ok {
+		actualNotPointer, ok := a.(ClusterConfigSecondaryWorkerConfig)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a ClusterConfigSecondaryWorkerConfig", a)
+		}
+		actual = &actualNotPointer
+	}
+
+	if ds, err := dcl.Diff(desired.NumInstances, actual.NumInstances, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NumInstances")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.InstanceNames, actual.InstanceNames, dcl.DiffInfo{OutputOnly: true, ServerDefault: true, Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstanceNames")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.Image, actual.Image, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ImageUri")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.MachineType, actual.MachineType, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MachineTypeUri")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.DiskConfig, actual.DiskConfig, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareClusterConfigSecondaryWorkerConfigDiskConfigNewStyle, EmptyObject: EmptyClusterConfigSecondaryWorkerConfigDiskConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DiskConfig")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.IsPreemptible, actual.IsPreemptible, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("IsPreemptible")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.Preemptibility, actual.Preemptibility, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Preemptibility")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.ManagedGroupConfig, actual.ManagedGroupConfig, dcl.DiffInfo{OutputOnly: true, ServerDefault: true, ObjectFunction: compareClusterConfigSecondaryWorkerConfigManagedGroupConfigNewStyle, EmptyObject: EmptyClusterConfigSecondaryWorkerConfigManagedGroupConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ManagedGroupConfig")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.Accelerators, actual.Accelerators, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareClusterConfigSecondaryWorkerConfigAcceleratorsNewStyle, EmptyObject: EmptyClusterConfigSecondaryWorkerConfigAccelerators, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Accelerators")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.MinCpuPlatform, actual.MinCpuPlatform, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MinCpuPlatform")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.InstanceReferences, actual.InstanceReferences, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareClusterConfigSecondaryWorkerConfigInstanceReferencesNewStyle, EmptyObject: EmptyClusterConfigSecondaryWorkerConfigInstanceReferences, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstanceReferences")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+	return diffs, nil
+}
+
+// compareClusterConfigSecondaryWorkerConfigDiskConfigNewStyle diffs desired vs. actual
+// ClusterConfigSecondaryWorkerConfigDiskConfig (pointer or value accepted).
+func compareClusterConfigSecondaryWorkerConfigDiskConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
+	var diffs []*dcl.FieldDiff
+
+	desired, ok := d.(*ClusterConfigSecondaryWorkerConfigDiskConfig)
+	if !ok {
+		desiredNotPointer, ok := d.(ClusterConfigSecondaryWorkerConfigDiskConfig)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a ClusterConfigSecondaryWorkerConfigDiskConfig or *ClusterConfigSecondaryWorkerConfigDiskConfig", d)
+		}
+		desired = &desiredNotPointer
+	}
+	actual, ok := a.(*ClusterConfigSecondaryWorkerConfigDiskConfig)
+	if !ok {
+		actualNotPointer, ok := a.(ClusterConfigSecondaryWorkerConfigDiskConfig)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a ClusterConfigSecondaryWorkerConfigDiskConfig", a)
+		}
+		actual = &actualNotPointer
+	}
+
+	if ds, err := dcl.Diff(desired.BootDiskType, actual.BootDiskType, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("BootDiskType")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.BootDiskSizeGb, actual.BootDiskSizeGb, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("BootDiskSizeGb")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.NumLocalSsds, actual.NumLocalSsds, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NumLocalSsds")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.LocalSsdInterface, actual.LocalSsdInterface, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("LocalSsdInterface")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+	return diffs, nil
+}
+
+// compareClusterConfigSecondaryWorkerConfigManagedGroupConfigNewStyle diffs desired vs. actual
+// ClusterConfigSecondaryWorkerConfigManagedGroupConfig; both fields are output-only.
+func compareClusterConfigSecondaryWorkerConfigManagedGroupConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
+	var diffs []*dcl.FieldDiff
+
+	desired, ok := d.(*ClusterConfigSecondaryWorkerConfigManagedGroupConfig)
+	if !ok {
+		desiredNotPointer, ok := d.(ClusterConfigSecondaryWorkerConfigManagedGroupConfig)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a ClusterConfigSecondaryWorkerConfigManagedGroupConfig or *ClusterConfigSecondaryWorkerConfigManagedGroupConfig", d)
+		}
+		desired = &desiredNotPointer
+	}
+	actual, ok := a.(*ClusterConfigSecondaryWorkerConfigManagedGroupConfig)
+	if !ok {
+		actualNotPointer, ok := a.(ClusterConfigSecondaryWorkerConfigManagedGroupConfig)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a ClusterConfigSecondaryWorkerConfigManagedGroupConfig", a)
+		}
+		actual = &actualNotPointer
+	}
+
+	if ds, err := dcl.Diff(desired.InstanceTemplateName, actual.InstanceTemplateName, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstanceTemplateName")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.InstanceGroupManagerName, actual.InstanceGroupManagerName, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstanceGroupManagerName")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+	return diffs, nil
+}
+
+// compareClusterConfigSecondaryWorkerConfigAcceleratorsNewStyle diffs desired vs. actual
+// ClusterConfigSecondaryWorkerConfigAccelerators; AcceleratorType maps to AcceleratorTypeUri.
+func compareClusterConfigSecondaryWorkerConfigAcceleratorsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
+	var diffs []*dcl.FieldDiff
+
+	desired, ok := d.(*ClusterConfigSecondaryWorkerConfigAccelerators)
+	if !ok {
+		desiredNotPointer, ok := d.(ClusterConfigSecondaryWorkerConfigAccelerators)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a ClusterConfigSecondaryWorkerConfigAccelerators or *ClusterConfigSecondaryWorkerConfigAccelerators", d)
+		}
+		desired = &desiredNotPointer
+	}
+	actual, ok := a.(*ClusterConfigSecondaryWorkerConfigAccelerators)
+	if !ok {
+		actualNotPointer, ok := a.(ClusterConfigSecondaryWorkerConfigAccelerators)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a ClusterConfigSecondaryWorkerConfigAccelerators", a)
+		}
+		actual = &actualNotPointer
+	}
+
+	if ds, err := dcl.Diff(desired.AcceleratorType, actual.AcceleratorType, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AcceleratorTypeUri")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.AcceleratorCount, actual.AcceleratorCount, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AcceleratorCount")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+	return diffs, nil
+}
+
+// compareClusterConfigSecondaryWorkerConfigInstanceReferencesNewStyle diffs desired vs. actual
+// ClusterConfigSecondaryWorkerConfigInstanceReferences (pointer or value accepted).
+func compareClusterConfigSecondaryWorkerConfigInstanceReferencesNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
+	var diffs []*dcl.FieldDiff
+
+	desired, ok := d.(*ClusterConfigSecondaryWorkerConfigInstanceReferences)
+	if !ok {
+		desiredNotPointer, ok := d.(ClusterConfigSecondaryWorkerConfigInstanceReferences)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a ClusterConfigSecondaryWorkerConfigInstanceReferences or *ClusterConfigSecondaryWorkerConfigInstanceReferences", d)
+		}
+		desired = &desiredNotPointer
+	}
+	actual, ok := a.(*ClusterConfigSecondaryWorkerConfigInstanceReferences)
+	if !ok {
+		actualNotPointer, ok := a.(ClusterConfigSecondaryWorkerConfigInstanceReferences)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a ClusterConfigSecondaryWorkerConfigInstanceReferences", a)
+		}
+		actual = &actualNotPointer
+	}
+
+	if ds, err := dcl.Diff(desired.InstanceName, actual.InstanceName, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstanceName")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.InstanceId, actual.InstanceId, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstanceId")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.PublicKey, actual.PublicKey, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PublicKey")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.PublicEciesKey, actual.PublicEciesKey, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PublicEciesKey")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+	return diffs, nil
+}
+
+// compareClusterConfigSoftwareConfigNewStyle diffs desired vs. actual ClusterConfigSoftwareConfig
+// (pointer or value accepted). Properties uses the custom canonicalizeSoftwareConfigProperties
+// diff suppressor; OptionalComponents is compared as an enum. DCL-generated code.
+func compareClusterConfigSoftwareConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
+	var diffs []*dcl.FieldDiff
+
+	desired, ok := d.(*ClusterConfigSoftwareConfig)
+	if !ok {
+		desiredNotPointer, ok := d.(ClusterConfigSoftwareConfig)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a ClusterConfigSoftwareConfig or *ClusterConfigSoftwareConfig", d)
+		}
+		desired = &desiredNotPointer
+	}
+	actual, ok := a.(*ClusterConfigSoftwareConfig)
+	if !ok {
+		actualNotPointer, ok := a.(ClusterConfigSoftwareConfig)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a ClusterConfigSoftwareConfig", a)
+		}
+		actual = &actualNotPointer
+	}
+
+	if ds, err := dcl.Diff(desired.ImageVersion, actual.ImageVersion, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ImageVersion")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.Properties, actual.Properties, dcl.DiffInfo{CustomDiff: canonicalizeSoftwareConfigProperties, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Properties")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.OptionalComponents, actual.OptionalComponents, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("OptionalComponents")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+	return diffs, nil
+}
+
+// compareClusterConfigInitializationActionsNewStyle diffs desired vs. actual
+// ClusterConfigInitializationActions (pointer or value accepted).
+func compareClusterConfigInitializationActionsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
+	var diffs []*dcl.FieldDiff
+
+	desired, ok := d.(*ClusterConfigInitializationActions)
+	if !ok {
+		desiredNotPointer, ok := d.(ClusterConfigInitializationActions)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a ClusterConfigInitializationActions or *ClusterConfigInitializationActions", d)
+		}
+		desired = &desiredNotPointer
+	}
+	actual, ok := a.(*ClusterConfigInitializationActions)
+	if !ok {
+		actualNotPointer, ok := a.(ClusterConfigInitializationActions)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a ClusterConfigInitializationActions", a)
+		}
+		actual = &actualNotPointer
+	}
+
+	if ds, err := dcl.Diff(desired.ExecutableFile, actual.ExecutableFile, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ExecutableFile")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.ExecutionTimeout, actual.ExecutionTimeout, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ExecutionTimeout")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+	return diffs, nil
+}
+
+// compareClusterConfigEncryptionConfigNewStyle diffs desired vs. actual
+// ClusterConfigEncryptionConfig; the single field is a resource reference (KMS key).
+func compareClusterConfigEncryptionConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
+	var diffs []*dcl.FieldDiff
+
+	desired, ok := d.(*ClusterConfigEncryptionConfig)
+	if !ok {
+		desiredNotPointer, ok := d.(ClusterConfigEncryptionConfig)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a ClusterConfigEncryptionConfig or *ClusterConfigEncryptionConfig", d)
+		}
+		desired = &desiredNotPointer
+	}
+	actual, ok := a.(*ClusterConfigEncryptionConfig)
+	if !ok {
+		actualNotPointer, ok := a.(ClusterConfigEncryptionConfig)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a ClusterConfigEncryptionConfig", a)
+		}
+		actual = &actualNotPointer
+	}
+
+	if ds, err := dcl.Diff(desired.GcePdKmsKeyName, actual.GcePdKmsKeyName, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("GcePdKmsKeyName")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+	return diffs, nil
+}
+
+// compareClusterConfigAutoscalingConfigNewStyle diffs desired vs. actual
+// ClusterConfigAutoscalingConfig; Policy is a reference mapped to the API field PolicyUri.
+func compareClusterConfigAutoscalingConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
+	var diffs []*dcl.FieldDiff
+
+	desired, ok := d.(*ClusterConfigAutoscalingConfig)
+	if !ok {
+		desiredNotPointer, ok := d.(ClusterConfigAutoscalingConfig)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a ClusterConfigAutoscalingConfig or *ClusterConfigAutoscalingConfig", d)
+		}
+		desired = &desiredNotPointer
+	}
+	actual, ok := a.(*ClusterConfigAutoscalingConfig)
+	if !ok {
+		actualNotPointer, ok := a.(ClusterConfigAutoscalingConfig)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a ClusterConfigAutoscalingConfig", a)
+		}
+		actual = &actualNotPointer
+	}
+
+	if ds, err := dcl.Diff(desired.Policy, actual.Policy, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PolicyUri")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+	return diffs, nil
+}
+
+// compareClusterConfigSecurityConfigNewStyle diffs desired vs. actual ClusterConfigSecurityConfig
+// (pointer or value accepted), delegating to the Kerberos and identity sub-comparators.
+// DCL-generated code.
+func compareClusterConfigSecurityConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
+	var diffs []*dcl.FieldDiff
+
+	desired, ok := d.(*ClusterConfigSecurityConfig)
+	if !ok {
+		desiredNotPointer, ok := d.(ClusterConfigSecurityConfig)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a ClusterConfigSecurityConfig or *ClusterConfigSecurityConfig", d)
+		}
+		desired = &desiredNotPointer
+	}
+	actual, ok := a.(*ClusterConfigSecurityConfig)
+	if !ok {
+		actualNotPointer, ok := a.(ClusterConfigSecurityConfig)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a ClusterConfigSecurityConfig", a)
+		}
+		actual = &actualNotPointer
+	}
+
+	if ds, err := dcl.Diff(desired.KerberosConfig, actual.KerberosConfig, dcl.DiffInfo{ObjectFunction: compareClusterConfigSecurityConfigKerberosConfigNewStyle, EmptyObject: EmptyClusterConfigSecurityConfigKerberosConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KerberosConfig")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.IdentityConfig, actual.IdentityConfig, dcl.DiffInfo{ObjectFunction: compareClusterConfigSecurityConfigIdentityConfigNewStyle, EmptyObject: EmptyClusterConfigSecurityConfigIdentityConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("IdentityConfig")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+	return diffs, nil
+}
+
+// compareClusterConfigSecurityConfigKerberosConfigNewStyle diffs desired vs. actual
+// ClusterConfigSecurityConfigKerberosConfig. Several client fields map to "*Uri" API fields
+// (e.g. RootPrincipalPassword -> RootPrincipalPasswordUri); KmsKey is a resource reference.
+func compareClusterConfigSecurityConfigKerberosConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
+	var diffs []*dcl.FieldDiff
+
+	desired, ok := d.(*ClusterConfigSecurityConfigKerberosConfig)
+	if !ok {
+		desiredNotPointer, ok := d.(ClusterConfigSecurityConfigKerberosConfig)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a ClusterConfigSecurityConfigKerberosConfig or *ClusterConfigSecurityConfigKerberosConfig", d)
+		}
+		desired = &desiredNotPointer
+	}
+	actual, ok := a.(*ClusterConfigSecurityConfigKerberosConfig)
+	if !ok {
+		actualNotPointer, ok := a.(ClusterConfigSecurityConfigKerberosConfig)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a ClusterConfigSecurityConfigKerberosConfig", a)
+		}
+		actual = &actualNotPointer
+	}
+
+	if ds, err := dcl.Diff(desired.EnableKerberos, actual.EnableKerberos, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("EnableKerberos")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.RootPrincipalPassword, actual.RootPrincipalPassword, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("RootPrincipalPasswordUri")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.KmsKey, actual.KmsKey, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KmsKeyUri")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.Keystore, actual.Keystore, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KeystoreUri")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.Truststore, actual.Truststore, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("TruststoreUri")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.KeystorePassword, actual.KeystorePassword, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KeystorePasswordUri")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.KeyPassword, actual.KeyPassword, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KeyPasswordUri")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.TruststorePassword, actual.TruststorePassword, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("TruststorePasswordUri")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.CrossRealmTrustRealm, actual.CrossRealmTrustRealm, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CrossRealmTrustRealm")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.CrossRealmTrustKdc, actual.CrossRealmTrustKdc, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CrossRealmTrustKdc")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.CrossRealmTrustAdminServer, actual.CrossRealmTrustAdminServer, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CrossRealmTrustAdminServer")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.CrossRealmTrustSharedPassword, actual.CrossRealmTrustSharedPassword, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CrossRealmTrustSharedPasswordUri")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.KdcDbKey, actual.KdcDbKey, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KdcDbKeyUri")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.TgtLifetimeHours, actual.TgtLifetimeHours, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("TgtLifetimeHours")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.Realm, actual.Realm, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Realm")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+	return diffs, nil
+}
+
+// compareClusterConfigSecurityConfigIdentityConfigNewStyle diffs desired vs. actual
+// ClusterConfigSecurityConfigIdentityConfig (pointer or value accepted).
+func compareClusterConfigSecurityConfigIdentityConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
+	var diffs []*dcl.FieldDiff
+
+	desired, ok := d.(*ClusterConfigSecurityConfigIdentityConfig)
+	if !ok {
+		desiredNotPointer, ok := d.(ClusterConfigSecurityConfigIdentityConfig)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a ClusterConfigSecurityConfigIdentityConfig or *ClusterConfigSecurityConfigIdentityConfig", d)
+		}
+		desired = &desiredNotPointer
+	}
+	actual, ok := a.(*ClusterConfigSecurityConfigIdentityConfig)
+	if !ok {
+		actualNotPointer, ok := a.(ClusterConfigSecurityConfigIdentityConfig)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a ClusterConfigSecurityConfigIdentityConfig", a)
+		}
+		actual = &actualNotPointer
+	}
+
+	if ds, err := dcl.Diff(desired.UserServiceAccountMapping, actual.UserServiceAccountMapping, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("UserServiceAccountMapping")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+	return diffs, nil
+}
+
+// compareClusterConfigLifecycleConfigNewStyle diffs desired vs. actual ClusterConfigLifecycleConfig
+// (pointer or value accepted); IdleStartTime is output-only. DCL-generated code.
+func compareClusterConfigLifecycleConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
+	var diffs []*dcl.FieldDiff
+
+	desired, ok := d.(*ClusterConfigLifecycleConfig)
+	if !ok {
+		desiredNotPointer, ok := d.(ClusterConfigLifecycleConfig)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a ClusterConfigLifecycleConfig or *ClusterConfigLifecycleConfig", d)
+		}
+		desired = &desiredNotPointer
+	}
+	actual, ok := a.(*ClusterConfigLifecycleConfig)
+	if !ok {
+		actualNotPointer, ok := a.(ClusterConfigLifecycleConfig)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a ClusterConfigLifecycleConfig", a)
+		}
+		actual = &actualNotPointer
+	}
+
+	if ds, err := dcl.Diff(desired.IdleDeleteTtl, actual.IdleDeleteTtl, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("IdleDeleteTtl")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.AutoDeleteTime, actual.AutoDeleteTime, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AutoDeleteTime")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.AutoDeleteTtl, actual.AutoDeleteTtl, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AutoDeleteTtl")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.IdleStartTime, actual.IdleStartTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("IdleStartTime")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+	return diffs, nil
+}
+
+// compareClusterConfigEndpointConfigNewStyle diffs desired vs. actual ClusterConfigEndpointConfig
+// (pointer or value accepted); HttpPorts is output-only.
+func compareClusterConfigEndpointConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
+	var diffs []*dcl.FieldDiff
+
+	desired, ok := d.(*ClusterConfigEndpointConfig)
+	if !ok {
+		desiredNotPointer, ok := d.(ClusterConfigEndpointConfig)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a ClusterConfigEndpointConfig or *ClusterConfigEndpointConfig", d)
+		}
+		desired = &desiredNotPointer
+	}
+	actual, ok := a.(*ClusterConfigEndpointConfig)
+	if !ok {
+		actualNotPointer, ok := a.(ClusterConfigEndpointConfig)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a ClusterConfigEndpointConfig", a)
+		}
+		actual = &actualNotPointer
+	}
+
+	if ds, err := dcl.Diff(desired.HttpPorts, actual.HttpPorts, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("HttpPorts")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.EnableHttpPortAccess, actual.EnableHttpPortAccess, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("EnableHttpPortAccess")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+	return diffs, nil
+}
+
+// compareClusterConfigGkeClusterConfigNewStyle diffs desired vs. actual
+// ClusterConfigGkeClusterConfig (pointer or value accepted); function continues past this hunk.
+func compareClusterConfigGkeClusterConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
+	var diffs []*dcl.FieldDiff
+
+	desired, ok := d.(*ClusterConfigGkeClusterConfig)
+	if !ok {
+		desiredNotPointer, ok := d.(ClusterConfigGkeClusterConfig)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a ClusterConfigGkeClusterConfig or *ClusterConfigGkeClusterConfig", d)
+		}
+		desired = &desiredNotPointer
+	}
+	actual, ok := a.(*ClusterConfigGkeClusterConfig)
+	if !ok {
+		actualNotPointer, ok := a.(ClusterConfigGkeClusterConfig)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a ClusterConfigGkeClusterConfig", a)
+		}
+		actual = &actualNotPointer
+	}
+
+	if ds, err := dcl.Diff(desired.NamespacedGkeDeploymentTarget, actual.NamespacedGkeDeploymentTarget, dcl.DiffInfo{ObjectFunction: compareClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetNewStyle, EmptyObject: EmptyClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NamespacedGkeDeploymentTarget")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+ } + return diffs, nil +} + +func compareClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) + if !ok { + desiredNotPointer, ok := d.(ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget or *ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) + if !ok { + actualNotPointer, ok := a.(ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.TargetGkeCluster, actual.TargetGkeCluster, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("TargetGkeCluster")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ClusterNamespace, actual.ClusterNamespace, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ClusterNamespace")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterConfigMetastoreConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterConfigMetastoreConfig) + if !ok { + desiredNotPointer, ok := d.(ClusterConfigMetastoreConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterConfigMetastoreConfig or *ClusterConfigMetastoreConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterConfigMetastoreConfig) + if !ok { + actualNotPointer, ok := a.(ClusterConfigMetastoreConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterConfigMetastoreConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.DataprocMetastoreService, actual.DataprocMetastoreService, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DataprocMetastoreService")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterConfigDataprocMetricConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterConfigDataprocMetricConfig) + if !ok { + desiredNotPointer, ok := d.(ClusterConfigDataprocMetricConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterConfigDataprocMetricConfig or *ClusterConfigDataprocMetricConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterConfigDataprocMetricConfig) + if !ok { + actualNotPointer, ok := a.(ClusterConfigDataprocMetricConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterConfigDataprocMetricConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Metrics, actual.Metrics, dcl.DiffInfo{ObjectFunction: compareClusterConfigDataprocMetricConfigMetricsNewStyle, EmptyObject: EmptyClusterConfigDataprocMetricConfigMetrics, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Metrics")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterConfigDataprocMetricConfigMetricsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterConfigDataprocMetricConfigMetrics) + if !ok { + desiredNotPointer, ok := d.(ClusterConfigDataprocMetricConfigMetrics) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterConfigDataprocMetricConfigMetrics or *ClusterConfigDataprocMetricConfigMetrics", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterConfigDataprocMetricConfigMetrics) + if !ok { + actualNotPointer, ok := a.(ClusterConfigDataprocMetricConfigMetrics) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterConfigDataprocMetricConfigMetrics", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.MetricSource, actual.MetricSource, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MetricSource")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.MetricOverrides, actual.MetricOverrides, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MetricOverrides")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterStatusNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterStatus) + if !ok { + desiredNotPointer, ok := d.(ClusterStatus) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterStatus or *ClusterStatus", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterStatus) + if !ok { + actualNotPointer, ok := a.(ClusterStatus) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterStatus", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.State, actual.State, dcl.DiffInfo{OutputOnly: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("State")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Detail, actual.Detail, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Detail")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.StateStartTime, actual.StateStartTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("StateStartTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Substate, actual.Substate, dcl.DiffInfo{OutputOnly: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Substate")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterStatusHistoryNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterStatusHistory) + if !ok { + desiredNotPointer, ok := d.(ClusterStatusHistory) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterStatusHistory or *ClusterStatusHistory", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterStatusHistory) + if !ok { + actualNotPointer, ok := a.(ClusterStatusHistory) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterStatusHistory", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.State, actual.State, dcl.DiffInfo{OutputOnly: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("State")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Detail, actual.Detail, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Detail")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.StateStartTime, actual.StateStartTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("StateStartTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Substate, actual.Substate, dcl.DiffInfo{OutputOnly: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Substate")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterMetricsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterMetrics) + if !ok { + desiredNotPointer, ok := d.(ClusterMetrics) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterMetrics or *ClusterMetrics", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterMetrics) + if !ok { + actualNotPointer, ok := a.(ClusterMetrics) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterMetrics", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.HdfsMetrics, actual.HdfsMetrics, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("HdfsMetrics")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.YarnMetrics, actual.YarnMetrics, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("YarnMetrics")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterVirtualClusterConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterVirtualClusterConfig) + if !ok { + desiredNotPointer, ok := d.(ClusterVirtualClusterConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterVirtualClusterConfig or *ClusterVirtualClusterConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterVirtualClusterConfig) + if !ok { + actualNotPointer, ok := a.(ClusterVirtualClusterConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterVirtualClusterConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.StagingBucket, actual.StagingBucket, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("StagingBucket")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.KubernetesClusterConfig, actual.KubernetesClusterConfig, dcl.DiffInfo{ObjectFunction: compareClusterVirtualClusterConfigKubernetesClusterConfigNewStyle, EmptyObject: EmptyClusterVirtualClusterConfigKubernetesClusterConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KubernetesClusterConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.AuxiliaryServicesConfig, actual.AuxiliaryServicesConfig, dcl.DiffInfo{ObjectFunction: compareClusterVirtualClusterConfigAuxiliaryServicesConfigNewStyle, EmptyObject: EmptyClusterVirtualClusterConfigAuxiliaryServicesConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AuxiliaryServicesConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterVirtualClusterConfigKubernetesClusterConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterVirtualClusterConfigKubernetesClusterConfig) + if !ok { + desiredNotPointer, ok := d.(ClusterVirtualClusterConfigKubernetesClusterConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterVirtualClusterConfigKubernetesClusterConfig or *ClusterVirtualClusterConfigKubernetesClusterConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterVirtualClusterConfigKubernetesClusterConfig) + if !ok { + actualNotPointer, ok := a.(ClusterVirtualClusterConfigKubernetesClusterConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterVirtualClusterConfigKubernetesClusterConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.KubernetesNamespace, actual.KubernetesNamespace, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KubernetesNamespace")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.GkeClusterConfig, actual.GkeClusterConfig, dcl.DiffInfo{ObjectFunction: compareClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNewStyle, EmptyObject: EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("GkeClusterConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.KubernetesSoftwareConfig, actual.KubernetesSoftwareConfig, dcl.DiffInfo{ObjectFunction: compareClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfigNewStyle, EmptyObject: EmptyClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KubernetesSoftwareConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig) + if !ok { + desiredNotPointer, ok := d.(ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig or *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig) + if !ok { + actualNotPointer, ok := a.(ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.GkeClusterTarget, actual.GkeClusterTarget, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("GkeClusterTarget")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.NodePoolTarget, actual.NodePoolTarget, dcl.DiffInfo{ObjectFunction: compareClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNewStyle, EmptyObject: EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NodePoolTarget")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget) + if !ok { + desiredNotPointer, ok := d.(ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget or *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget) + if !ok { + actualNotPointer, ok := a.(ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.NodePool, actual.NodePool, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NodePool")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Roles, actual.Roles, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Roles")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.NodePoolConfig, actual.NodePoolConfig, dcl.DiffInfo{ObjectFunction: compareClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigNewStyle, EmptyObject: EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NodePoolConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig) + if !ok { + desiredNotPointer, ok := d.(ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig or *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig) + if !ok { + actualNotPointer, ok := a.(ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig", a) + } + actual = &actualNotPointer + } + + if 
ds, err := dcl.Diff(desired.Config, actual.Config, dcl.DiffInfo{ObjectFunction: compareClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigNewStyle, EmptyObject: EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Config")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Locations, actual.Locations, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Locations")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Autoscaling, actual.Autoscaling, dcl.DiffInfo{ObjectFunction: compareClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscalingNewStyle, EmptyObject: EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Autoscaling")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig) + if !ok { + desiredNotPointer, ok := d.(ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig or *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig) + if !ok { + actualNotPointer, ok := a.(ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.MachineType, actual.MachineType, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MachineType")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.LocalSsdCount, actual.LocalSsdCount, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("LocalSsdCount")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Preemptible, actual.Preemptible, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Preemptible")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Accelerators, actual.Accelerators, dcl.DiffInfo{ObjectFunction: compareClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAcceleratorsNewStyle, EmptyObject: EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Accelerators")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.MinCpuPlatform, actual.MinCpuPlatform, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MinCpuPlatform")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.BootDiskKmsKey, actual.BootDiskKmsKey, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("BootDiskKmsKey")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.EphemeralStorageConfig, actual.EphemeralStorageConfig, dcl.DiffInfo{ObjectFunction: compareClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfigNewStyle, EmptyObject: EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("EphemeralStorageConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Spot, actual.Spot, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Spot")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAcceleratorsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators) + if !ok { + desiredNotPointer, ok := d.(ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators or *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators) + if !ok { + actualNotPointer, ok := a.(ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.AcceleratorCount, actual.AcceleratorCount, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AcceleratorCount")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.AcceleratorType, actual.AcceleratorType, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AcceleratorType")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.GpuPartitionSize, actual.GpuPartitionSize, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("GpuPartitionSize")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig) + if !ok { + desiredNotPointer, ok := d.(ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig or *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig) + if !ok { + actualNotPointer, ok := a.(ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig", a) + } + actual = &actualNotPointer + } + + if 
ds, err := dcl.Diff(desired.LocalSsdCount, actual.LocalSsdCount, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("LocalSsdCount")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscalingNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling) + if !ok { + desiredNotPointer, ok := d.(ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling or *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling) + if !ok { + actualNotPointer, ok := a.(ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.MinNodeCount, actual.MinNodeCount, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MinNodeCount")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.MaxNodeCount, actual.MaxNodeCount, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MaxNodeCount")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig) + if !ok { + desiredNotPointer, ok := d.(ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig or *ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig) + if !ok { + actualNotPointer, ok := a.(ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.ComponentVersion, actual.ComponentVersion, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ComponentVersion")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Properties, actual.Properties, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Properties")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterVirtualClusterConfigAuxiliaryServicesConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterVirtualClusterConfigAuxiliaryServicesConfig) + if !ok { + desiredNotPointer, ok := d.(ClusterVirtualClusterConfigAuxiliaryServicesConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterVirtualClusterConfigAuxiliaryServicesConfig or *ClusterVirtualClusterConfigAuxiliaryServicesConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterVirtualClusterConfigAuxiliaryServicesConfig) + if !ok { + actualNotPointer, ok := a.(ClusterVirtualClusterConfigAuxiliaryServicesConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterVirtualClusterConfigAuxiliaryServicesConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.MetastoreConfig, actual.MetastoreConfig, dcl.DiffInfo{ObjectFunction: compareClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfigNewStyle, EmptyObject: EmptyClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MetastoreConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.SparkHistoryServerConfig, actual.SparkHistoryServerConfig, dcl.DiffInfo{ObjectFunction: compareClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfigNewStyle, EmptyObject: EmptyClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SparkHistoryServerConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig) + if !ok { + desiredNotPointer, ok := d.(ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig or *ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig) + if !ok { + actualNotPointer, ok := a.(ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.DataprocMetastoreService, actual.DataprocMetastoreService, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DataprocMetastoreService")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig) + if !ok { + desiredNotPointer, ok := d.(ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig or *ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig) + if !ok { + actualNotPointer, ok := a.(ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.DataprocCluster, actual.DataprocCluster, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DataprocCluster")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +// urlNormalized returns a copy of the resource struct with values normalized +// for URL substitutions. For instance, it converts long-form self-links to +// short-form so they can be substituted in. 
+func (r *Cluster) urlNormalized() *Cluster { + normalized := dcl.Copy(*r).(Cluster) + normalized.Project = dcl.SelfLinkToName(r.Project) + normalized.Name = dcl.SelfLinkToName(r.Name) + normalized.ClusterUuid = dcl.SelfLinkToName(r.ClusterUuid) + normalized.Location = dcl.SelfLinkToName(r.Location) + return &normalized +} + +func (r *Cluster) updateURL(userBasePath, updateName string) (string, error) { + nr := r.urlNormalized() + if updateName == "UpdateCluster" { + fields := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/regions/{{ "{{" }}location{{ "}}" }}/clusters/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, fields), nil + + } + + return "", fmt.Errorf("unknown update name: %s", updateName) +} + +// marshal encodes the Cluster resource into JSON for a Create request, and +// performs transformations from the resource schema to the API schema if +// necessary. +func (r *Cluster) marshal(c *Client) ([]byte, error) { + m, err := expandCluster(c, r) + if err != nil { + return nil, fmt.Errorf("error marshalling Cluster: %w", err) + } + + return json.Marshal(m) +} + +// unmarshalCluster decodes JSON responses into the Cluster resource schema. +func unmarshalCluster(b []byte, c *Client, res *Cluster) (*Cluster, error) { + var m map[string]interface{} + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + return unmarshalMapCluster(m, c, res) +} + +func unmarshalMapCluster(m map[string]interface{}, c *Client, res *Cluster) (*Cluster, error) { + + flattened := flattenCluster(c, m, res) + if flattened == nil { + return nil, fmt.Errorf("attempted to flatten empty json object") + } + return flattened, nil +} + +// expandCluster expands Cluster into a JSON request object. 
+func expandCluster(c *Client, f *Cluster) (map[string]interface{}, error) { + m := make(map[string]interface{}) + res := f + _ = res + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Project into projectId: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["projectId"] = v + } + if v := f.Name; dcl.ValueShouldBeSent(v) { + m["clusterName"] = v + } + if v, err := expandClusterConfig(c, f.Config, res); err != nil { + return nil, fmt.Errorf("error expanding Config into config: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["config"] = v + } + if v := f.Labels; dcl.ValueShouldBeSent(v) { + m["labels"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Location into location: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["location"] = v + } + if v, err := expandClusterVirtualClusterConfig(c, f.VirtualClusterConfig, res); err != nil { + return nil, fmt.Errorf("error expanding VirtualClusterConfig into virtualClusterConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["virtualClusterConfig"] = v + } + + return m, nil +} + +// flattenCluster flattens Cluster from a JSON request object into the +// Cluster type. 
+func flattenCluster(c *Client, i interface{}, res *Cluster) *Cluster { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + if len(m) == 0 { + return nil + } + + resultRes := &Cluster{} + resultRes.Project = dcl.FlattenString(m["projectId"]) + resultRes.Name = dcl.FlattenString(m["clusterName"]) + resultRes.Config = flattenClusterConfig(c, m["config"], res) + resultRes.Labels = dcl.FlattenKeyValuePairs(m["labels"]) + resultRes.Status = flattenClusterStatus(c, m["status"], res) + resultRes.StatusHistory = flattenClusterStatusHistorySlice(c, m["statusHistory"], res) + resultRes.ClusterUuid = dcl.FlattenString(m["clusterUuid"]) + resultRes.Metrics = flattenClusterMetrics(c, m["metrics"], res) + resultRes.Location = dcl.FlattenString(m["location"]) + resultRes.VirtualClusterConfig = flattenClusterVirtualClusterConfig(c, m["virtualClusterConfig"], res) + + return resultRes +} + +// expandClusterConfigMap expands the contents of ClusterConfig into a JSON +// request object. +func expandClusterConfigMap(c *Client, f map[string]ClusterConfig, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterConfigSlice expands the contents of ClusterConfig into a JSON +// request object. +func expandClusterConfigSlice(c *Client, f []ClusterConfig, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterConfigMap flattens the contents of ClusterConfig from a JSON +// response object. 
+func flattenClusterConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterConfig{} + } + + if len(a) == 0 { + return map[string]ClusterConfig{} + } + + items := make(map[string]ClusterConfig) + for k, item := range a { + items[k] = *flattenClusterConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterConfigSlice flattens the contents of ClusterConfig from a JSON +// response object. +func flattenClusterConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterConfig { + a, ok := i.([]interface{}) + if !ok { + return []ClusterConfig{} + } + + if len(a) == 0 { + return []ClusterConfig{} + } + + items := make([]ClusterConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterConfig expands an instance of ClusterConfig into a JSON +// request object. 
+func expandClusterConfig(c *Client, f *ClusterConfig, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.StagingBucket; !dcl.IsEmptyValueIndirect(v) { + m["configBucket"] = v + } + if v := f.TempBucket; !dcl.IsEmptyValueIndirect(v) { + m["tempBucket"] = v + } + if v, err := expandClusterConfigGceClusterConfig(c, f.GceClusterConfig, res); err != nil { + return nil, fmt.Errorf("error expanding GceClusterConfig into gceClusterConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["gceClusterConfig"] = v + } + if v, err := expandClusterConfigMasterConfig(c, f.MasterConfig, res); err != nil { + return nil, fmt.Errorf("error expanding MasterConfig into masterConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["masterConfig"] = v + } + if v, err := expandClusterConfigWorkerConfig(c, f.WorkerConfig, res); err != nil { + return nil, fmt.Errorf("error expanding WorkerConfig into workerConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["workerConfig"] = v + } + if v, err := expandClusterConfigSecondaryWorkerConfig(c, f.SecondaryWorkerConfig, res); err != nil { + return nil, fmt.Errorf("error expanding SecondaryWorkerConfig into secondaryWorkerConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["secondaryWorkerConfig"] = v + } + if v, err := expandClusterConfigSoftwareConfig(c, f.SoftwareConfig, res); err != nil { + return nil, fmt.Errorf("error expanding SoftwareConfig into softwareConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["softwareConfig"] = v + } + if v, err := expandClusterConfigInitializationActionsSlice(c, f.InitializationActions, res); err != nil { + return nil, fmt.Errorf("error expanding InitializationActions into initializationActions: %w", err) + } else if v != nil { + m["initializationActions"] = v + } + if v, err := expandClusterConfigEncryptionConfig(c, f.EncryptionConfig, res); err != 
nil { + return nil, fmt.Errorf("error expanding EncryptionConfig into encryptionConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["encryptionConfig"] = v + } + if v, err := expandClusterConfigAutoscalingConfig(c, f.AutoscalingConfig, res); err != nil { + return nil, fmt.Errorf("error expanding AutoscalingConfig into autoscalingConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["autoscalingConfig"] = v + } + if v, err := expandClusterConfigSecurityConfig(c, f.SecurityConfig, res); err != nil { + return nil, fmt.Errorf("error expanding SecurityConfig into securityConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["securityConfig"] = v + } + if v, err := expandClusterConfigLifecycleConfig(c, f.LifecycleConfig, res); err != nil { + return nil, fmt.Errorf("error expanding LifecycleConfig into lifecycleConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["lifecycleConfig"] = v + } + if v, err := expandClusterConfigEndpointConfig(c, f.EndpointConfig, res); err != nil { + return nil, fmt.Errorf("error expanding EndpointConfig into endpointConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["endpointConfig"] = v + } + if v, err := expandClusterConfigGkeClusterConfig(c, f.GkeClusterConfig, res); err != nil { + return nil, fmt.Errorf("error expanding GkeClusterConfig into gkeClusterConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["gkeClusterConfig"] = v + } + if v, err := expandClusterConfigMetastoreConfig(c, f.MetastoreConfig, res); err != nil { + return nil, fmt.Errorf("error expanding MetastoreConfig into metastoreConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["metastoreConfig"] = v + } + if v, err := expandClusterConfigDataprocMetricConfig(c, f.DataprocMetricConfig, res); err != nil { + return nil, fmt.Errorf("error expanding DataprocMetricConfig into dataprocMetricConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["dataprocMetricConfig"] = v + } + + return 
m, nil +} + +// flattenClusterConfig flattens an instance of ClusterConfig from a JSON +// response object. +func flattenClusterConfig(c *Client, i interface{}, res *Cluster) *ClusterConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterConfig + } + r.StagingBucket = dcl.FlattenString(m["configBucket"]) + r.TempBucket = dcl.FlattenString(m["tempBucket"]) + r.GceClusterConfig = flattenClusterConfigGceClusterConfig(c, m["gceClusterConfig"], res) + r.MasterConfig = flattenClusterConfigMasterConfig(c, m["masterConfig"], res) + r.WorkerConfig = flattenClusterConfigWorkerConfig(c, m["workerConfig"], res) + r.SecondaryWorkerConfig = flattenClusterConfigSecondaryWorkerConfig(c, m["secondaryWorkerConfig"], res) + r.SoftwareConfig = flattenClusterConfigSoftwareConfig(c, m["softwareConfig"], res) + r.InitializationActions = flattenClusterConfigInitializationActionsSlice(c, m["initializationActions"], res) + r.EncryptionConfig = flattenClusterConfigEncryptionConfig(c, m["encryptionConfig"], res) + r.AutoscalingConfig = flattenClusterConfigAutoscalingConfig(c, m["autoscalingConfig"], res) + r.SecurityConfig = flattenClusterConfigSecurityConfig(c, m["securityConfig"], res) + r.LifecycleConfig = flattenClusterConfigLifecycleConfig(c, m["lifecycleConfig"], res) + r.EndpointConfig = flattenClusterConfigEndpointConfig(c, m["endpointConfig"], res) + r.GkeClusterConfig = flattenClusterConfigGkeClusterConfig(c, m["gkeClusterConfig"], res) + r.MetastoreConfig = flattenClusterConfigMetastoreConfig(c, m["metastoreConfig"], res) + r.DataprocMetricConfig = flattenClusterConfigDataprocMetricConfig(c, m["dataprocMetricConfig"], res) + + return r +} + +// expandClusterConfigGceClusterConfigMap expands the contents of ClusterConfigGceClusterConfig into a JSON +// request object. 
+func expandClusterConfigGceClusterConfigMap(c *Client, f map[string]ClusterConfigGceClusterConfig, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterConfigGceClusterConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterConfigGceClusterConfigSlice expands the contents of ClusterConfigGceClusterConfig into a JSON +// request object. +func expandClusterConfigGceClusterConfigSlice(c *Client, f []ClusterConfigGceClusterConfig, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterConfigGceClusterConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterConfigGceClusterConfigMap flattens the contents of ClusterConfigGceClusterConfig from a JSON +// response object. +func flattenClusterConfigGceClusterConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigGceClusterConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterConfigGceClusterConfig{} + } + + if len(a) == 0 { + return map[string]ClusterConfigGceClusterConfig{} + } + + items := make(map[string]ClusterConfigGceClusterConfig) + for k, item := range a { + items[k] = *flattenClusterConfigGceClusterConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterConfigGceClusterConfigSlice flattens the contents of ClusterConfigGceClusterConfig from a JSON +// response object. 
+func flattenClusterConfigGceClusterConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigGceClusterConfig { + a, ok := i.([]interface{}) + if !ok { + return []ClusterConfigGceClusterConfig{} + } + + if len(a) == 0 { + return []ClusterConfigGceClusterConfig{} + } + + items := make([]ClusterConfigGceClusterConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterConfigGceClusterConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterConfigGceClusterConfig expands an instance of ClusterConfigGceClusterConfig into a JSON +// request object. +func expandClusterConfigGceClusterConfig(c *Client, f *ClusterConfigGceClusterConfig, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Zone; !dcl.IsEmptyValueIndirect(v) { + m["zoneUri"] = v + } + if v := f.Network; !dcl.IsEmptyValueIndirect(v) { + m["networkUri"] = v + } + if v := f.Subnetwork; !dcl.IsEmptyValueIndirect(v) { + m["subnetworkUri"] = v + } + if v := f.InternalIPOnly; !dcl.IsEmptyValueIndirect(v) { + m["internalIpOnly"] = v + } + if v := f.PrivateIPv6GoogleAccess; !dcl.IsEmptyValueIndirect(v) { + m["privateIpv6GoogleAccess"] = v + } + if v := f.ServiceAccount; !dcl.IsEmptyValueIndirect(v) { + m["serviceAccount"] = v + } + if v := f.ServiceAccountScopes; v != nil { + m["serviceAccountScopes"] = v + } + if v := f.Tags; v != nil { + m["tags"] = v + } + if v := f.Metadata; !dcl.IsEmptyValueIndirect(v) { + m["metadata"] = v + } + if v, err := expandClusterConfigGceClusterConfigReservationAffinity(c, f.ReservationAffinity, res); err != nil { + return nil, fmt.Errorf("error expanding ReservationAffinity into reservationAffinity: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["reservationAffinity"] = v + } + if v, err := expandClusterConfigGceClusterConfigNodeGroupAffinity(c, f.NodeGroupAffinity, res); err != nil { + return nil, 
fmt.Errorf("error expanding NodeGroupAffinity into nodeGroupAffinity: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["nodeGroupAffinity"] = v + } + if v, err := expandClusterConfigGceClusterConfigShieldedInstanceConfig(c, f.ShieldedInstanceConfig, res); err != nil { + return nil, fmt.Errorf("error expanding ShieldedInstanceConfig into shieldedInstanceConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["shieldedInstanceConfig"] = v + } + if v, err := expandClusterConfigGceClusterConfigConfidentialInstanceConfig(c, f.ConfidentialInstanceConfig, res); err != nil { + return nil, fmt.Errorf("error expanding ConfidentialInstanceConfig into confidentialInstanceConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["confidentialInstanceConfig"] = v + } + + return m, nil +} + +// flattenClusterConfigGceClusterConfig flattens an instance of ClusterConfigGceClusterConfig from a JSON +// response object. +func flattenClusterConfigGceClusterConfig(c *Client, i interface{}, res *Cluster) *ClusterConfigGceClusterConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterConfigGceClusterConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterConfigGceClusterConfig + } + r.Zone = dcl.FlattenString(m["zoneUri"]) + r.Network = dcl.FlattenString(m["networkUri"]) + r.Subnetwork = dcl.FlattenString(m["subnetworkUri"]) + r.InternalIPOnly = dcl.FlattenBool(m["internalIpOnly"]) + r.PrivateIPv6GoogleAccess = flattenClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum(m["privateIpv6GoogleAccess"]) + r.ServiceAccount = dcl.FlattenString(m["serviceAccount"]) + r.ServiceAccountScopes = dcl.FlattenStringSlice(m["serviceAccountScopes"]) + r.Tags = dcl.FlattenStringSlice(m["tags"]) + r.Metadata = dcl.FlattenKeyValuePairs(m["metadata"]) + r.ReservationAffinity = flattenClusterConfigGceClusterConfigReservationAffinity(c, m["reservationAffinity"], res) + r.NodeGroupAffinity = 
flattenClusterConfigGceClusterConfigNodeGroupAffinity(c, m["nodeGroupAffinity"], res) + r.ShieldedInstanceConfig = flattenClusterConfigGceClusterConfigShieldedInstanceConfig(c, m["shieldedInstanceConfig"], res) + r.ConfidentialInstanceConfig = flattenClusterConfigGceClusterConfigConfidentialInstanceConfig(c, m["confidentialInstanceConfig"], res) + + return r +} + +// expandClusterConfigGceClusterConfigReservationAffinityMap expands the contents of ClusterConfigGceClusterConfigReservationAffinity into a JSON +// request object. +func expandClusterConfigGceClusterConfigReservationAffinityMap(c *Client, f map[string]ClusterConfigGceClusterConfigReservationAffinity, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterConfigGceClusterConfigReservationAffinity(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterConfigGceClusterConfigReservationAffinitySlice expands the contents of ClusterConfigGceClusterConfigReservationAffinity into a JSON +// request object. +func expandClusterConfigGceClusterConfigReservationAffinitySlice(c *Client, f []ClusterConfigGceClusterConfigReservationAffinity, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterConfigGceClusterConfigReservationAffinity(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterConfigGceClusterConfigReservationAffinityMap flattens the contents of ClusterConfigGceClusterConfigReservationAffinity from a JSON +// response object. 
+func flattenClusterConfigGceClusterConfigReservationAffinityMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigGceClusterConfigReservationAffinity { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterConfigGceClusterConfigReservationAffinity{} + } + + if len(a) == 0 { + return map[string]ClusterConfigGceClusterConfigReservationAffinity{} + } + + items := make(map[string]ClusterConfigGceClusterConfigReservationAffinity) + for k, item := range a { + items[k] = *flattenClusterConfigGceClusterConfigReservationAffinity(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterConfigGceClusterConfigReservationAffinitySlice flattens the contents of ClusterConfigGceClusterConfigReservationAffinity from a JSON +// response object. +func flattenClusterConfigGceClusterConfigReservationAffinitySlice(c *Client, i interface{}, res *Cluster) []ClusterConfigGceClusterConfigReservationAffinity { + a, ok := i.([]interface{}) + if !ok { + return []ClusterConfigGceClusterConfigReservationAffinity{} + } + + if len(a) == 0 { + return []ClusterConfigGceClusterConfigReservationAffinity{} + } + + items := make([]ClusterConfigGceClusterConfigReservationAffinity, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterConfigGceClusterConfigReservationAffinity(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterConfigGceClusterConfigReservationAffinity expands an instance of ClusterConfigGceClusterConfigReservationAffinity into a JSON +// request object. 
+func expandClusterConfigGceClusterConfigReservationAffinity(c *Client, f *ClusterConfigGceClusterConfigReservationAffinity, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.ConsumeReservationType; !dcl.IsEmptyValueIndirect(v) { + m["consumeReservationType"] = v + } + if v := f.Key; !dcl.IsEmptyValueIndirect(v) { + m["key"] = v + } + if v := f.Values; v != nil { + m["values"] = v + } + + return m, nil +} + +// flattenClusterConfigGceClusterConfigReservationAffinity flattens an instance of ClusterConfigGceClusterConfigReservationAffinity from a JSON +// response object. +func flattenClusterConfigGceClusterConfigReservationAffinity(c *Client, i interface{}, res *Cluster) *ClusterConfigGceClusterConfigReservationAffinity { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterConfigGceClusterConfigReservationAffinity{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterConfigGceClusterConfigReservationAffinity + } + r.ConsumeReservationType = flattenClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum(m["consumeReservationType"]) + r.Key = dcl.FlattenString(m["key"]) + r.Values = dcl.FlattenStringSlice(m["values"]) + + return r +} + +// expandClusterConfigGceClusterConfigNodeGroupAffinityMap expands the contents of ClusterConfigGceClusterConfigNodeGroupAffinity into a JSON +// request object. 
+func expandClusterConfigGceClusterConfigNodeGroupAffinityMap(c *Client, f map[string]ClusterConfigGceClusterConfigNodeGroupAffinity, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterConfigGceClusterConfigNodeGroupAffinity(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterConfigGceClusterConfigNodeGroupAffinitySlice expands the contents of ClusterConfigGceClusterConfigNodeGroupAffinity into a JSON +// request object. +func expandClusterConfigGceClusterConfigNodeGroupAffinitySlice(c *Client, f []ClusterConfigGceClusterConfigNodeGroupAffinity, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterConfigGceClusterConfigNodeGroupAffinity(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterConfigGceClusterConfigNodeGroupAffinityMap flattens the contents of ClusterConfigGceClusterConfigNodeGroupAffinity from a JSON +// response object. 
+func flattenClusterConfigGceClusterConfigNodeGroupAffinityMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigGceClusterConfigNodeGroupAffinity { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterConfigGceClusterConfigNodeGroupAffinity{} + } + + if len(a) == 0 { + return map[string]ClusterConfigGceClusterConfigNodeGroupAffinity{} + } + + items := make(map[string]ClusterConfigGceClusterConfigNodeGroupAffinity) + for k, item := range a { + items[k] = *flattenClusterConfigGceClusterConfigNodeGroupAffinity(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterConfigGceClusterConfigNodeGroupAffinitySlice flattens the contents of ClusterConfigGceClusterConfigNodeGroupAffinity from a JSON +// response object. +func flattenClusterConfigGceClusterConfigNodeGroupAffinitySlice(c *Client, i interface{}, res *Cluster) []ClusterConfigGceClusterConfigNodeGroupAffinity { + a, ok := i.([]interface{}) + if !ok { + return []ClusterConfigGceClusterConfigNodeGroupAffinity{} + } + + if len(a) == 0 { + return []ClusterConfigGceClusterConfigNodeGroupAffinity{} + } + + items := make([]ClusterConfigGceClusterConfigNodeGroupAffinity, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterConfigGceClusterConfigNodeGroupAffinity(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterConfigGceClusterConfigNodeGroupAffinity expands an instance of ClusterConfigGceClusterConfigNodeGroupAffinity into a JSON +// request object. 
+func expandClusterConfigGceClusterConfigNodeGroupAffinity(c *Client, f *ClusterConfigGceClusterConfigNodeGroupAffinity, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.NodeGroup; !dcl.IsEmptyValueIndirect(v) { + m["nodeGroupUri"] = v + } + + return m, nil +} + +// flattenClusterConfigGceClusterConfigNodeGroupAffinity flattens an instance of ClusterConfigGceClusterConfigNodeGroupAffinity from a JSON +// response object. +func flattenClusterConfigGceClusterConfigNodeGroupAffinity(c *Client, i interface{}, res *Cluster) *ClusterConfigGceClusterConfigNodeGroupAffinity { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterConfigGceClusterConfigNodeGroupAffinity{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterConfigGceClusterConfigNodeGroupAffinity + } + r.NodeGroup = dcl.FlattenString(m["nodeGroupUri"]) + + return r +} + +// expandClusterConfigGceClusterConfigShieldedInstanceConfigMap expands the contents of ClusterConfigGceClusterConfigShieldedInstanceConfig into a JSON +// request object. +func expandClusterConfigGceClusterConfigShieldedInstanceConfigMap(c *Client, f map[string]ClusterConfigGceClusterConfigShieldedInstanceConfig, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterConfigGceClusterConfigShieldedInstanceConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterConfigGceClusterConfigShieldedInstanceConfigSlice expands the contents of ClusterConfigGceClusterConfigShieldedInstanceConfig into a JSON +// request object. 
+func expandClusterConfigGceClusterConfigShieldedInstanceConfigSlice(c *Client, f []ClusterConfigGceClusterConfigShieldedInstanceConfig, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterConfigGceClusterConfigShieldedInstanceConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterConfigGceClusterConfigShieldedInstanceConfigMap flattens the contents of ClusterConfigGceClusterConfigShieldedInstanceConfig from a JSON +// response object. +func flattenClusterConfigGceClusterConfigShieldedInstanceConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigGceClusterConfigShieldedInstanceConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterConfigGceClusterConfigShieldedInstanceConfig{} + } + + if len(a) == 0 { + return map[string]ClusterConfigGceClusterConfigShieldedInstanceConfig{} + } + + items := make(map[string]ClusterConfigGceClusterConfigShieldedInstanceConfig) + for k, item := range a { + items[k] = *flattenClusterConfigGceClusterConfigShieldedInstanceConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterConfigGceClusterConfigShieldedInstanceConfigSlice flattens the contents of ClusterConfigGceClusterConfigShieldedInstanceConfig from a JSON +// response object. 
+func flattenClusterConfigGceClusterConfigShieldedInstanceConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigGceClusterConfigShieldedInstanceConfig { + a, ok := i.([]interface{}) + if !ok { + return []ClusterConfigGceClusterConfigShieldedInstanceConfig{} + } + + if len(a) == 0 { + return []ClusterConfigGceClusterConfigShieldedInstanceConfig{} + } + + items := make([]ClusterConfigGceClusterConfigShieldedInstanceConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterConfigGceClusterConfigShieldedInstanceConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterConfigGceClusterConfigShieldedInstanceConfig expands an instance of ClusterConfigGceClusterConfigShieldedInstanceConfig into a JSON +// request object. +func expandClusterConfigGceClusterConfigShieldedInstanceConfig(c *Client, f *ClusterConfigGceClusterConfigShieldedInstanceConfig, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.EnableSecureBoot; !dcl.IsEmptyValueIndirect(v) { + m["enableSecureBoot"] = v + } + if v := f.EnableVtpm; !dcl.IsEmptyValueIndirect(v) { + m["enableVtpm"] = v + } + if v := f.EnableIntegrityMonitoring; !dcl.IsEmptyValueIndirect(v) { + m["enableIntegrityMonitoring"] = v + } + + return m, nil +} + +// flattenClusterConfigGceClusterConfigShieldedInstanceConfig flattens an instance of ClusterConfigGceClusterConfigShieldedInstanceConfig from a JSON +// response object. 
+func flattenClusterConfigGceClusterConfigShieldedInstanceConfig(c *Client, i interface{}, res *Cluster) *ClusterConfigGceClusterConfigShieldedInstanceConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterConfigGceClusterConfigShieldedInstanceConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterConfigGceClusterConfigShieldedInstanceConfig + } + r.EnableSecureBoot = dcl.FlattenBool(m["enableSecureBoot"]) + r.EnableVtpm = dcl.FlattenBool(m["enableVtpm"]) + r.EnableIntegrityMonitoring = dcl.FlattenBool(m["enableIntegrityMonitoring"]) + + return r +} + +// expandClusterConfigGceClusterConfigConfidentialInstanceConfigMap expands the contents of ClusterConfigGceClusterConfigConfidentialInstanceConfig into a JSON +// request object. +func expandClusterConfigGceClusterConfigConfidentialInstanceConfigMap(c *Client, f map[string]ClusterConfigGceClusterConfigConfidentialInstanceConfig, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterConfigGceClusterConfigConfidentialInstanceConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterConfigGceClusterConfigConfidentialInstanceConfigSlice expands the contents of ClusterConfigGceClusterConfigConfidentialInstanceConfig into a JSON +// request object. 
+func expandClusterConfigGceClusterConfigConfidentialInstanceConfigSlice(c *Client, f []ClusterConfigGceClusterConfigConfidentialInstanceConfig, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterConfigGceClusterConfigConfidentialInstanceConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterConfigGceClusterConfigConfidentialInstanceConfigMap flattens the contents of ClusterConfigGceClusterConfigConfidentialInstanceConfig from a JSON +// response object. +func flattenClusterConfigGceClusterConfigConfidentialInstanceConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigGceClusterConfigConfidentialInstanceConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterConfigGceClusterConfigConfidentialInstanceConfig{} + } + + if len(a) == 0 { + return map[string]ClusterConfigGceClusterConfigConfidentialInstanceConfig{} + } + + items := make(map[string]ClusterConfigGceClusterConfigConfidentialInstanceConfig) + for k, item := range a { + items[k] = *flattenClusterConfigGceClusterConfigConfidentialInstanceConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterConfigGceClusterConfigConfidentialInstanceConfigSlice flattens the contents of ClusterConfigGceClusterConfigConfidentialInstanceConfig from a JSON +// response object. 
+func flattenClusterConfigGceClusterConfigConfidentialInstanceConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigGceClusterConfigConfidentialInstanceConfig { + a, ok := i.([]interface{}) + if !ok { + return []ClusterConfigGceClusterConfigConfidentialInstanceConfig{} + } + + if len(a) == 0 { + return []ClusterConfigGceClusterConfigConfidentialInstanceConfig{} + } + + items := make([]ClusterConfigGceClusterConfigConfidentialInstanceConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterConfigGceClusterConfigConfidentialInstanceConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterConfigGceClusterConfigConfidentialInstanceConfig expands an instance of ClusterConfigGceClusterConfigConfidentialInstanceConfig into a JSON +// request object. +func expandClusterConfigGceClusterConfigConfidentialInstanceConfig(c *Client, f *ClusterConfigGceClusterConfigConfidentialInstanceConfig, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.EnableConfidentialCompute; !dcl.IsEmptyValueIndirect(v) { + m["enableConfidentialCompute"] = v + } + + return m, nil +} + +// flattenClusterConfigGceClusterConfigConfidentialInstanceConfig flattens an instance of ClusterConfigGceClusterConfigConfidentialInstanceConfig from a JSON +// response object. 
+func flattenClusterConfigGceClusterConfigConfidentialInstanceConfig(c *Client, i interface{}, res *Cluster) *ClusterConfigGceClusterConfigConfidentialInstanceConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterConfigGceClusterConfigConfidentialInstanceConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterConfigGceClusterConfigConfidentialInstanceConfig + } + r.EnableConfidentialCompute = dcl.FlattenBool(m["enableConfidentialCompute"]) + + return r +} + +// expandClusterConfigMasterConfigMap expands the contents of ClusterConfigMasterConfig into a JSON +// request object. +func expandClusterConfigMasterConfigMap(c *Client, f map[string]ClusterConfigMasterConfig, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterConfigMasterConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterConfigMasterConfigSlice expands the contents of ClusterConfigMasterConfig into a JSON +// request object. +func expandClusterConfigMasterConfigSlice(c *Client, f []ClusterConfigMasterConfig, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterConfigMasterConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterConfigMasterConfigMap flattens the contents of ClusterConfigMasterConfig from a JSON +// response object. 
+func flattenClusterConfigMasterConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigMasterConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterConfigMasterConfig{} + } + + if len(a) == 0 { + return map[string]ClusterConfigMasterConfig{} + } + + items := make(map[string]ClusterConfigMasterConfig) + for k, item := range a { + items[k] = *flattenClusterConfigMasterConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterConfigMasterConfigSlice flattens the contents of ClusterConfigMasterConfig from a JSON +// response object. +func flattenClusterConfigMasterConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigMasterConfig { + a, ok := i.([]interface{}) + if !ok { + return []ClusterConfigMasterConfig{} + } + + if len(a) == 0 { + return []ClusterConfigMasterConfig{} + } + + items := make([]ClusterConfigMasterConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterConfigMasterConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterConfigMasterConfig expands an instance of ClusterConfigMasterConfig into a JSON +// request object. 
+func expandClusterConfigMasterConfig(c *Client, f *ClusterConfigMasterConfig, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.NumInstances; !dcl.IsEmptyValueIndirect(v) { + m["numInstances"] = v + } + if v := f.Image; !dcl.IsEmptyValueIndirect(v) { + m["imageUri"] = v + } + if v := f.MachineType; !dcl.IsEmptyValueIndirect(v) { + m["machineTypeUri"] = v + } + if v, err := expandClusterConfigMasterConfigDiskConfig(c, f.DiskConfig, res); err != nil { + return nil, fmt.Errorf("error expanding DiskConfig into diskConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["diskConfig"] = v + } + if v := f.Preemptibility; !dcl.IsEmptyValueIndirect(v) { + m["preemptibility"] = v + } + if v, err := expandClusterConfigMasterConfigAcceleratorsSlice(c, f.Accelerators, res); err != nil { + return nil, fmt.Errorf("error expanding Accelerators into accelerators: %w", err) + } else if v != nil { + m["accelerators"] = v + } + if v := f.MinCpuPlatform; !dcl.IsEmptyValueIndirect(v) { + m["minCpuPlatform"] = v + } + + return m, nil +} + +// flattenClusterConfigMasterConfig flattens an instance of ClusterConfigMasterConfig from a JSON +// response object. 
+func flattenClusterConfigMasterConfig(c *Client, i interface{}, res *Cluster) *ClusterConfigMasterConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterConfigMasterConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterConfigMasterConfig + } + r.NumInstances = dcl.FlattenInteger(m["numInstances"]) + r.InstanceNames = dcl.FlattenStringSlice(m["instanceNames"]) + r.Image = dcl.FlattenString(m["imageUri"]) + r.MachineType = dcl.FlattenString(m["machineTypeUri"]) + r.DiskConfig = flattenClusterConfigMasterConfigDiskConfig(c, m["diskConfig"], res) + r.IsPreemptible = dcl.FlattenBool(m["isPreemptible"]) + r.Preemptibility = flattenClusterConfigMasterConfigPreemptibilityEnum(m["preemptibility"]) + r.ManagedGroupConfig = flattenClusterConfigMasterConfigManagedGroupConfig(c, m["managedGroupConfig"], res) + r.Accelerators = flattenClusterConfigMasterConfigAcceleratorsSlice(c, m["accelerators"], res) + r.MinCpuPlatform = dcl.FlattenString(m["minCpuPlatform"]) + r.InstanceReferences = flattenClusterConfigMasterConfigInstanceReferencesSlice(c, m["instanceReferences"], res) + + return r +} + +// expandClusterConfigMasterConfigDiskConfigMap expands the contents of ClusterConfigMasterConfigDiskConfig into a JSON +// request object. +func expandClusterConfigMasterConfigDiskConfigMap(c *Client, f map[string]ClusterConfigMasterConfigDiskConfig, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterConfigMasterConfigDiskConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterConfigMasterConfigDiskConfigSlice expands the contents of ClusterConfigMasterConfigDiskConfig into a JSON +// request object. 
+func expandClusterConfigMasterConfigDiskConfigSlice(c *Client, f []ClusterConfigMasterConfigDiskConfig, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterConfigMasterConfigDiskConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterConfigMasterConfigDiskConfigMap flattens the contents of ClusterConfigMasterConfigDiskConfig from a JSON +// response object. +func flattenClusterConfigMasterConfigDiskConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigMasterConfigDiskConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterConfigMasterConfigDiskConfig{} + } + + if len(a) == 0 { + return map[string]ClusterConfigMasterConfigDiskConfig{} + } + + items := make(map[string]ClusterConfigMasterConfigDiskConfig) + for k, item := range a { + items[k] = *flattenClusterConfigMasterConfigDiskConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterConfigMasterConfigDiskConfigSlice flattens the contents of ClusterConfigMasterConfigDiskConfig from a JSON +// response object. +func flattenClusterConfigMasterConfigDiskConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigMasterConfigDiskConfig { + a, ok := i.([]interface{}) + if !ok { + return []ClusterConfigMasterConfigDiskConfig{} + } + + if len(a) == 0 { + return []ClusterConfigMasterConfigDiskConfig{} + } + + items := make([]ClusterConfigMasterConfigDiskConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterConfigMasterConfigDiskConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterConfigMasterConfigDiskConfig expands an instance of ClusterConfigMasterConfigDiskConfig into a JSON +// request object. 
+func expandClusterConfigMasterConfigDiskConfig(c *Client, f *ClusterConfigMasterConfigDiskConfig, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.BootDiskType; !dcl.IsEmptyValueIndirect(v) { + m["bootDiskType"] = v + } + if v := f.BootDiskSizeGb; !dcl.IsEmptyValueIndirect(v) { + m["bootDiskSizeGb"] = v + } + if v := f.NumLocalSsds; !dcl.IsEmptyValueIndirect(v) { + m["numLocalSsds"] = v + } + if v := f.LocalSsdInterface; !dcl.IsEmptyValueIndirect(v) { + m["localSsdInterface"] = v + } + + return m, nil +} + +// flattenClusterConfigMasterConfigDiskConfig flattens an instance of ClusterConfigMasterConfigDiskConfig from a JSON +// response object. +func flattenClusterConfigMasterConfigDiskConfig(c *Client, i interface{}, res *Cluster) *ClusterConfigMasterConfigDiskConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterConfigMasterConfigDiskConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterConfigMasterConfigDiskConfig + } + r.BootDiskType = dcl.FlattenString(m["bootDiskType"]) + r.BootDiskSizeGb = dcl.FlattenInteger(m["bootDiskSizeGb"]) + r.NumLocalSsds = dcl.FlattenInteger(m["numLocalSsds"]) + r.LocalSsdInterface = dcl.FlattenString(m["localSsdInterface"]) + + return r +} + +// expandClusterConfigMasterConfigManagedGroupConfigMap expands the contents of ClusterConfigMasterConfigManagedGroupConfig into a JSON +// request object. 
+func expandClusterConfigMasterConfigManagedGroupConfigMap(c *Client, f map[string]ClusterConfigMasterConfigManagedGroupConfig, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterConfigMasterConfigManagedGroupConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterConfigMasterConfigManagedGroupConfigSlice expands the contents of ClusterConfigMasterConfigManagedGroupConfig into a JSON +// request object. +func expandClusterConfigMasterConfigManagedGroupConfigSlice(c *Client, f []ClusterConfigMasterConfigManagedGroupConfig, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterConfigMasterConfigManagedGroupConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterConfigMasterConfigManagedGroupConfigMap flattens the contents of ClusterConfigMasterConfigManagedGroupConfig from a JSON +// response object. +func flattenClusterConfigMasterConfigManagedGroupConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigMasterConfigManagedGroupConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterConfigMasterConfigManagedGroupConfig{} + } + + if len(a) == 0 { + return map[string]ClusterConfigMasterConfigManagedGroupConfig{} + } + + items := make(map[string]ClusterConfigMasterConfigManagedGroupConfig) + for k, item := range a { + items[k] = *flattenClusterConfigMasterConfigManagedGroupConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterConfigMasterConfigManagedGroupConfigSlice flattens the contents of ClusterConfigMasterConfigManagedGroupConfig from a JSON +// response object. 
+func flattenClusterConfigMasterConfigManagedGroupConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigMasterConfigManagedGroupConfig { + a, ok := i.([]interface{}) + if !ok { + return []ClusterConfigMasterConfigManagedGroupConfig{} + } + + if len(a) == 0 { + return []ClusterConfigMasterConfigManagedGroupConfig{} + } + + items := make([]ClusterConfigMasterConfigManagedGroupConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterConfigMasterConfigManagedGroupConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterConfigMasterConfigManagedGroupConfig expands an instance of ClusterConfigMasterConfigManagedGroupConfig into a JSON +// request object. +func expandClusterConfigMasterConfigManagedGroupConfig(c *Client, f *ClusterConfigMasterConfigManagedGroupConfig, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + + return m, nil +} + +// flattenClusterConfigMasterConfigManagedGroupConfig flattens an instance of ClusterConfigMasterConfigManagedGroupConfig from a JSON +// response object. +func flattenClusterConfigMasterConfigManagedGroupConfig(c *Client, i interface{}, res *Cluster) *ClusterConfigMasterConfigManagedGroupConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterConfigMasterConfigManagedGroupConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterConfigMasterConfigManagedGroupConfig + } + r.InstanceTemplateName = dcl.FlattenString(m["instanceTemplateName"]) + r.InstanceGroupManagerName = dcl.FlattenString(m["instanceGroupManagerName"]) + + return r +} + +// expandClusterConfigMasterConfigAcceleratorsMap expands the contents of ClusterConfigMasterConfigAccelerators into a JSON +// request object. 
+func expandClusterConfigMasterConfigAcceleratorsMap(c *Client, f map[string]ClusterConfigMasterConfigAccelerators, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterConfigMasterConfigAccelerators(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterConfigMasterConfigAcceleratorsSlice expands the contents of ClusterConfigMasterConfigAccelerators into a JSON +// request object. +func expandClusterConfigMasterConfigAcceleratorsSlice(c *Client, f []ClusterConfigMasterConfigAccelerators, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterConfigMasterConfigAccelerators(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterConfigMasterConfigAcceleratorsMap flattens the contents of ClusterConfigMasterConfigAccelerators from a JSON +// response object. +func flattenClusterConfigMasterConfigAcceleratorsMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigMasterConfigAccelerators { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterConfigMasterConfigAccelerators{} + } + + if len(a) == 0 { + return map[string]ClusterConfigMasterConfigAccelerators{} + } + + items := make(map[string]ClusterConfigMasterConfigAccelerators) + for k, item := range a { + items[k] = *flattenClusterConfigMasterConfigAccelerators(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterConfigMasterConfigAcceleratorsSlice flattens the contents of ClusterConfigMasterConfigAccelerators from a JSON +// response object. 
+func flattenClusterConfigMasterConfigAcceleratorsSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigMasterConfigAccelerators { + a, ok := i.([]interface{}) + if !ok { + return []ClusterConfigMasterConfigAccelerators{} + } + + if len(a) == 0 { + return []ClusterConfigMasterConfigAccelerators{} + } + + items := make([]ClusterConfigMasterConfigAccelerators, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterConfigMasterConfigAccelerators(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterConfigMasterConfigAccelerators expands an instance of ClusterConfigMasterConfigAccelerators into a JSON +// request object. +func expandClusterConfigMasterConfigAccelerators(c *Client, f *ClusterConfigMasterConfigAccelerators, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.AcceleratorType; !dcl.IsEmptyValueIndirect(v) { + m["acceleratorTypeUri"] = v + } + if v := f.AcceleratorCount; !dcl.IsEmptyValueIndirect(v) { + m["acceleratorCount"] = v + } + + return m, nil +} + +// flattenClusterConfigMasterConfigAccelerators flattens an instance of ClusterConfigMasterConfigAccelerators from a JSON +// response object. +func flattenClusterConfigMasterConfigAccelerators(c *Client, i interface{}, res *Cluster) *ClusterConfigMasterConfigAccelerators { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterConfigMasterConfigAccelerators{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterConfigMasterConfigAccelerators + } + r.AcceleratorType = dcl.FlattenString(m["acceleratorTypeUri"]) + r.AcceleratorCount = dcl.FlattenInteger(m["acceleratorCount"]) + + return r +} + +// expandClusterConfigMasterConfigInstanceReferencesMap expands the contents of ClusterConfigMasterConfigInstanceReferences into a JSON +// request object. 
+func expandClusterConfigMasterConfigInstanceReferencesMap(c *Client, f map[string]ClusterConfigMasterConfigInstanceReferences, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterConfigMasterConfigInstanceReferences(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterConfigMasterConfigInstanceReferencesSlice expands the contents of ClusterConfigMasterConfigInstanceReferences into a JSON +// request object. +func expandClusterConfigMasterConfigInstanceReferencesSlice(c *Client, f []ClusterConfigMasterConfigInstanceReferences, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterConfigMasterConfigInstanceReferences(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterConfigMasterConfigInstanceReferencesMap flattens the contents of ClusterConfigMasterConfigInstanceReferences from a JSON +// response object. +func flattenClusterConfigMasterConfigInstanceReferencesMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigMasterConfigInstanceReferences { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterConfigMasterConfigInstanceReferences{} + } + + if len(a) == 0 { + return map[string]ClusterConfigMasterConfigInstanceReferences{} + } + + items := make(map[string]ClusterConfigMasterConfigInstanceReferences) + for k, item := range a { + items[k] = *flattenClusterConfigMasterConfigInstanceReferences(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterConfigMasterConfigInstanceReferencesSlice flattens the contents of ClusterConfigMasterConfigInstanceReferences from a JSON +// response object. 
+func flattenClusterConfigMasterConfigInstanceReferencesSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigMasterConfigInstanceReferences { + a, ok := i.([]interface{}) + if !ok { + return []ClusterConfigMasterConfigInstanceReferences{} + } + + if len(a) == 0 { + return []ClusterConfigMasterConfigInstanceReferences{} + } + + items := make([]ClusterConfigMasterConfigInstanceReferences, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterConfigMasterConfigInstanceReferences(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterConfigMasterConfigInstanceReferences expands an instance of ClusterConfigMasterConfigInstanceReferences into a JSON +// request object. +func expandClusterConfigMasterConfigInstanceReferences(c *Client, f *ClusterConfigMasterConfigInstanceReferences, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.InstanceName; !dcl.IsEmptyValueIndirect(v) { + m["instanceName"] = v + } + if v := f.InstanceId; !dcl.IsEmptyValueIndirect(v) { + m["instanceId"] = v + } + if v := f.PublicKey; !dcl.IsEmptyValueIndirect(v) { + m["publicKey"] = v + } + if v := f.PublicEciesKey; !dcl.IsEmptyValueIndirect(v) { + m["publicEciesKey"] = v + } + + return m, nil +} + +// flattenClusterConfigMasterConfigInstanceReferences flattens an instance of ClusterConfigMasterConfigInstanceReferences from a JSON +// response object. 
+func flattenClusterConfigMasterConfigInstanceReferences(c *Client, i interface{}, res *Cluster) *ClusterConfigMasterConfigInstanceReferences { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterConfigMasterConfigInstanceReferences{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterConfigMasterConfigInstanceReferences + } + r.InstanceName = dcl.FlattenString(m["instanceName"]) + r.InstanceId = dcl.FlattenString(m["instanceId"]) + r.PublicKey = dcl.FlattenString(m["publicKey"]) + r.PublicEciesKey = dcl.FlattenString(m["publicEciesKey"]) + + return r +} + +// expandClusterConfigWorkerConfigMap expands the contents of ClusterConfigWorkerConfig into a JSON +// request object. +func expandClusterConfigWorkerConfigMap(c *Client, f map[string]ClusterConfigWorkerConfig, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterConfigWorkerConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterConfigWorkerConfigSlice expands the contents of ClusterConfigWorkerConfig into a JSON +// request object. +func expandClusterConfigWorkerConfigSlice(c *Client, f []ClusterConfigWorkerConfig, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterConfigWorkerConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterConfigWorkerConfigMap flattens the contents of ClusterConfigWorkerConfig from a JSON +// response object. 
+func flattenClusterConfigWorkerConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigWorkerConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterConfigWorkerConfig{} + } + + if len(a) == 0 { + return map[string]ClusterConfigWorkerConfig{} + } + + items := make(map[string]ClusterConfigWorkerConfig) + for k, item := range a { + items[k] = *flattenClusterConfigWorkerConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterConfigWorkerConfigSlice flattens the contents of ClusterConfigWorkerConfig from a JSON +// response object. +func flattenClusterConfigWorkerConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigWorkerConfig { + a, ok := i.([]interface{}) + if !ok { + return []ClusterConfigWorkerConfig{} + } + + if len(a) == 0 { + return []ClusterConfigWorkerConfig{} + } + + items := make([]ClusterConfigWorkerConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterConfigWorkerConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterConfigWorkerConfig expands an instance of ClusterConfigWorkerConfig into a JSON +// request object. 
+func expandClusterConfigWorkerConfig(c *Client, f *ClusterConfigWorkerConfig, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.NumInstances; !dcl.IsEmptyValueIndirect(v) { + m["numInstances"] = v + } + if v := f.Image; !dcl.IsEmptyValueIndirect(v) { + m["imageUri"] = v + } + if v := f.MachineType; !dcl.IsEmptyValueIndirect(v) { + m["machineTypeUri"] = v + } + if v, err := expandClusterConfigWorkerConfigDiskConfig(c, f.DiskConfig, res); err != nil { + return nil, fmt.Errorf("error expanding DiskConfig into diskConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["diskConfig"] = v + } + if v := f.Preemptibility; !dcl.IsEmptyValueIndirect(v) { + m["preemptibility"] = v + } + if v, err := expandClusterConfigWorkerConfigAcceleratorsSlice(c, f.Accelerators, res); err != nil { + return nil, fmt.Errorf("error expanding Accelerators into accelerators: %w", err) + } else if v != nil { + m["accelerators"] = v + } + if v := f.MinCpuPlatform; !dcl.IsEmptyValueIndirect(v) { + m["minCpuPlatform"] = v + } + + return m, nil +} + +// flattenClusterConfigWorkerConfig flattens an instance of ClusterConfigWorkerConfig from a JSON +// response object. 
+func flattenClusterConfigWorkerConfig(c *Client, i interface{}, res *Cluster) *ClusterConfigWorkerConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterConfigWorkerConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterConfigWorkerConfig + } + r.NumInstances = dcl.FlattenInteger(m["numInstances"]) + r.InstanceNames = dcl.FlattenStringSlice(m["instanceNames"]) + r.Image = dcl.FlattenString(m["imageUri"]) + r.MachineType = dcl.FlattenString(m["machineTypeUri"]) + r.DiskConfig = flattenClusterConfigWorkerConfigDiskConfig(c, m["diskConfig"], res) + r.IsPreemptible = dcl.FlattenBool(m["isPreemptible"]) + r.Preemptibility = flattenClusterConfigWorkerConfigPreemptibilityEnum(m["preemptibility"]) + r.ManagedGroupConfig = flattenClusterConfigWorkerConfigManagedGroupConfig(c, m["managedGroupConfig"], res) + r.Accelerators = flattenClusterConfigWorkerConfigAcceleratorsSlice(c, m["accelerators"], res) + r.MinCpuPlatform = dcl.FlattenString(m["minCpuPlatform"]) + r.InstanceReferences = flattenClusterConfigWorkerConfigInstanceReferencesSlice(c, m["instanceReferences"], res) + + return r +} + +// expandClusterConfigWorkerConfigDiskConfigMap expands the contents of ClusterConfigWorkerConfigDiskConfig into a JSON +// request object. +func expandClusterConfigWorkerConfigDiskConfigMap(c *Client, f map[string]ClusterConfigWorkerConfigDiskConfig, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterConfigWorkerConfigDiskConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterConfigWorkerConfigDiskConfigSlice expands the contents of ClusterConfigWorkerConfigDiskConfig into a JSON +// request object. 
+func expandClusterConfigWorkerConfigDiskConfigSlice(c *Client, f []ClusterConfigWorkerConfigDiskConfig, res *Cluster) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	out := make([]map[string]interface{}, 0, len(f))
+	for idx := range f {
+		exp, err := expandClusterConfigWorkerConfigDiskConfig(c, &f[idx], res)
+		if err != nil {
+			return nil, err
+		}
+		out = append(out, exp)
+	}
+
+	return out, nil
+}
+
+// flattenClusterConfigWorkerConfigDiskConfigMap flattens a JSON map of
+// ClusterConfigWorkerConfigDiskConfig objects; non-map or empty input yields an empty map.
+func flattenClusterConfigWorkerConfigDiskConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigWorkerConfigDiskConfig {
+	raw, ok := i.(map[string]interface{})
+	if !ok || len(raw) == 0 {
+		return map[string]ClusterConfigWorkerConfigDiskConfig{}
+	}
+
+	out := make(map[string]ClusterConfigWorkerConfigDiskConfig, len(raw))
+	for key, value := range raw {
+		out[key] = *flattenClusterConfigWorkerConfigDiskConfig(c, value.(map[string]interface{}), res)
+	}
+
+	return out
+}
+
+// flattenClusterConfigWorkerConfigDiskConfigSlice flattens a JSON list of
+// ClusterConfigWorkerConfigDiskConfig objects; non-list or empty input yields an empty slice.
+func flattenClusterConfigWorkerConfigDiskConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigWorkerConfigDiskConfig {
+	raw, ok := i.([]interface{})
+	if !ok || len(raw) == 0 {
+		return []ClusterConfigWorkerConfigDiskConfig{}
+	}
+
+	out := make([]ClusterConfigWorkerConfigDiskConfig, 0, len(raw))
+	for _, value := range raw {
+		out = append(out, *flattenClusterConfigWorkerConfigDiskConfig(c, value.(map[string]interface{}), res))
+	}
+
+	return out
+}
+
+// expandClusterConfigWorkerConfigDiskConfig expands an instance of ClusterConfigWorkerConfigDiskConfig into a JSON
+// request object.
+func expandClusterConfigWorkerConfigDiskConfig(c *Client, f *ClusterConfigWorkerConfigDiskConfig, res *Cluster) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	// Build the request object, omitting unset fields.
+	req := map[string]interface{}{}
+	if v := f.BootDiskType; !dcl.IsEmptyValueIndirect(v) {
+		req["bootDiskType"] = v
+	}
+	if v := f.BootDiskSizeGb; !dcl.IsEmptyValueIndirect(v) {
+		req["bootDiskSizeGb"] = v
+	}
+	if v := f.NumLocalSsds; !dcl.IsEmptyValueIndirect(v) {
+		req["numLocalSsds"] = v
+	}
+	if v := f.LocalSsdInterface; !dcl.IsEmptyValueIndirect(v) {
+		req["localSsdInterface"] = v
+	}
+
+	return req, nil
+}
+
+// flattenClusterConfigWorkerConfigDiskConfig flattens an instance of
+// ClusterConfigWorkerConfigDiskConfig from a JSON response object.
+func flattenClusterConfigWorkerConfigDiskConfig(c *Client, i interface{}, res *Cluster) *ClusterConfigWorkerConfigDiskConfig {
+	data, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyClusterConfigWorkerConfigDiskConfig
+	}
+
+	out := &ClusterConfigWorkerConfigDiskConfig{}
+	out.BootDiskType = dcl.FlattenString(data["bootDiskType"])
+	out.BootDiskSizeGb = dcl.FlattenInteger(data["bootDiskSizeGb"])
+	out.NumLocalSsds = dcl.FlattenInteger(data["numLocalSsds"])
+	out.LocalSsdInterface = dcl.FlattenString(data["localSsdInterface"])
+
+	return out
+}
+
+// expandClusterConfigWorkerConfigManagedGroupConfigMap expands the contents of ClusterConfigWorkerConfigManagedGroupConfig into a JSON
+// request object.
+func expandClusterConfigWorkerConfigManagedGroupConfigMap(c *Client, f map[string]ClusterConfigWorkerConfigManagedGroupConfig, res *Cluster) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	out := make(map[string]interface{})
+	for key, value := range f {
+		value := value
+		exp, err := expandClusterConfigWorkerConfigManagedGroupConfig(c, &value, res)
+		if err != nil {
+			return nil, err
+		}
+		if exp != nil {
+			out[key] = exp
+		}
+	}
+
+	return out, nil
+}
+
+// expandClusterConfigWorkerConfigManagedGroupConfigSlice expands a slice of
+// ClusterConfigWorkerConfigManagedGroupConfig values into JSON request objects.
+func expandClusterConfigWorkerConfigManagedGroupConfigSlice(c *Client, f []ClusterConfigWorkerConfigManagedGroupConfig, res *Cluster) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	out := make([]map[string]interface{}, 0, len(f))
+	for idx := range f {
+		exp, err := expandClusterConfigWorkerConfigManagedGroupConfig(c, &f[idx], res)
+		if err != nil {
+			return nil, err
+		}
+		out = append(out, exp)
+	}
+
+	return out, nil
+}
+
+// flattenClusterConfigWorkerConfigManagedGroupConfigMap flattens a JSON map of
+// ClusterConfigWorkerConfigManagedGroupConfig objects; non-map or empty input yields an empty map.
+func flattenClusterConfigWorkerConfigManagedGroupConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigWorkerConfigManagedGroupConfig {
+	raw, ok := i.(map[string]interface{})
+	if !ok || len(raw) == 0 {
+		return map[string]ClusterConfigWorkerConfigManagedGroupConfig{}
+	}
+
+	out := make(map[string]ClusterConfigWorkerConfigManagedGroupConfig, len(raw))
+	for key, value := range raw {
+		out[key] = *flattenClusterConfigWorkerConfigManagedGroupConfig(c, value.(map[string]interface{}), res)
+	}
+
+	return out
+}
+
+// flattenClusterConfigWorkerConfigManagedGroupConfigSlice flattens the contents of ClusterConfigWorkerConfigManagedGroupConfig from a JSON
+// response object.
+func flattenClusterConfigWorkerConfigManagedGroupConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigWorkerConfigManagedGroupConfig {
+	raw, ok := i.([]interface{})
+	if !ok || len(raw) == 0 {
+		return []ClusterConfigWorkerConfigManagedGroupConfig{}
+	}
+
+	out := make([]ClusterConfigWorkerConfigManagedGroupConfig, 0, len(raw))
+	for _, value := range raw {
+		out = append(out, *flattenClusterConfigWorkerConfigManagedGroupConfig(c, value.(map[string]interface{}), res))
+	}
+
+	return out
+}
+
+// expandClusterConfigWorkerConfigManagedGroupConfig expands an instance of
+// ClusterConfigWorkerConfigManagedGroupConfig into a JSON request object. All of
+// its fields are output-only, so a non-empty input produces an empty object.
+func expandClusterConfigWorkerConfigManagedGroupConfig(c *Client, f *ClusterConfigWorkerConfigManagedGroupConfig, res *Cluster) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	req := make(map[string]interface{})
+
+	return req, nil
+}
+
+// flattenClusterConfigWorkerConfigManagedGroupConfig flattens an instance of
+// ClusterConfigWorkerConfigManagedGroupConfig from a JSON response object.
+func flattenClusterConfigWorkerConfigManagedGroupConfig(c *Client, i interface{}, res *Cluster) *ClusterConfigWorkerConfigManagedGroupConfig {
+	data, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyClusterConfigWorkerConfigManagedGroupConfig
+	}
+
+	out := &ClusterConfigWorkerConfigManagedGroupConfig{}
+	out.InstanceTemplateName = dcl.FlattenString(data["instanceTemplateName"])
+	out.InstanceGroupManagerName = dcl.FlattenString(data["instanceGroupManagerName"])
+
+	return out
+}
+
+// expandClusterConfigWorkerConfigAcceleratorsMap expands the contents of ClusterConfigWorkerConfigAccelerators into a JSON
+// request object.
+func expandClusterConfigWorkerConfigAcceleratorsMap(c *Client, f map[string]ClusterConfigWorkerConfigAccelerators, res *Cluster) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	out := make(map[string]interface{})
+	for key, value := range f {
+		value := value
+		exp, err := expandClusterConfigWorkerConfigAccelerators(c, &value, res)
+		if err != nil {
+			return nil, err
+		}
+		if exp != nil {
+			out[key] = exp
+		}
+	}
+
+	return out, nil
+}
+
+// expandClusterConfigWorkerConfigAcceleratorsSlice expands a slice of
+// ClusterConfigWorkerConfigAccelerators values into JSON request objects.
+func expandClusterConfigWorkerConfigAcceleratorsSlice(c *Client, f []ClusterConfigWorkerConfigAccelerators, res *Cluster) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	out := make([]map[string]interface{}, 0, len(f))
+	for idx := range f {
+		exp, err := expandClusterConfigWorkerConfigAccelerators(c, &f[idx], res)
+		if err != nil {
+			return nil, err
+		}
+		out = append(out, exp)
+	}
+
+	return out, nil
+}
+
+// flattenClusterConfigWorkerConfigAcceleratorsMap flattens a JSON map of
+// ClusterConfigWorkerConfigAccelerators objects; non-map or empty input yields an empty map.
+func flattenClusterConfigWorkerConfigAcceleratorsMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigWorkerConfigAccelerators {
+	raw, ok := i.(map[string]interface{})
+	if !ok || len(raw) == 0 {
+		return map[string]ClusterConfigWorkerConfigAccelerators{}
+	}
+
+	out := make(map[string]ClusterConfigWorkerConfigAccelerators, len(raw))
+	for key, value := range raw {
+		out[key] = *flattenClusterConfigWorkerConfigAccelerators(c, value.(map[string]interface{}), res)
+	}
+
+	return out
+}
+
+// flattenClusterConfigWorkerConfigAcceleratorsSlice flattens the contents of ClusterConfigWorkerConfigAccelerators from a JSON
+// response object.
+func flattenClusterConfigWorkerConfigAcceleratorsSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigWorkerConfigAccelerators {
+	raw, ok := i.([]interface{})
+	if !ok || len(raw) == 0 {
+		return []ClusterConfigWorkerConfigAccelerators{}
+	}
+
+	out := make([]ClusterConfigWorkerConfigAccelerators, 0, len(raw))
+	for _, value := range raw {
+		out = append(out, *flattenClusterConfigWorkerConfigAccelerators(c, value.(map[string]interface{}), res))
+	}
+
+	return out
+}
+
+// expandClusterConfigWorkerConfigAccelerators expands an instance of
+// ClusterConfigWorkerConfigAccelerators into a JSON request object. Note the
+// nil-pointer guard (rather than an empty-value check): a non-nil zero value
+// still produces an (empty) object, matching list-element expansion semantics.
+func expandClusterConfigWorkerConfigAccelerators(c *Client, f *ClusterConfigWorkerConfigAccelerators, res *Cluster) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	req := map[string]interface{}{}
+	if v := f.AcceleratorType; !dcl.IsEmptyValueIndirect(v) {
+		req["acceleratorTypeUri"] = v
+	}
+	if v := f.AcceleratorCount; !dcl.IsEmptyValueIndirect(v) {
+		req["acceleratorCount"] = v
+	}
+
+	return req, nil
+}
+
+// flattenClusterConfigWorkerConfigAccelerators flattens an instance of
+// ClusterConfigWorkerConfigAccelerators from a JSON response object.
+func flattenClusterConfigWorkerConfigAccelerators(c *Client, i interface{}, res *Cluster) *ClusterConfigWorkerConfigAccelerators {
+	data, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyClusterConfigWorkerConfigAccelerators
+	}
+
+	out := &ClusterConfigWorkerConfigAccelerators{}
+	out.AcceleratorType = dcl.FlattenString(data["acceleratorTypeUri"])
+	out.AcceleratorCount = dcl.FlattenInteger(data["acceleratorCount"])
+
+	return out
+}
+
+// expandClusterConfigWorkerConfigInstanceReferencesMap expands the contents of ClusterConfigWorkerConfigInstanceReferences into a JSON
+// request object.
+func expandClusterConfigWorkerConfigInstanceReferencesMap(c *Client, f map[string]ClusterConfigWorkerConfigInstanceReferences, res *Cluster) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	out := make(map[string]interface{})
+	for key, value := range f {
+		value := value
+		exp, err := expandClusterConfigWorkerConfigInstanceReferences(c, &value, res)
+		if err != nil {
+			return nil, err
+		}
+		if exp != nil {
+			out[key] = exp
+		}
+	}
+
+	return out, nil
+}
+
+// expandClusterConfigWorkerConfigInstanceReferencesSlice expands a slice of
+// ClusterConfigWorkerConfigInstanceReferences values into JSON request objects.
+func expandClusterConfigWorkerConfigInstanceReferencesSlice(c *Client, f []ClusterConfigWorkerConfigInstanceReferences, res *Cluster) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	out := make([]map[string]interface{}, 0, len(f))
+	for idx := range f {
+		exp, err := expandClusterConfigWorkerConfigInstanceReferences(c, &f[idx], res)
+		if err != nil {
+			return nil, err
+		}
+		out = append(out, exp)
+	}
+
+	return out, nil
+}
+
+// flattenClusterConfigWorkerConfigInstanceReferencesMap flattens a JSON map of
+// ClusterConfigWorkerConfigInstanceReferences objects; non-map or empty input yields an empty map.
+func flattenClusterConfigWorkerConfigInstanceReferencesMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigWorkerConfigInstanceReferences {
+	raw, ok := i.(map[string]interface{})
+	if !ok || len(raw) == 0 {
+		return map[string]ClusterConfigWorkerConfigInstanceReferences{}
+	}
+
+	out := make(map[string]ClusterConfigWorkerConfigInstanceReferences, len(raw))
+	for key, value := range raw {
+		out[key] = *flattenClusterConfigWorkerConfigInstanceReferences(c, value.(map[string]interface{}), res)
+	}
+
+	return out
+}
+
+// flattenClusterConfigWorkerConfigInstanceReferencesSlice flattens the contents of ClusterConfigWorkerConfigInstanceReferences from a JSON
+// response object.
+func flattenClusterConfigWorkerConfigInstanceReferencesSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigWorkerConfigInstanceReferences {
+	raw, ok := i.([]interface{})
+	if !ok || len(raw) == 0 {
+		return []ClusterConfigWorkerConfigInstanceReferences{}
+	}
+
+	out := make([]ClusterConfigWorkerConfigInstanceReferences, 0, len(raw))
+	for _, value := range raw {
+		out = append(out, *flattenClusterConfigWorkerConfigInstanceReferences(c, value.(map[string]interface{}), res))
+	}
+
+	return out
+}
+
+// expandClusterConfigWorkerConfigInstanceReferences expands an instance of
+// ClusterConfigWorkerConfigInstanceReferences into a JSON request object,
+// omitting unset fields.
+func expandClusterConfigWorkerConfigInstanceReferences(c *Client, f *ClusterConfigWorkerConfigInstanceReferences, res *Cluster) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	req := map[string]interface{}{}
+	if v := f.InstanceName; !dcl.IsEmptyValueIndirect(v) {
+		req["instanceName"] = v
+	}
+	if v := f.InstanceId; !dcl.IsEmptyValueIndirect(v) {
+		req["instanceId"] = v
+	}
+	if v := f.PublicKey; !dcl.IsEmptyValueIndirect(v) {
+		req["publicKey"] = v
+	}
+	if v := f.PublicEciesKey; !dcl.IsEmptyValueIndirect(v) {
+		req["publicEciesKey"] = v
+	}
+
+	return req, nil
+}
+
+// flattenClusterConfigWorkerConfigInstanceReferences flattens an instance of ClusterConfigWorkerConfigInstanceReferences from a JSON
+// response object.
+func flattenClusterConfigWorkerConfigInstanceReferences(c *Client, i interface{}, res *Cluster) *ClusterConfigWorkerConfigInstanceReferences {
+	data, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyClusterConfigWorkerConfigInstanceReferences
+	}
+
+	out := &ClusterConfigWorkerConfigInstanceReferences{}
+	out.InstanceName = dcl.FlattenString(data["instanceName"])
+	out.InstanceId = dcl.FlattenString(data["instanceId"])
+	out.PublicKey = dcl.FlattenString(data["publicKey"])
+	out.PublicEciesKey = dcl.FlattenString(data["publicEciesKey"])
+
+	return out
+}
+
+// expandClusterConfigSecondaryWorkerConfigMap expands a map of ClusterConfigSecondaryWorkerConfig
+// values into JSON request objects, dropping entries that expand to nil.
+func expandClusterConfigSecondaryWorkerConfigMap(c *Client, f map[string]ClusterConfigSecondaryWorkerConfig, res *Cluster) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	out := make(map[string]interface{})
+	for key, value := range f {
+		value := value
+		exp, err := expandClusterConfigSecondaryWorkerConfig(c, &value, res)
+		if err != nil {
+			return nil, err
+		}
+		if exp != nil {
+			out[key] = exp
+		}
+	}
+
+	return out, nil
+}
+
+// expandClusterConfigSecondaryWorkerConfigSlice expands a slice of
+// ClusterConfigSecondaryWorkerConfig values into JSON request objects.
+func expandClusterConfigSecondaryWorkerConfigSlice(c *Client, f []ClusterConfigSecondaryWorkerConfig, res *Cluster) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	out := make([]map[string]interface{}, 0, len(f))
+	for idx := range f {
+		exp, err := expandClusterConfigSecondaryWorkerConfig(c, &f[idx], res)
+		if err != nil {
+			return nil, err
+		}
+		out = append(out, exp)
+	}
+
+	return out, nil
+}
+
+// flattenClusterConfigSecondaryWorkerConfigMap flattens the contents of ClusterConfigSecondaryWorkerConfig from a JSON
+// response object.
+func flattenClusterConfigSecondaryWorkerConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigSecondaryWorkerConfig {
+	raw, ok := i.(map[string]interface{})
+	if !ok || len(raw) == 0 {
+		return map[string]ClusterConfigSecondaryWorkerConfig{}
+	}
+
+	out := make(map[string]ClusterConfigSecondaryWorkerConfig, len(raw))
+	for key, value := range raw {
+		out[key] = *flattenClusterConfigSecondaryWorkerConfig(c, value.(map[string]interface{}), res)
+	}
+
+	return out
+}
+
+// flattenClusterConfigSecondaryWorkerConfigSlice flattens a JSON list of
+// ClusterConfigSecondaryWorkerConfig objects; non-list or empty input yields an empty slice.
+func flattenClusterConfigSecondaryWorkerConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigSecondaryWorkerConfig {
+	raw, ok := i.([]interface{})
+	if !ok || len(raw) == 0 {
+		return []ClusterConfigSecondaryWorkerConfig{}
+	}
+
+	out := make([]ClusterConfigSecondaryWorkerConfig, 0, len(raw))
+	for _, value := range raw {
+		out = append(out, *flattenClusterConfigSecondaryWorkerConfig(c, value.(map[string]interface{}), res))
+	}
+
+	return out
+}
+
+// expandClusterConfigSecondaryWorkerConfig expands an instance of ClusterConfigSecondaryWorkerConfig into a JSON
+// request object.
+func expandClusterConfigSecondaryWorkerConfig(c *Client, f *ClusterConfigSecondaryWorkerConfig, res *Cluster) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	// Build the request object, omitting unset fields. Output-only fields
+	// (instance names, managed group config, instance references) are not sent.
+	req := map[string]interface{}{}
+	if v := f.NumInstances; !dcl.IsEmptyValueIndirect(v) {
+		req["numInstances"] = v
+	}
+	if v := f.Image; !dcl.IsEmptyValueIndirect(v) {
+		req["imageUri"] = v
+	}
+	if v := f.MachineType; !dcl.IsEmptyValueIndirect(v) {
+		req["machineTypeUri"] = v
+	}
+	diskConfig, err := expandClusterConfigSecondaryWorkerConfigDiskConfig(c, f.DiskConfig, res)
+	if err != nil {
+		return nil, fmt.Errorf("error expanding DiskConfig into diskConfig: %w", err)
+	}
+	if !dcl.IsEmptyValueIndirect(diskConfig) {
+		req["diskConfig"] = diskConfig
+	}
+	if v := f.Preemptibility; !dcl.IsEmptyValueIndirect(v) {
+		req["preemptibility"] = v
+	}
+	accelerators, err := expandClusterConfigSecondaryWorkerConfigAcceleratorsSlice(c, f.Accelerators, res)
+	if err != nil {
+		return nil, fmt.Errorf("error expanding Accelerators into accelerators: %w", err)
+	}
+	if accelerators != nil {
+		req["accelerators"] = accelerators
+	}
+	if v := f.MinCpuPlatform; !dcl.IsEmptyValueIndirect(v) {
+		req["minCpuPlatform"] = v
+	}
+
+	return req, nil
+}
+
+// flattenClusterConfigSecondaryWorkerConfig flattens an instance of ClusterConfigSecondaryWorkerConfig from a JSON
+// response object.
+func flattenClusterConfigSecondaryWorkerConfig(c *Client, i interface{}, res *Cluster) *ClusterConfigSecondaryWorkerConfig {
+	data, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyClusterConfigSecondaryWorkerConfig
+	}
+
+	out := &ClusterConfigSecondaryWorkerConfig{}
+	out.NumInstances = dcl.FlattenInteger(data["numInstances"])
+	out.InstanceNames = dcl.FlattenStringSlice(data["instanceNames"])
+	out.Image = dcl.FlattenString(data["imageUri"])
+	out.MachineType = dcl.FlattenString(data["machineTypeUri"])
+	out.DiskConfig = flattenClusterConfigSecondaryWorkerConfigDiskConfig(c, data["diskConfig"], res)
+	out.IsPreemptible = dcl.FlattenBool(data["isPreemptible"])
+	out.Preemptibility = flattenClusterConfigSecondaryWorkerConfigPreemptibilityEnum(data["preemptibility"])
+	out.ManagedGroupConfig = flattenClusterConfigSecondaryWorkerConfigManagedGroupConfig(c, data["managedGroupConfig"], res)
+	out.Accelerators = flattenClusterConfigSecondaryWorkerConfigAcceleratorsSlice(c, data["accelerators"], res)
+	out.MinCpuPlatform = dcl.FlattenString(data["minCpuPlatform"])
+	out.InstanceReferences = flattenClusterConfigSecondaryWorkerConfigInstanceReferencesSlice(c, data["instanceReferences"], res)
+
+	return out
+}
+
+// expandClusterConfigSecondaryWorkerConfigDiskConfigMap expands the contents of ClusterConfigSecondaryWorkerConfigDiskConfig into a JSON
+// request object.
+func expandClusterConfigSecondaryWorkerConfigDiskConfigMap(c *Client, f map[string]ClusterConfigSecondaryWorkerConfigDiskConfig, res *Cluster) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	out := make(map[string]interface{})
+	for key, value := range f {
+		value := value
+		exp, err := expandClusterConfigSecondaryWorkerConfigDiskConfig(c, &value, res)
+		if err != nil {
+			return nil, err
+		}
+		if exp != nil {
+			out[key] = exp
+		}
+	}
+
+	return out, nil
+}
+
+// expandClusterConfigSecondaryWorkerConfigDiskConfigSlice expands a slice of
+// ClusterConfigSecondaryWorkerConfigDiskConfig values into JSON request objects.
+func expandClusterConfigSecondaryWorkerConfigDiskConfigSlice(c *Client, f []ClusterConfigSecondaryWorkerConfigDiskConfig, res *Cluster) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	out := make([]map[string]interface{}, 0, len(f))
+	for idx := range f {
+		exp, err := expandClusterConfigSecondaryWorkerConfigDiskConfig(c, &f[idx], res)
+		if err != nil {
+			return nil, err
+		}
+		out = append(out, exp)
+	}
+
+	return out, nil
+}
+
+// flattenClusterConfigSecondaryWorkerConfigDiskConfigMap flattens the contents of ClusterConfigSecondaryWorkerConfigDiskConfig from a JSON
+// response object.
+func flattenClusterConfigSecondaryWorkerConfigDiskConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigSecondaryWorkerConfigDiskConfig {
+	raw, ok := i.(map[string]interface{})
+	if !ok || len(raw) == 0 {
+		return map[string]ClusterConfigSecondaryWorkerConfigDiskConfig{}
+	}
+
+	out := make(map[string]ClusterConfigSecondaryWorkerConfigDiskConfig, len(raw))
+	for key, value := range raw {
+		out[key] = *flattenClusterConfigSecondaryWorkerConfigDiskConfig(c, value.(map[string]interface{}), res)
+	}
+
+	return out
+}
+
+// flattenClusterConfigSecondaryWorkerConfigDiskConfigSlice flattens a JSON list of
+// ClusterConfigSecondaryWorkerConfigDiskConfig objects; non-list or empty input yields an empty slice.
+func flattenClusterConfigSecondaryWorkerConfigDiskConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigSecondaryWorkerConfigDiskConfig {
+	raw, ok := i.([]interface{})
+	if !ok || len(raw) == 0 {
+		return []ClusterConfigSecondaryWorkerConfigDiskConfig{}
+	}
+
+	out := make([]ClusterConfigSecondaryWorkerConfigDiskConfig, 0, len(raw))
+	for _, value := range raw {
+		out = append(out, *flattenClusterConfigSecondaryWorkerConfigDiskConfig(c, value.(map[string]interface{}), res))
+	}
+
+	return out
+}
+
+// expandClusterConfigSecondaryWorkerConfigDiskConfig expands an instance of ClusterConfigSecondaryWorkerConfigDiskConfig into a JSON
+// request object.
+func expandClusterConfigSecondaryWorkerConfigDiskConfig(c *Client, f *ClusterConfigSecondaryWorkerConfigDiskConfig, res *Cluster) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	// Build the request object, omitting unset fields.
+	req := map[string]interface{}{}
+	if v := f.BootDiskType; !dcl.IsEmptyValueIndirect(v) {
+		req["bootDiskType"] = v
+	}
+	if v := f.BootDiskSizeGb; !dcl.IsEmptyValueIndirect(v) {
+		req["bootDiskSizeGb"] = v
+	}
+	if v := f.NumLocalSsds; !dcl.IsEmptyValueIndirect(v) {
+		req["numLocalSsds"] = v
+	}
+	if v := f.LocalSsdInterface; !dcl.IsEmptyValueIndirect(v) {
+		req["localSsdInterface"] = v
+	}
+
+	return req, nil
+}
+
+// flattenClusterConfigSecondaryWorkerConfigDiskConfig flattens an instance of
+// ClusterConfigSecondaryWorkerConfigDiskConfig from a JSON response object.
+func flattenClusterConfigSecondaryWorkerConfigDiskConfig(c *Client, i interface{}, res *Cluster) *ClusterConfigSecondaryWorkerConfigDiskConfig {
+	data, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyClusterConfigSecondaryWorkerConfigDiskConfig
+	}
+
+	out := &ClusterConfigSecondaryWorkerConfigDiskConfig{}
+	out.BootDiskType = dcl.FlattenString(data["bootDiskType"])
+	out.BootDiskSizeGb = dcl.FlattenInteger(data["bootDiskSizeGb"])
+	out.NumLocalSsds = dcl.FlattenInteger(data["numLocalSsds"])
+	out.LocalSsdInterface = dcl.FlattenString(data["localSsdInterface"])
+
+	return out
+}
+
+// expandClusterConfigSecondaryWorkerConfigManagedGroupConfigMap expands the contents of ClusterConfigSecondaryWorkerConfigManagedGroupConfig into a JSON
+// request object.
+func expandClusterConfigSecondaryWorkerConfigManagedGroupConfigMap(c *Client, f map[string]ClusterConfigSecondaryWorkerConfigManagedGroupConfig, res *Cluster) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	out := make(map[string]interface{})
+	for key, value := range f {
+		value := value
+		exp, err := expandClusterConfigSecondaryWorkerConfigManagedGroupConfig(c, &value, res)
+		if err != nil {
+			return nil, err
+		}
+		if exp != nil {
+			out[key] = exp
+		}
+	}
+
+	return out, nil
+}
+
+// expandClusterConfigSecondaryWorkerConfigManagedGroupConfigSlice expands a slice of
+// ClusterConfigSecondaryWorkerConfigManagedGroupConfig values into JSON request objects.
+func expandClusterConfigSecondaryWorkerConfigManagedGroupConfigSlice(c *Client, f []ClusterConfigSecondaryWorkerConfigManagedGroupConfig, res *Cluster) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	out := make([]map[string]interface{}, 0, len(f))
+	for idx := range f {
+		exp, err := expandClusterConfigSecondaryWorkerConfigManagedGroupConfig(c, &f[idx], res)
+		if err != nil {
+			return nil, err
+		}
+		out = append(out, exp)
+	}
+
+	return out, nil
+}
+
+// flattenClusterConfigSecondaryWorkerConfigManagedGroupConfigMap flattens the contents of ClusterConfigSecondaryWorkerConfigManagedGroupConfig from a JSON
+// response object.
+func flattenClusterConfigSecondaryWorkerConfigManagedGroupConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigSecondaryWorkerConfigManagedGroupConfig {
+	raw, ok := i.(map[string]interface{})
+	if !ok || len(raw) == 0 {
+		return map[string]ClusterConfigSecondaryWorkerConfigManagedGroupConfig{}
+	}
+
+	out := make(map[string]ClusterConfigSecondaryWorkerConfigManagedGroupConfig, len(raw))
+	for key, value := range raw {
+		out[key] = *flattenClusterConfigSecondaryWorkerConfigManagedGroupConfig(c, value.(map[string]interface{}), res)
+	}
+
+	return out
+}
+
+// flattenClusterConfigSecondaryWorkerConfigManagedGroupConfigSlice flattens a JSON list of
+// ClusterConfigSecondaryWorkerConfigManagedGroupConfig objects; non-list or empty input yields an empty slice.
+func flattenClusterConfigSecondaryWorkerConfigManagedGroupConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigSecondaryWorkerConfigManagedGroupConfig {
+	raw, ok := i.([]interface{})
+	if !ok || len(raw) == 0 {
+		return []ClusterConfigSecondaryWorkerConfigManagedGroupConfig{}
+	}
+
+	out := make([]ClusterConfigSecondaryWorkerConfigManagedGroupConfig, 0, len(raw))
+	for _, value := range raw {
+		out = append(out, *flattenClusterConfigSecondaryWorkerConfigManagedGroupConfig(c, value.(map[string]interface{}), res))
+	}
+
+	return out
+}
+
+// expandClusterConfigSecondaryWorkerConfigManagedGroupConfig expands an instance of ClusterConfigSecondaryWorkerConfigManagedGroupConfig into a JSON
+// request object.
+func expandClusterConfigSecondaryWorkerConfigManagedGroupConfig(c *Client, f *ClusterConfigSecondaryWorkerConfigManagedGroupConfig, res *Cluster) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	// All fields of this type are output-only, so a non-empty input
+	// produces an empty request object.
+	req := make(map[string]interface{})
+
+	return req, nil
+}
+
+// flattenClusterConfigSecondaryWorkerConfigManagedGroupConfig flattens an instance of
+// ClusterConfigSecondaryWorkerConfigManagedGroupConfig from a JSON response object.
+func flattenClusterConfigSecondaryWorkerConfigManagedGroupConfig(c *Client, i interface{}, res *Cluster) *ClusterConfigSecondaryWorkerConfigManagedGroupConfig {
+	data, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyClusterConfigSecondaryWorkerConfigManagedGroupConfig
+	}
+
+	out := &ClusterConfigSecondaryWorkerConfigManagedGroupConfig{}
+	out.InstanceTemplateName = dcl.FlattenString(data["instanceTemplateName"])
+	out.InstanceGroupManagerName = dcl.FlattenString(data["instanceGroupManagerName"])
+
+	return out
+}
+
+// expandClusterConfigSecondaryWorkerConfigAcceleratorsMap expands a map of
+// ClusterConfigSecondaryWorkerConfigAccelerators values into JSON request objects,
+// dropping entries that expand to nil.
+func expandClusterConfigSecondaryWorkerConfigAcceleratorsMap(c *Client, f map[string]ClusterConfigSecondaryWorkerConfigAccelerators, res *Cluster) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	out := make(map[string]interface{})
+	for key, value := range f {
+		value := value
+		exp, err := expandClusterConfigSecondaryWorkerConfigAccelerators(c, &value, res)
+		if err != nil {
+			return nil, err
+		}
+		if exp != nil {
+			out[key] = exp
+		}
+	}
+
+	return out, nil
+}
+
+// expandClusterConfigSecondaryWorkerConfigAcceleratorsSlice expands the contents of ClusterConfigSecondaryWorkerConfigAccelerators into a JSON
+// request object.
+func expandClusterConfigSecondaryWorkerConfigAcceleratorsSlice(c *Client, f []ClusterConfigSecondaryWorkerConfigAccelerators, res *Cluster) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	out := make([]map[string]interface{}, 0, len(f))
+	for idx := range f {
+		exp, err := expandClusterConfigSecondaryWorkerConfigAccelerators(c, &f[idx], res)
+		if err != nil {
+			return nil, err
+		}
+		out = append(out, exp)
+	}
+
+	return out, nil
+}
+
+// flattenClusterConfigSecondaryWorkerConfigAcceleratorsMap flattens a JSON map of
+// ClusterConfigSecondaryWorkerConfigAccelerators objects; non-map or empty input yields an empty map.
+func flattenClusterConfigSecondaryWorkerConfigAcceleratorsMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigSecondaryWorkerConfigAccelerators {
+	raw, ok := i.(map[string]interface{})
+	if !ok || len(raw) == 0 {
+		return map[string]ClusterConfigSecondaryWorkerConfigAccelerators{}
+	}
+
+	out := make(map[string]ClusterConfigSecondaryWorkerConfigAccelerators, len(raw))
+	for key, value := range raw {
+		out[key] = *flattenClusterConfigSecondaryWorkerConfigAccelerators(c, value.(map[string]interface{}), res)
+	}
+
+	return out
+}
+
+// flattenClusterConfigSecondaryWorkerConfigAcceleratorsSlice flattens the contents of ClusterConfigSecondaryWorkerConfigAccelerators from a JSON
+// response object.
+func flattenClusterConfigSecondaryWorkerConfigAcceleratorsSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigSecondaryWorkerConfigAccelerators {
+	raw, ok := i.([]interface{})
+	if !ok || len(raw) == 0 {
+		return []ClusterConfigSecondaryWorkerConfigAccelerators{}
+	}
+
+	out := make([]ClusterConfigSecondaryWorkerConfigAccelerators, 0, len(raw))
+	for _, value := range raw {
+		out = append(out, *flattenClusterConfigSecondaryWorkerConfigAccelerators(c, value.(map[string]interface{}), res))
+	}
+
+	return out
+}
+
+// expandClusterConfigSecondaryWorkerConfigAccelerators expands an instance of
+// ClusterConfigSecondaryWorkerConfigAccelerators into a JSON request object. Note the
+// nil-pointer guard (rather than an empty-value check): a non-nil zero value
+// still produces an (empty) object, matching list-element expansion semantics.
+func expandClusterConfigSecondaryWorkerConfigAccelerators(c *Client, f *ClusterConfigSecondaryWorkerConfigAccelerators, res *Cluster) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	req := map[string]interface{}{}
+	if v := f.AcceleratorType; !dcl.IsEmptyValueIndirect(v) {
+		req["acceleratorTypeUri"] = v
+	}
+	if v := f.AcceleratorCount; !dcl.IsEmptyValueIndirect(v) {
+		req["acceleratorCount"] = v
+	}
+
+	return req, nil
+}
+
+// flattenClusterConfigSecondaryWorkerConfigAccelerators flattens an instance of ClusterConfigSecondaryWorkerConfigAccelerators from a JSON
+// response object.
+func flattenClusterConfigSecondaryWorkerConfigAccelerators(c *Client, i interface{}, res *Cluster) *ClusterConfigSecondaryWorkerConfigAccelerators { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterConfigSecondaryWorkerConfigAccelerators{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterConfigSecondaryWorkerConfigAccelerators + } + r.AcceleratorType = dcl.FlattenString(m["acceleratorTypeUri"]) + r.AcceleratorCount = dcl.FlattenInteger(m["acceleratorCount"]) + + return r +} + +// expandClusterConfigSecondaryWorkerConfigInstanceReferencesMap expands the contents of ClusterConfigSecondaryWorkerConfigInstanceReferences into a JSON +// request object. +func expandClusterConfigSecondaryWorkerConfigInstanceReferencesMap(c *Client, f map[string]ClusterConfigSecondaryWorkerConfigInstanceReferences, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterConfigSecondaryWorkerConfigInstanceReferences(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterConfigSecondaryWorkerConfigInstanceReferencesSlice expands the contents of ClusterConfigSecondaryWorkerConfigInstanceReferences into a JSON +// request object. 
+func expandClusterConfigSecondaryWorkerConfigInstanceReferencesSlice(c *Client, f []ClusterConfigSecondaryWorkerConfigInstanceReferences, res *Cluster) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandClusterConfigSecondaryWorkerConfigInstanceReferences(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenClusterConfigSecondaryWorkerConfigInstanceReferencesMap flattens the contents of ClusterConfigSecondaryWorkerConfigInstanceReferences from a JSON
+// response object. Anything other than a non-empty map flattens to an empty (non-nil) map.
+func flattenClusterConfigSecondaryWorkerConfigInstanceReferencesMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigSecondaryWorkerConfigInstanceReferences {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]ClusterConfigSecondaryWorkerConfigInstanceReferences{}
+	}
+
+	if len(a) == 0 {
+		return map[string]ClusterConfigSecondaryWorkerConfigInstanceReferences{}
+	}
+
+	items := make(map[string]ClusterConfigSecondaryWorkerConfigInstanceReferences)
+	for k, item := range a {
+		items[k] = *flattenClusterConfigSecondaryWorkerConfigInstanceReferences(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenClusterConfigSecondaryWorkerConfigInstanceReferencesSlice flattens the contents of ClusterConfigSecondaryWorkerConfigInstanceReferences from a JSON
+// response object. Anything other than a non-empty list flattens to an empty (non-nil) slice.
+func flattenClusterConfigSecondaryWorkerConfigInstanceReferencesSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigSecondaryWorkerConfigInstanceReferences {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []ClusterConfigSecondaryWorkerConfigInstanceReferences{}
+	}
+
+	if len(a) == 0 {
+		return []ClusterConfigSecondaryWorkerConfigInstanceReferences{}
+	}
+
+	items := make([]ClusterConfigSecondaryWorkerConfigInstanceReferences, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenClusterConfigSecondaryWorkerConfigInstanceReferences(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandClusterConfigSecondaryWorkerConfigInstanceReferences expands an instance of ClusterConfigSecondaryWorkerConfigInstanceReferences into a JSON
+// request object. Empty or nil input (per dcl.IsEmptyValueIndirect) expands to (nil, nil);
+// only non-empty fields are written into the request map.
+func expandClusterConfigSecondaryWorkerConfigInstanceReferences(c *Client, f *ClusterConfigSecondaryWorkerConfigInstanceReferences, res *Cluster) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.InstanceName; !dcl.IsEmptyValueIndirect(v) {
+		m["instanceName"] = v
+	}
+	if v := f.InstanceId; !dcl.IsEmptyValueIndirect(v) {
+		m["instanceId"] = v
+	}
+	if v := f.PublicKey; !dcl.IsEmptyValueIndirect(v) {
+		m["publicKey"] = v
+	}
+	if v := f.PublicEciesKey; !dcl.IsEmptyValueIndirect(v) {
+		m["publicEciesKey"] = v
+	}
+
+	return m, nil
+}
+
+// flattenClusterConfigSecondaryWorkerConfigInstanceReferences flattens an instance of ClusterConfigSecondaryWorkerConfigInstanceReferences from a JSON
+// response object. Non-map input yields nil; an empty (but present) map collapses to the
+// shared EmptyClusterConfigSecondaryWorkerConfigInstanceReferences sentinel.
+func flattenClusterConfigSecondaryWorkerConfigInstanceReferences(c *Client, i interface{}, res *Cluster) *ClusterConfigSecondaryWorkerConfigInstanceReferences {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &ClusterConfigSecondaryWorkerConfigInstanceReferences{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyClusterConfigSecondaryWorkerConfigInstanceReferences
+	}
+	r.InstanceName = dcl.FlattenString(m["instanceName"])
+	r.InstanceId = dcl.FlattenString(m["instanceId"])
+	r.PublicKey = dcl.FlattenString(m["publicKey"])
+	r.PublicEciesKey = dcl.FlattenString(m["publicEciesKey"])
+
+	return r
+}
+
+// expandClusterConfigSoftwareConfigMap expands the contents of ClusterConfigSoftwareConfig into a JSON
+// request object. Nil input expands to (nil, nil); entries whose expansion is nil are dropped.
+func expandClusterConfigSoftwareConfigMap(c *Client, f map[string]ClusterConfigSoftwareConfig, res *Cluster) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandClusterConfigSoftwareConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandClusterConfigSoftwareConfigSlice expands the contents of ClusterConfigSoftwareConfig into a JSON
+// request object. Nil input expands to (nil, nil); element order is preserved.
+func expandClusterConfigSoftwareConfigSlice(c *Client, f []ClusterConfigSoftwareConfig, res *Cluster) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandClusterConfigSoftwareConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenClusterConfigSoftwareConfigMap flattens the contents of ClusterConfigSoftwareConfig from a JSON
+// response object. Anything other than a non-empty map flattens to an empty (non-nil) map.
+func flattenClusterConfigSoftwareConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigSoftwareConfig {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]ClusterConfigSoftwareConfig{}
+	}
+
+	if len(a) == 0 {
+		return map[string]ClusterConfigSoftwareConfig{}
+	}
+
+	items := make(map[string]ClusterConfigSoftwareConfig)
+	for k, item := range a {
+		items[k] = *flattenClusterConfigSoftwareConfig(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenClusterConfigSoftwareConfigSlice flattens the contents of ClusterConfigSoftwareConfig from a JSON
+// response object. Anything other than a non-empty list flattens to an empty (non-nil) slice.
+func flattenClusterConfigSoftwareConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigSoftwareConfig {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []ClusterConfigSoftwareConfig{}
+	}
+
+	if len(a) == 0 {
+		return []ClusterConfigSoftwareConfig{}
+	}
+
+	items := make([]ClusterConfigSoftwareConfig, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenClusterConfigSoftwareConfig(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandClusterConfigSoftwareConfig expands an instance of ClusterConfigSoftwareConfig into a JSON
+// request object. Empty or nil input expands to (nil, nil). OptionalComponents uses a plain
+// v != nil guard (not dcl.IsEmptyValueIndirect) — presumably so an explicitly empty slice is
+// still sent to the API; confirm against the DCL generator.
+func expandClusterConfigSoftwareConfig(c *Client, f *ClusterConfigSoftwareConfig, res *Cluster) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.ImageVersion; !dcl.IsEmptyValueIndirect(v) {
+		m["imageVersion"] = v
+	}
+	if v := f.Properties; !dcl.IsEmptyValueIndirect(v) {
+		m["properties"] = v
+	}
+	if v := f.OptionalComponents; v != nil {
+		m["optionalComponents"] = v
+	}
+
+	return m, nil
+}
+
+// flattenClusterConfigSoftwareConfig flattens an instance of ClusterConfigSoftwareConfig from a JSON
+// response object. Non-map input yields nil; an empty (but present) map collapses to the
+// shared EmptyClusterConfigSoftwareConfig sentinel.
+func flattenClusterConfigSoftwareConfig(c *Client, i interface{}, res *Cluster) *ClusterConfigSoftwareConfig {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &ClusterConfigSoftwareConfig{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyClusterConfigSoftwareConfig
+	}
+	r.ImageVersion = dcl.FlattenString(m["imageVersion"])
+	r.Properties = dcl.FlattenKeyValuePairs(m["properties"])
+	r.OptionalComponents = flattenClusterConfigSoftwareConfigOptionalComponentsEnumSlice(c, m["optionalComponents"], res)
+
+	return r
+}
+
+// expandClusterConfigInitializationActionsMap expands the contents of ClusterConfigInitializationActions into a JSON
+// request object. Nil input expands to (nil, nil); entries whose expansion is nil are dropped.
+func expandClusterConfigInitializationActionsMap(c *Client, f map[string]ClusterConfigInitializationActions, res *Cluster) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandClusterConfigInitializationActions(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandClusterConfigInitializationActionsSlice expands the contents of ClusterConfigInitializationActions into a JSON
+// request object. Nil input expands to (nil, nil); element order is preserved.
+func expandClusterConfigInitializationActionsSlice(c *Client, f []ClusterConfigInitializationActions, res *Cluster) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandClusterConfigInitializationActions(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenClusterConfigInitializationActionsMap flattens the contents of ClusterConfigInitializationActions from a JSON
+// response object. Anything other than a non-empty map flattens to an empty (non-nil) map.
+func flattenClusterConfigInitializationActionsMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigInitializationActions {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]ClusterConfigInitializationActions{}
+	}
+
+	if len(a) == 0 {
+		return map[string]ClusterConfigInitializationActions{}
+	}
+
+	items := make(map[string]ClusterConfigInitializationActions)
+	for k, item := range a {
+		items[k] = *flattenClusterConfigInitializationActions(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenClusterConfigInitializationActionsSlice flattens the contents of ClusterConfigInitializationActions from a JSON
+// response object. Anything other than a non-empty list flattens to an empty (non-nil) slice.
+func flattenClusterConfigInitializationActionsSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigInitializationActions {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []ClusterConfigInitializationActions{}
+	}
+
+	if len(a) == 0 {
+		return []ClusterConfigInitializationActions{}
+	}
+
+	items := make([]ClusterConfigInitializationActions, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenClusterConfigInitializationActions(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandClusterConfigInitializationActions expands an instance of ClusterConfigInitializationActions into a JSON
+// request object. Note: this expander guards on f == nil rather than dcl.IsEmptyValueIndirect(f) —
+// presumably so empty elements of the enclosing slice still expand; confirm against the DCL generator.
+func expandClusterConfigInitializationActions(c *Client, f *ClusterConfigInitializationActions, res *Cluster) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.ExecutableFile; !dcl.IsEmptyValueIndirect(v) {
+		m["executableFile"] = v
+	}
+	if v := f.ExecutionTimeout; !dcl.IsEmptyValueIndirect(v) {
+		m["executionTimeout"] = v
+	}
+
+	return m, nil
+}
+
+// flattenClusterConfigInitializationActions flattens an instance of ClusterConfigInitializationActions from a JSON
+// response object. Non-map input yields nil; an empty (but present) map collapses to the
+// shared EmptyClusterConfigInitializationActions sentinel.
+func flattenClusterConfigInitializationActions(c *Client, i interface{}, res *Cluster) *ClusterConfigInitializationActions {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &ClusterConfigInitializationActions{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyClusterConfigInitializationActions
+	}
+	r.ExecutableFile = dcl.FlattenString(m["executableFile"])
+	r.ExecutionTimeout = dcl.FlattenString(m["executionTimeout"])
+
+	return r
+}
+
+// expandClusterConfigEncryptionConfigMap expands the contents of ClusterConfigEncryptionConfig into a JSON
+// request object. Nil input expands to (nil, nil); entries whose expansion is nil are dropped.
+func expandClusterConfigEncryptionConfigMap(c *Client, f map[string]ClusterConfigEncryptionConfig, res *Cluster) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandClusterConfigEncryptionConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandClusterConfigEncryptionConfigSlice expands the contents of ClusterConfigEncryptionConfig into a JSON
+// request object. Nil input expands to (nil, nil); element order is preserved.
+func expandClusterConfigEncryptionConfigSlice(c *Client, f []ClusterConfigEncryptionConfig, res *Cluster) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandClusterConfigEncryptionConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenClusterConfigEncryptionConfigMap flattens the contents of ClusterConfigEncryptionConfig from a JSON
+// response object. Anything other than a non-empty map flattens to an empty (non-nil) map.
+func flattenClusterConfigEncryptionConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigEncryptionConfig {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]ClusterConfigEncryptionConfig{}
+	}
+
+	if len(a) == 0 {
+		return map[string]ClusterConfigEncryptionConfig{}
+	}
+
+	items := make(map[string]ClusterConfigEncryptionConfig)
+	for k, item := range a {
+		items[k] = *flattenClusterConfigEncryptionConfig(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenClusterConfigEncryptionConfigSlice flattens the contents of ClusterConfigEncryptionConfig from a JSON
+// response object. Anything other than a non-empty list flattens to an empty (non-nil) slice.
+func flattenClusterConfigEncryptionConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigEncryptionConfig {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []ClusterConfigEncryptionConfig{}
+	}
+
+	if len(a) == 0 {
+		return []ClusterConfigEncryptionConfig{}
+	}
+
+	items := make([]ClusterConfigEncryptionConfig, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenClusterConfigEncryptionConfig(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandClusterConfigEncryptionConfig expands an instance of ClusterConfigEncryptionConfig into a JSON
+// request object. Empty or nil input expands to (nil, nil); the single field is only written
+// when non-empty.
+func expandClusterConfigEncryptionConfig(c *Client, f *ClusterConfigEncryptionConfig, res *Cluster) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.GcePdKmsKeyName; !dcl.IsEmptyValueIndirect(v) {
+		m["gcePdKmsKeyName"] = v
+	}
+
+	return m, nil
+}
+
+// flattenClusterConfigEncryptionConfig flattens an instance of ClusterConfigEncryptionConfig from a JSON
+// response object. Non-map input yields nil; an empty (but present) map collapses to the
+// shared EmptyClusterConfigEncryptionConfig sentinel.
+func flattenClusterConfigEncryptionConfig(c *Client, i interface{}, res *Cluster) *ClusterConfigEncryptionConfig {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &ClusterConfigEncryptionConfig{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyClusterConfigEncryptionConfig
+	}
+	r.GcePdKmsKeyName = dcl.FlattenString(m["gcePdKmsKeyName"])
+
+	return r
+}
+
+// expandClusterConfigAutoscalingConfigMap expands the contents of ClusterConfigAutoscalingConfig into a JSON
+// request object. Nil input expands to (nil, nil); entries whose expansion is nil are dropped.
+func expandClusterConfigAutoscalingConfigMap(c *Client, f map[string]ClusterConfigAutoscalingConfig, res *Cluster) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandClusterConfigAutoscalingConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandClusterConfigAutoscalingConfigSlice expands the contents of ClusterConfigAutoscalingConfig into a JSON
+// request object. Nil input expands to (nil, nil); element order is preserved.
+func expandClusterConfigAutoscalingConfigSlice(c *Client, f []ClusterConfigAutoscalingConfig, res *Cluster) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandClusterConfigAutoscalingConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenClusterConfigAutoscalingConfigMap flattens the contents of ClusterConfigAutoscalingConfig from a JSON
+// response object. Anything other than a non-empty map flattens to an empty (non-nil) map.
+func flattenClusterConfigAutoscalingConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigAutoscalingConfig {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]ClusterConfigAutoscalingConfig{}
+	}
+
+	if len(a) == 0 {
+		return map[string]ClusterConfigAutoscalingConfig{}
+	}
+
+	items := make(map[string]ClusterConfigAutoscalingConfig)
+	for k, item := range a {
+		items[k] = *flattenClusterConfigAutoscalingConfig(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenClusterConfigAutoscalingConfigSlice flattens the contents of ClusterConfigAutoscalingConfig from a JSON
+// response object. Anything other than a non-empty list flattens to an empty (non-nil) slice.
+func flattenClusterConfigAutoscalingConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigAutoscalingConfig {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []ClusterConfigAutoscalingConfig{}
+	}
+
+	if len(a) == 0 {
+		return []ClusterConfigAutoscalingConfig{}
+	}
+
+	items := make([]ClusterConfigAutoscalingConfig, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenClusterConfigAutoscalingConfig(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandClusterConfigAutoscalingConfig expands an instance of ClusterConfigAutoscalingConfig into a JSON
+// request object. The Policy field is serialized under the API key "policyUri".
+func expandClusterConfigAutoscalingConfig(c *Client, f *ClusterConfigAutoscalingConfig, res *Cluster) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.Policy; !dcl.IsEmptyValueIndirect(v) {
+		m["policyUri"] = v
+	}
+
+	return m, nil
+}
+
+// flattenClusterConfigAutoscalingConfig flattens an instance of ClusterConfigAutoscalingConfig from a JSON
+// response object. Policy is read from the API key "policyUri". Non-map input yields nil; an
+// empty (but present) map collapses to the shared EmptyClusterConfigAutoscalingConfig sentinel.
+func flattenClusterConfigAutoscalingConfig(c *Client, i interface{}, res *Cluster) *ClusterConfigAutoscalingConfig {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &ClusterConfigAutoscalingConfig{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyClusterConfigAutoscalingConfig
+	}
+	r.Policy = dcl.FlattenString(m["policyUri"])
+
+	return r
+}
+
+// expandClusterConfigSecurityConfigMap expands the contents of ClusterConfigSecurityConfig into a JSON
+// request object. Nil input expands to (nil, nil); entries whose expansion is nil are dropped.
+func expandClusterConfigSecurityConfigMap(c *Client, f map[string]ClusterConfigSecurityConfig, res *Cluster) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandClusterConfigSecurityConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandClusterConfigSecurityConfigSlice expands the contents of ClusterConfigSecurityConfig into a JSON
+// request object. Nil input expands to (nil, nil); element order is preserved.
+func expandClusterConfigSecurityConfigSlice(c *Client, f []ClusterConfigSecurityConfig, res *Cluster) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandClusterConfigSecurityConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenClusterConfigSecurityConfigMap flattens the contents of ClusterConfigSecurityConfig from a JSON
+// response object. Anything other than a non-empty map flattens to an empty (non-nil) map.
+func flattenClusterConfigSecurityConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigSecurityConfig {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]ClusterConfigSecurityConfig{}
+	}
+
+	if len(a) == 0 {
+		return map[string]ClusterConfigSecurityConfig{}
+	}
+
+	items := make(map[string]ClusterConfigSecurityConfig)
+	for k, item := range a {
+		items[k] = *flattenClusterConfigSecurityConfig(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenClusterConfigSecurityConfigSlice flattens the contents of ClusterConfigSecurityConfig from a JSON
+// response object. Anything other than a non-empty list flattens to an empty (non-nil) slice.
+func flattenClusterConfigSecurityConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigSecurityConfig {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []ClusterConfigSecurityConfig{}
+	}
+
+	if len(a) == 0 {
+		return []ClusterConfigSecurityConfig{}
+	}
+
+	items := make([]ClusterConfigSecurityConfig, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenClusterConfigSecurityConfig(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandClusterConfigSecurityConfig expands an instance of ClusterConfigSecurityConfig into a JSON
+// request object by delegating to the nested KerberosConfig and IdentityConfig expanders;
+// nested expansion errors are wrapped with %w and propagated.
+func expandClusterConfigSecurityConfig(c *Client, f *ClusterConfigSecurityConfig, res *Cluster) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v, err := expandClusterConfigSecurityConfigKerberosConfig(c, f.KerberosConfig, res); err != nil {
+		return nil, fmt.Errorf("error expanding KerberosConfig into kerberosConfig: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["kerberosConfig"] = v
+	}
+	if v, err := expandClusterConfigSecurityConfigIdentityConfig(c, f.IdentityConfig, res); err != nil {
+		return nil, fmt.Errorf("error expanding IdentityConfig into identityConfig: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["identityConfig"] = v
+	}
+
+	return m, nil
+}
+
+// flattenClusterConfigSecurityConfig flattens an instance of ClusterConfigSecurityConfig from a JSON
+// response object, delegating to the nested KerberosConfig and IdentityConfig flatteners.
+// Non-map input yields nil; an empty (but present) map collapses to the shared
+// EmptyClusterConfigSecurityConfig sentinel.
+func flattenClusterConfigSecurityConfig(c *Client, i interface{}, res *Cluster) *ClusterConfigSecurityConfig {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &ClusterConfigSecurityConfig{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyClusterConfigSecurityConfig
+	}
+	r.KerberosConfig = flattenClusterConfigSecurityConfigKerberosConfig(c, m["kerberosConfig"], res)
+	r.IdentityConfig = flattenClusterConfigSecurityConfigIdentityConfig(c, m["identityConfig"], res)
+
+	return r
+}
+
+// expandClusterConfigSecurityConfigKerberosConfigMap expands the contents of ClusterConfigSecurityConfigKerberosConfig into a JSON
+// request object. Nil input expands to (nil, nil); entries whose expansion is nil are dropped.
+func expandClusterConfigSecurityConfigKerberosConfigMap(c *Client, f map[string]ClusterConfigSecurityConfigKerberosConfig, res *Cluster) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandClusterConfigSecurityConfigKerberosConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandClusterConfigSecurityConfigKerberosConfigSlice expands the contents of ClusterConfigSecurityConfigKerberosConfig into a JSON
+// request object. Nil input expands to (nil, nil); element order is preserved.
+func expandClusterConfigSecurityConfigKerberosConfigSlice(c *Client, f []ClusterConfigSecurityConfigKerberosConfig, res *Cluster) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandClusterConfigSecurityConfigKerberosConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenClusterConfigSecurityConfigKerberosConfigMap flattens the contents of ClusterConfigSecurityConfigKerberosConfig from a JSON
+// response object. Anything other than a non-empty map flattens to an empty (non-nil) map.
+func flattenClusterConfigSecurityConfigKerberosConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigSecurityConfigKerberosConfig {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]ClusterConfigSecurityConfigKerberosConfig{}
+	}
+
+	if len(a) == 0 {
+		return map[string]ClusterConfigSecurityConfigKerberosConfig{}
+	}
+
+	items := make(map[string]ClusterConfigSecurityConfigKerberosConfig)
+	for k, item := range a {
+		items[k] = *flattenClusterConfigSecurityConfigKerberosConfig(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenClusterConfigSecurityConfigKerberosConfigSlice flattens the contents of ClusterConfigSecurityConfigKerberosConfig from a JSON
+// response object. Anything other than a non-empty list flattens to an empty (non-nil) slice.
+func flattenClusterConfigSecurityConfigKerberosConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigSecurityConfigKerberosConfig {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []ClusterConfigSecurityConfigKerberosConfig{}
+	}
+
+	if len(a) == 0 {
+		return []ClusterConfigSecurityConfigKerberosConfig{}
+	}
+
+	items := make([]ClusterConfigSecurityConfigKerberosConfig, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenClusterConfigSecurityConfigKerberosConfig(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandClusterConfigSecurityConfigKerberosConfig expands an instance of ClusterConfigSecurityConfigKerberosConfig into a JSON
+// request object. Several struct fields are serialized under "*Uri" API keys
+// (e.g. RootPrincipalPassword -> "rootPrincipalPasswordUri", KmsKey -> "kmsKeyUri");
+// the flattener below performs the inverse mapping.
+func expandClusterConfigSecurityConfigKerberosConfig(c *Client, f *ClusterConfigSecurityConfigKerberosConfig, res *Cluster) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.EnableKerberos; !dcl.IsEmptyValueIndirect(v) {
+		m["enableKerberos"] = v
+	}
+	if v := f.RootPrincipalPassword; !dcl.IsEmptyValueIndirect(v) {
+		m["rootPrincipalPasswordUri"] = v
+	}
+	if v := f.KmsKey; !dcl.IsEmptyValueIndirect(v) {
+		m["kmsKeyUri"] = v
+	}
+	if v := f.Keystore; !dcl.IsEmptyValueIndirect(v) {
+		m["keystoreUri"] = v
+	}
+	if v := f.Truststore; !dcl.IsEmptyValueIndirect(v) {
+		m["truststoreUri"] = v
+	}
+	if v := f.KeystorePassword; !dcl.IsEmptyValueIndirect(v) {
+		m["keystorePasswordUri"] = v
+	}
+	if v := f.KeyPassword; !dcl.IsEmptyValueIndirect(v) {
+		m["keyPasswordUri"] = v
+	}
+	if v := f.TruststorePassword; !dcl.IsEmptyValueIndirect(v) {
+		m["truststorePasswordUri"] = v
+	}
+	if v := f.CrossRealmTrustRealm; !dcl.IsEmptyValueIndirect(v) {
+		m["crossRealmTrustRealm"] = v
+	}
+	if v := f.CrossRealmTrustKdc; !dcl.IsEmptyValueIndirect(v) {
+		m["crossRealmTrustKdc"] = v
+	}
+	if v := f.CrossRealmTrustAdminServer; !dcl.IsEmptyValueIndirect(v) {
+		m["crossRealmTrustAdminServer"] = v
+	}
+	if v := f.CrossRealmTrustSharedPassword; !dcl.IsEmptyValueIndirect(v) {
+		m["crossRealmTrustSharedPasswordUri"] = v
+	}
+	if v := f.KdcDbKey; !dcl.IsEmptyValueIndirect(v) {
+		m["kdcDbKeyUri"] = v
+	}
+	if v := f.TgtLifetimeHours; !dcl.IsEmptyValueIndirect(v) {
+		m["tgtLifetimeHours"] = v
+	}
+	if v := f.Realm; !dcl.IsEmptyValueIndirect(v) {
+		m["realm"] = v
+	}
+
+	return m, nil
+}
+
+// flattenClusterConfigSecurityConfigKerberosConfig flattens an instance of ClusterConfigSecurityConfigKerberosConfig from a JSON
+// response object, reversing the "*Uri" key mapping used by the expander above.
+// Non-map input yields nil; an empty (but present) map collapses to the shared
+// EmptyClusterConfigSecurityConfigKerberosConfig sentinel.
+func flattenClusterConfigSecurityConfigKerberosConfig(c *Client, i interface{}, res *Cluster) *ClusterConfigSecurityConfigKerberosConfig {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &ClusterConfigSecurityConfigKerberosConfig{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyClusterConfigSecurityConfigKerberosConfig
+	}
+	r.EnableKerberos = dcl.FlattenBool(m["enableKerberos"])
+	r.RootPrincipalPassword = dcl.FlattenString(m["rootPrincipalPasswordUri"])
+	r.KmsKey = dcl.FlattenString(m["kmsKeyUri"])
+	r.Keystore = dcl.FlattenString(m["keystoreUri"])
+	r.Truststore = dcl.FlattenString(m["truststoreUri"])
+	r.KeystorePassword = dcl.FlattenString(m["keystorePasswordUri"])
+	r.KeyPassword = dcl.FlattenString(m["keyPasswordUri"])
+	r.TruststorePassword = dcl.FlattenString(m["truststorePasswordUri"])
+	r.CrossRealmTrustRealm = dcl.FlattenString(m["crossRealmTrustRealm"])
+	r.CrossRealmTrustKdc = dcl.FlattenString(m["crossRealmTrustKdc"])
+	r.CrossRealmTrustAdminServer = dcl.FlattenString(m["crossRealmTrustAdminServer"])
+	r.CrossRealmTrustSharedPassword = dcl.FlattenString(m["crossRealmTrustSharedPasswordUri"])
+	r.KdcDbKey = dcl.FlattenString(m["kdcDbKeyUri"])
+	r.TgtLifetimeHours = dcl.FlattenInteger(m["tgtLifetimeHours"])
+	r.Realm = dcl.FlattenString(m["realm"])
+
+	return r
+}
+
+// expandClusterConfigSecurityConfigIdentityConfigMap expands the contents of ClusterConfigSecurityConfigIdentityConfig into a JSON
+// request object. Nil input expands to (nil, nil); entries whose expansion is nil are dropped.
+func expandClusterConfigSecurityConfigIdentityConfigMap(c *Client, f map[string]ClusterConfigSecurityConfigIdentityConfig, res *Cluster) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandClusterConfigSecurityConfigIdentityConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandClusterConfigSecurityConfigIdentityConfigSlice expands the contents of ClusterConfigSecurityConfigIdentityConfig into a JSON
+// request object. Nil input expands to (nil, nil); element order is preserved.
+func expandClusterConfigSecurityConfigIdentityConfigSlice(c *Client, f []ClusterConfigSecurityConfigIdentityConfig, res *Cluster) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandClusterConfigSecurityConfigIdentityConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenClusterConfigSecurityConfigIdentityConfigMap flattens the contents of ClusterConfigSecurityConfigIdentityConfig from a JSON
+// response object. Anything other than a non-empty map flattens to an empty (non-nil) map.
+func flattenClusterConfigSecurityConfigIdentityConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigSecurityConfigIdentityConfig {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]ClusterConfigSecurityConfigIdentityConfig{}
+	}
+
+	if len(a) == 0 {
+		return map[string]ClusterConfigSecurityConfigIdentityConfig{}
+	}
+
+	items := make(map[string]ClusterConfigSecurityConfigIdentityConfig)
+	for k, item := range a {
+		items[k] = *flattenClusterConfigSecurityConfigIdentityConfig(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenClusterConfigSecurityConfigIdentityConfigSlice flattens the contents of ClusterConfigSecurityConfigIdentityConfig from a JSON
+// response object. Anything other than a non-empty list flattens to an empty (non-nil) slice.
+func flattenClusterConfigSecurityConfigIdentityConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigSecurityConfigIdentityConfig {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []ClusterConfigSecurityConfigIdentityConfig{}
+	}
+
+	if len(a) == 0 {
+		return []ClusterConfigSecurityConfigIdentityConfig{}
+	}
+
+	items := make([]ClusterConfigSecurityConfigIdentityConfig, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenClusterConfigSecurityConfigIdentityConfig(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandClusterConfigSecurityConfigIdentityConfig expands an instance of ClusterConfigSecurityConfigIdentityConfig into a JSON
+// request object. Empty or nil input expands to (nil, nil).
+func expandClusterConfigSecurityConfigIdentityConfig(c *Client, f *ClusterConfigSecurityConfigIdentityConfig, res *Cluster) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.UserServiceAccountMapping; !dcl.IsEmptyValueIndirect(v) {
+		m["userServiceAccountMapping"] = v
+	}
+
+	return m, nil
+}
+
+// flattenClusterConfigSecurityConfigIdentityConfig flattens an instance of ClusterConfigSecurityConfigIdentityConfig from a JSON
+// response object. Non-map input yields nil; an empty (but present) map collapses to the
+// shared EmptyClusterConfigSecurityConfigIdentityConfig sentinel.
+func flattenClusterConfigSecurityConfigIdentityConfig(c *Client, i interface{}, res *Cluster) *ClusterConfigSecurityConfigIdentityConfig {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &ClusterConfigSecurityConfigIdentityConfig{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyClusterConfigSecurityConfigIdentityConfig
+	}
+	r.UserServiceAccountMapping = dcl.FlattenKeyValuePairs(m["userServiceAccountMapping"])
+
+	return r
+}
+
+// expandClusterConfigLifecycleConfigMap expands the contents of ClusterConfigLifecycleConfig into a JSON
+// request object. Nil input expands to (nil, nil); entries whose expansion is nil are dropped.
+func expandClusterConfigLifecycleConfigMap(c *Client, f map[string]ClusterConfigLifecycleConfig, res *Cluster) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandClusterConfigLifecycleConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandClusterConfigLifecycleConfigSlice expands a slice of ClusterConfigLifecycleConfig into a list of
+// JSON request objects.
+func expandClusterConfigLifecycleConfigSlice(c *Client, f []ClusterConfigLifecycleConfig, res *Cluster) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandClusterConfigLifecycleConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenClusterConfigLifecycleConfigMap flattens a JSON object into a map of ClusterConfigLifecycleConfig;
+// non-object or empty input yields an empty map.
+func flattenClusterConfigLifecycleConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigLifecycleConfig {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]ClusterConfigLifecycleConfig{}
+	}
+
+	if len(a) == 0 {
+		return map[string]ClusterConfigLifecycleConfig{}
+	}
+
+	items := make(map[string]ClusterConfigLifecycleConfig)
+	for k, item := range a {
+		items[k] = *flattenClusterConfigLifecycleConfig(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenClusterConfigLifecycleConfigSlice flattens a JSON array into a slice of ClusterConfigLifecycleConfig;
+// non-array or empty input yields an empty slice.
+func flattenClusterConfigLifecycleConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigLifecycleConfig {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []ClusterConfigLifecycleConfig{}
+	}
+
+	if len(a) == 0 {
+		return []ClusterConfigLifecycleConfig{}
+	}
+
+	items := make([]ClusterConfigLifecycleConfig, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenClusterConfigLifecycleConfig(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandClusterConfigLifecycleConfig expands an instance of ClusterConfigLifecycleConfig into a JSON
+// request object.
+func expandClusterConfigLifecycleConfig(c *Client, f *ClusterConfigLifecycleConfig, res *Cluster) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.IdleDeleteTtl; !dcl.IsEmptyValueIndirect(v) {
+		m["idleDeleteTtl"] = v
+	}
+	if v := f.AutoDeleteTime; !dcl.IsEmptyValueIndirect(v) {
+		m["autoDeleteTime"] = v
+	}
+	if v := f.AutoDeleteTtl; !dcl.IsEmptyValueIndirect(v) {
+		m["autoDeleteTtl"] = v
+	}
+
+	return m, nil
+}
+
+// flattenClusterConfigLifecycleConfig flattens an instance of ClusterConfigLifecycleConfig from a JSON
+// response object.
+func flattenClusterConfigLifecycleConfig(c *Client, i interface{}, res *Cluster) *ClusterConfigLifecycleConfig {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &ClusterConfigLifecycleConfig{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyClusterConfigLifecycleConfig
+	}
+	r.IdleDeleteTtl = dcl.FlattenString(m["idleDeleteTtl"])
+	r.AutoDeleteTime = dcl.FlattenString(m["autoDeleteTime"])
+	r.AutoDeleteTtl = dcl.FlattenString(m["autoDeleteTtl"])
+	r.IdleStartTime = dcl.FlattenString(m["idleStartTime"])
+
+	return r
+}
+
+// expandClusterConfigEndpointConfigMap expands a map of ClusterConfigEndpointConfig into a map of
+// JSON request objects, omitting entries that expand to nil.
+func expandClusterConfigEndpointConfigMap(c *Client, f map[string]ClusterConfigEndpointConfig, res *Cluster) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandClusterConfigEndpointConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandClusterConfigEndpointConfigSlice expands a slice of ClusterConfigEndpointConfig into a list of
+// JSON request objects.
+func expandClusterConfigEndpointConfigSlice(c *Client, f []ClusterConfigEndpointConfig, res *Cluster) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandClusterConfigEndpointConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenClusterConfigEndpointConfigMap flattens a JSON object into a map of ClusterConfigEndpointConfig;
+// non-object or empty input yields an empty map.
+func flattenClusterConfigEndpointConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigEndpointConfig {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]ClusterConfigEndpointConfig{}
+	}
+
+	if len(a) == 0 {
+		return map[string]ClusterConfigEndpointConfig{}
+	}
+
+	items := make(map[string]ClusterConfigEndpointConfig)
+	for k, item := range a {
+		items[k] = *flattenClusterConfigEndpointConfig(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenClusterConfigEndpointConfigSlice flattens a JSON array into a slice of ClusterConfigEndpointConfig;
+// non-array or empty input yields an empty slice.
+func flattenClusterConfigEndpointConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigEndpointConfig {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []ClusterConfigEndpointConfig{}
+	}
+
+	if len(a) == 0 {
+		return []ClusterConfigEndpointConfig{}
+	}
+
+	items := make([]ClusterConfigEndpointConfig, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenClusterConfigEndpointConfig(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandClusterConfigEndpointConfig expands an instance of ClusterConfigEndpointConfig into a JSON
+// request object.
+func expandClusterConfigEndpointConfig(c *Client, f *ClusterConfigEndpointConfig, res *Cluster) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.EnableHttpPortAccess; !dcl.IsEmptyValueIndirect(v) {
+		m["enableHttpPortAccess"] = v
+	}
+
+	return m, nil
+}
+
+// flattenClusterConfigEndpointConfig flattens an instance of ClusterConfigEndpointConfig from a JSON
+// response object.
+func flattenClusterConfigEndpointConfig(c *Client, i interface{}, res *Cluster) *ClusterConfigEndpointConfig {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &ClusterConfigEndpointConfig{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyClusterConfigEndpointConfig
+	}
+	r.HttpPorts = dcl.FlattenKeyValuePairs(m["httpPorts"])
+	r.EnableHttpPortAccess = dcl.FlattenBool(m["enableHttpPortAccess"])
+
+	return r
+}
+
+// expandClusterConfigGkeClusterConfigMap expands a map of ClusterConfigGkeClusterConfig into a map of
+// JSON request objects, omitting entries that expand to nil.
+func expandClusterConfigGkeClusterConfigMap(c *Client, f map[string]ClusterConfigGkeClusterConfig, res *Cluster) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandClusterConfigGkeClusterConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandClusterConfigGkeClusterConfigSlice expands a slice of ClusterConfigGkeClusterConfig into a list of
+// JSON request objects.
+func expandClusterConfigGkeClusterConfigSlice(c *Client, f []ClusterConfigGkeClusterConfig, res *Cluster) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandClusterConfigGkeClusterConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenClusterConfigGkeClusterConfigMap flattens a JSON object into a map of ClusterConfigGkeClusterConfig;
+// non-object or empty input yields an empty map.
+func flattenClusterConfigGkeClusterConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigGkeClusterConfig {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]ClusterConfigGkeClusterConfig{}
+	}
+
+	if len(a) == 0 {
+		return map[string]ClusterConfigGkeClusterConfig{}
+	}
+
+	items := make(map[string]ClusterConfigGkeClusterConfig)
+	for k, item := range a {
+		items[k] = *flattenClusterConfigGkeClusterConfig(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenClusterConfigGkeClusterConfigSlice flattens a JSON array into a slice of ClusterConfigGkeClusterConfig;
+// non-array or empty input yields an empty slice.
+func flattenClusterConfigGkeClusterConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigGkeClusterConfig {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []ClusterConfigGkeClusterConfig{}
+	}
+
+	if len(a) == 0 {
+		return []ClusterConfigGkeClusterConfig{}
+	}
+
+	items := make([]ClusterConfigGkeClusterConfig, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenClusterConfigGkeClusterConfig(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandClusterConfigGkeClusterConfig expands an instance of ClusterConfigGkeClusterConfig into a JSON
+// request object.
+func expandClusterConfigGkeClusterConfig(c *Client, f *ClusterConfigGkeClusterConfig, res *Cluster) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v, err := expandClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(c, f.NamespacedGkeDeploymentTarget, res); err != nil {
+		return nil, fmt.Errorf("error expanding NamespacedGkeDeploymentTarget into namespacedGkeDeploymentTarget: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["namespacedGkeDeploymentTarget"] = v
+	}
+
+	return m, nil
+}
+
+// flattenClusterConfigGkeClusterConfig flattens an instance of ClusterConfigGkeClusterConfig from a JSON
+// response object.
+func flattenClusterConfigGkeClusterConfig(c *Client, i interface{}, res *Cluster) *ClusterConfigGkeClusterConfig {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &ClusterConfigGkeClusterConfig{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyClusterConfigGkeClusterConfig
+	}
+	r.NamespacedGkeDeploymentTarget = flattenClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(c, m["namespacedGkeDeploymentTarget"], res)
+
+	return r
+}
+
+// expandClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetMap expands a map of ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget into a map of
+// JSON request objects, omitting entries that expand to nil.
+func expandClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetMap(c *Client, f map[string]ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget, res *Cluster) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetSlice expands a slice of ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget into a list of
+// JSON request objects.
+func expandClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetSlice(c *Client, f []ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget, res *Cluster) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetMap flattens a JSON object into a map of ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget;
+// non-object or empty input yields an empty map.
+func flattenClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget{}
+	}
+
+	if len(a) == 0 {
+		return map[string]ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget{}
+	}
+
+	items := make(map[string]ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget)
+	for k, item := range a {
+		items[k] = *flattenClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetSlice flattens a JSON array into a slice of ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget;
+// non-array or empty input yields an empty slice.
+func flattenClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget{}
+	}
+
+	if len(a) == 0 {
+		return []ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget{}
+	}
+
+	items := make([]ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget expands an instance of ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget into a JSON
+// request object.
+func expandClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(c *Client, f *ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget, res *Cluster) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.TargetGkeCluster; !dcl.IsEmptyValueIndirect(v) {
+		m["targetGkeCluster"] = v
+	}
+	if v := f.ClusterNamespace; !dcl.IsEmptyValueIndirect(v) {
+		m["clusterNamespace"] = v
+	}
+
+	return m, nil
+}
+
+// flattenClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget flattens an instance of ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget from a JSON
+// response object.
+func flattenClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(c *Client, i interface{}, res *Cluster) *ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget
+	}
+	r.TargetGkeCluster = dcl.FlattenString(m["targetGkeCluster"])
+	r.ClusterNamespace = dcl.FlattenString(m["clusterNamespace"])
+
+	return r
+}
+
+// expandClusterConfigMetastoreConfigMap expands a map of ClusterConfigMetastoreConfig into a map of
+// JSON request objects, omitting entries that expand to nil.
+func expandClusterConfigMetastoreConfigMap(c *Client, f map[string]ClusterConfigMetastoreConfig, res *Cluster) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandClusterConfigMetastoreConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandClusterConfigMetastoreConfigSlice expands a slice of ClusterConfigMetastoreConfig into a list of
+// JSON request objects.
+func expandClusterConfigMetastoreConfigSlice(c *Client, f []ClusterConfigMetastoreConfig, res *Cluster) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandClusterConfigMetastoreConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenClusterConfigMetastoreConfigMap flattens a JSON object into a map of ClusterConfigMetastoreConfig;
+// non-object or empty input yields an empty map.
+func flattenClusterConfigMetastoreConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigMetastoreConfig {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]ClusterConfigMetastoreConfig{}
+	}
+
+	if len(a) == 0 {
+		return map[string]ClusterConfigMetastoreConfig{}
+	}
+
+	items := make(map[string]ClusterConfigMetastoreConfig)
+	for k, item := range a {
+		items[k] = *flattenClusterConfigMetastoreConfig(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenClusterConfigMetastoreConfigSlice flattens a JSON array into a slice of ClusterConfigMetastoreConfig;
+// non-array or empty input yields an empty slice.
+func flattenClusterConfigMetastoreConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigMetastoreConfig {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []ClusterConfigMetastoreConfig{}
+	}
+
+	if len(a) == 0 {
+		return []ClusterConfigMetastoreConfig{}
+	}
+
+	items := make([]ClusterConfigMetastoreConfig, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenClusterConfigMetastoreConfig(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandClusterConfigMetastoreConfig expands an instance of ClusterConfigMetastoreConfig into a JSON
+// request object.
+func expandClusterConfigMetastoreConfig(c *Client, f *ClusterConfigMetastoreConfig, res *Cluster) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.DataprocMetastoreService; !dcl.IsEmptyValueIndirect(v) {
+		m["dataprocMetastoreService"] = v
+	}
+
+	return m, nil
+}
+
+// flattenClusterConfigMetastoreConfig flattens an instance of ClusterConfigMetastoreConfig from a JSON
+// response object.
+func flattenClusterConfigMetastoreConfig(c *Client, i interface{}, res *Cluster) *ClusterConfigMetastoreConfig {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &ClusterConfigMetastoreConfig{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyClusterConfigMetastoreConfig
+	}
+	r.DataprocMetastoreService = dcl.FlattenString(m["dataprocMetastoreService"])
+
+	return r
+}
+
+// expandClusterConfigDataprocMetricConfigMap expands a map of ClusterConfigDataprocMetricConfig into a map of
+// JSON request objects, omitting entries that expand to nil.
+func expandClusterConfigDataprocMetricConfigMap(c *Client, f map[string]ClusterConfigDataprocMetricConfig, res *Cluster) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandClusterConfigDataprocMetricConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandClusterConfigDataprocMetricConfigSlice expands a slice of ClusterConfigDataprocMetricConfig into a list of
+// JSON request objects.
+func expandClusterConfigDataprocMetricConfigSlice(c *Client, f []ClusterConfigDataprocMetricConfig, res *Cluster) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandClusterConfigDataprocMetricConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenClusterConfigDataprocMetricConfigMap flattens a JSON object into a map of ClusterConfigDataprocMetricConfig;
+// non-object or empty input yields an empty map.
+func flattenClusterConfigDataprocMetricConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigDataprocMetricConfig {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]ClusterConfigDataprocMetricConfig{}
+	}
+
+	if len(a) == 0 {
+		return map[string]ClusterConfigDataprocMetricConfig{}
+	}
+
+	items := make(map[string]ClusterConfigDataprocMetricConfig)
+	for k, item := range a {
+		items[k] = *flattenClusterConfigDataprocMetricConfig(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenClusterConfigDataprocMetricConfigSlice flattens a JSON array into a slice of ClusterConfigDataprocMetricConfig;
+// non-array or empty input yields an empty slice.
+func flattenClusterConfigDataprocMetricConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigDataprocMetricConfig {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []ClusterConfigDataprocMetricConfig{}
+	}
+
+	if len(a) == 0 {
+		return []ClusterConfigDataprocMetricConfig{}
+	}
+
+	items := make([]ClusterConfigDataprocMetricConfig, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenClusterConfigDataprocMetricConfig(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandClusterConfigDataprocMetricConfig expands an instance of ClusterConfigDataprocMetricConfig into a JSON
+// request object.
+func expandClusterConfigDataprocMetricConfig(c *Client, f *ClusterConfigDataprocMetricConfig, res *Cluster) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v, err := expandClusterConfigDataprocMetricConfigMetricsSlice(c, f.Metrics, res); err != nil {
+		return nil, fmt.Errorf("error expanding Metrics into metrics: %w", err)
+	} else if v != nil {
+		m["metrics"] = v
+	}
+
+	return m, nil
+}
+
+// flattenClusterConfigDataprocMetricConfig flattens an instance of ClusterConfigDataprocMetricConfig from a JSON
+// response object.
+func flattenClusterConfigDataprocMetricConfig(c *Client, i interface{}, res *Cluster) *ClusterConfigDataprocMetricConfig {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &ClusterConfigDataprocMetricConfig{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyClusterConfigDataprocMetricConfig
+	}
+	r.Metrics = flattenClusterConfigDataprocMetricConfigMetricsSlice(c, m["metrics"], res)
+
+	return r
+}
+
+// expandClusterConfigDataprocMetricConfigMetricsMap expands a map of ClusterConfigDataprocMetricConfigMetrics into a map of
+// JSON request objects, omitting entries that expand to nil.
+func expandClusterConfigDataprocMetricConfigMetricsMap(c *Client, f map[string]ClusterConfigDataprocMetricConfigMetrics, res *Cluster) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandClusterConfigDataprocMetricConfigMetrics(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandClusterConfigDataprocMetricConfigMetricsSlice expands a slice of ClusterConfigDataprocMetricConfigMetrics into a list of
+// JSON request objects.
+func expandClusterConfigDataprocMetricConfigMetricsSlice(c *Client, f []ClusterConfigDataprocMetricConfigMetrics, res *Cluster) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandClusterConfigDataprocMetricConfigMetrics(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenClusterConfigDataprocMetricConfigMetricsMap flattens a JSON object into a map of ClusterConfigDataprocMetricConfigMetrics;
+// non-object or empty input yields an empty map.
+func flattenClusterConfigDataprocMetricConfigMetricsMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigDataprocMetricConfigMetrics {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]ClusterConfigDataprocMetricConfigMetrics{}
+	}
+
+	if len(a) == 0 {
+		return map[string]ClusterConfigDataprocMetricConfigMetrics{}
+	}
+
+	items := make(map[string]ClusterConfigDataprocMetricConfigMetrics)
+	for k, item := range a {
+		items[k] = *flattenClusterConfigDataprocMetricConfigMetrics(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenClusterConfigDataprocMetricConfigMetricsSlice flattens a JSON array into a slice of ClusterConfigDataprocMetricConfigMetrics;
+// non-array or empty input yields an empty slice.
+func flattenClusterConfigDataprocMetricConfigMetricsSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigDataprocMetricConfigMetrics {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []ClusterConfigDataprocMetricConfigMetrics{}
+	}
+
+	if len(a) == 0 {
+		return []ClusterConfigDataprocMetricConfigMetrics{}
+	}
+
+	items := make([]ClusterConfigDataprocMetricConfigMetrics, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenClusterConfigDataprocMetricConfigMetrics(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandClusterConfigDataprocMetricConfigMetrics expands an instance of ClusterConfigDataprocMetricConfigMetrics into a JSON
+// request object.
+func expandClusterConfigDataprocMetricConfigMetrics(c *Client, f *ClusterConfigDataprocMetricConfigMetrics, res *Cluster) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.MetricSource; !dcl.IsEmptyValueIndirect(v) {
+		m["metricSource"] = v
+	}
+	if v := f.MetricOverrides; v != nil {
+		m["metricOverrides"] = v
+	}
+
+	return m, nil
+}
+
+// flattenClusterConfigDataprocMetricConfigMetrics flattens an instance of ClusterConfigDataprocMetricConfigMetrics from a JSON
+// response object.
+func flattenClusterConfigDataprocMetricConfigMetrics(c *Client, i interface{}, res *Cluster) *ClusterConfigDataprocMetricConfigMetrics {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &ClusterConfigDataprocMetricConfigMetrics{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyClusterConfigDataprocMetricConfigMetrics
+	}
+	r.MetricSource = flattenClusterConfigDataprocMetricConfigMetricsMetricSourceEnum(m["metricSource"])
+	r.MetricOverrides = dcl.FlattenStringSlice(m["metricOverrides"])
+
+	return r
+}
+
+// expandClusterStatusMap expands a map of ClusterStatus into a map of
+// JSON request objects, omitting entries that expand to nil.
+func expandClusterStatusMap(c *Client, f map[string]ClusterStatus, res *Cluster) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandClusterStatus(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandClusterStatusSlice expands a slice of ClusterStatus into a list of
+// JSON request objects.
+func expandClusterStatusSlice(c *Client, f []ClusterStatus, res *Cluster) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandClusterStatus(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenClusterStatusMap flattens a JSON object into a map of ClusterStatus;
+// non-object or empty input yields an empty map.
+func flattenClusterStatusMap(c *Client, i interface{}, res *Cluster) map[string]ClusterStatus {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]ClusterStatus{}
+	}
+
+	if len(a) == 0 {
+		return map[string]ClusterStatus{}
+	}
+
+	items := make(map[string]ClusterStatus)
+	for k, item := range a {
+		items[k] = *flattenClusterStatus(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenClusterStatusSlice flattens a JSON array into a slice of ClusterStatus;
+// non-array or empty input yields an empty slice.
+func flattenClusterStatusSlice(c *Client, i interface{}, res *Cluster) []ClusterStatus {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []ClusterStatus{}
+	}
+
+	if len(a) == 0 {
+		return []ClusterStatus{}
+	}
+
+	items := make([]ClusterStatus, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenClusterStatus(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandClusterStatus expands an instance of ClusterStatus into a JSON
+// request object.
+func expandClusterStatus(c *Client, f *ClusterStatus, res *Cluster) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+
+	return m, nil
+}
+
+// flattenClusterStatus flattens an instance of ClusterStatus from a JSON
+// response object.
+func flattenClusterStatus(c *Client, i interface{}, res *Cluster) *ClusterStatus {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &ClusterStatus{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyClusterStatus
+	}
+	r.State = flattenClusterStatusStateEnum(m["state"])
+	r.Detail = dcl.FlattenString(m["detail"])
+	r.StateStartTime = dcl.FlattenString(m["stateStartTime"])
+	r.Substate = flattenClusterStatusSubstateEnum(m["substate"])
+
+	return r
+}
+
+// expandClusterStatusHistoryMap expands a map of ClusterStatusHistory into a map of
+// JSON request objects, omitting entries that expand to nil.
+func expandClusterStatusHistoryMap(c *Client, f map[string]ClusterStatusHistory, res *Cluster) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandClusterStatusHistory(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandClusterStatusHistorySlice expands a slice of ClusterStatusHistory into a list of
+// JSON request objects.
+func expandClusterStatusHistorySlice(c *Client, f []ClusterStatusHistory, res *Cluster) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandClusterStatusHistory(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenClusterStatusHistoryMap flattens a JSON object into a map of ClusterStatusHistory;
+// non-object or empty input yields an empty map.
+func flattenClusterStatusHistoryMap(c *Client, i interface{}, res *Cluster) map[string]ClusterStatusHistory {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]ClusterStatusHistory{}
+	}
+
+	if len(a) == 0 {
+		return map[string]ClusterStatusHistory{}
+	}
+
+	items := make(map[string]ClusterStatusHistory)
+	for k, item := range a {
+		items[k] = *flattenClusterStatusHistory(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenClusterStatusHistorySlice flattens a JSON array into a slice of ClusterStatusHistory;
+// non-array or empty input yields an empty slice.
+func flattenClusterStatusHistorySlice(c *Client, i interface{}, res *Cluster) []ClusterStatusHistory {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []ClusterStatusHistory{}
+	}
+
+	if len(a) == 0 {
+		return []ClusterStatusHistory{}
+	}
+
+	items := make([]ClusterStatusHistory, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenClusterStatusHistory(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandClusterStatusHistory expands an instance of ClusterStatusHistory into a JSON
+// request object.
+func expandClusterStatusHistory(c *Client, f *ClusterStatusHistory, res *Cluster) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+
+	return m, nil
+}
+
+// flattenClusterStatusHistory flattens an instance of ClusterStatusHistory from a JSON
+// response object.
+func flattenClusterStatusHistory(c *Client, i interface{}, res *Cluster) *ClusterStatusHistory {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &ClusterStatusHistory{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyClusterStatusHistory
+	}
+	r.State = flattenClusterStatusHistoryStateEnum(m["state"])
+	r.Detail = dcl.FlattenString(m["detail"])
+	r.StateStartTime = dcl.FlattenString(m["stateStartTime"])
+	r.Substate = flattenClusterStatusHistorySubstateEnum(m["substate"])
+
+	return r
+}
+
+// expandClusterMetricsMap expands a map of ClusterMetrics into a map of
+// JSON request objects, omitting entries that expand to nil.
+func expandClusterMetricsMap(c *Client, f map[string]ClusterMetrics, res *Cluster) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandClusterMetrics(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandClusterMetricsSlice expands a slice of ClusterMetrics into a list of
+// JSON request objects.
+func expandClusterMetricsSlice(c *Client, f []ClusterMetrics, res *Cluster) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandClusterMetrics(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenClusterMetricsMap flattens a JSON object into a map of ClusterMetrics;
+// non-object or empty input yields an empty map.
+func flattenClusterMetricsMap(c *Client, i interface{}, res *Cluster) map[string]ClusterMetrics { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterMetrics{} + } + + if len(a) == 0 { + return map[string]ClusterMetrics{} + } + + items := make(map[string]ClusterMetrics) + for k, item := range a { + items[k] = *flattenClusterMetrics(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterMetricsSlice flattens the contents of ClusterMetrics from a JSON +// response object. +func flattenClusterMetricsSlice(c *Client, i interface{}, res *Cluster) []ClusterMetrics { + a, ok := i.([]interface{}) + if !ok { + return []ClusterMetrics{} + } + + if len(a) == 0 { + return []ClusterMetrics{} + } + + items := make([]ClusterMetrics, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterMetrics(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterMetrics expands an instance of ClusterMetrics into a JSON +// request object. +func expandClusterMetrics(c *Client, f *ClusterMetrics, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.HdfsMetrics; !dcl.IsEmptyValueIndirect(v) { + m["hdfsMetrics"] = v + } + if v := f.YarnMetrics; !dcl.IsEmptyValueIndirect(v) { + m["yarnMetrics"] = v + } + + return m, nil +} + +// flattenClusterMetrics flattens an instance of ClusterMetrics from a JSON +// response object. 
+func flattenClusterMetrics(c *Client, i interface{}, res *Cluster) *ClusterMetrics {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &ClusterMetrics{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		// Present-but-empty objects map to the shared Empty* sentinel, not nil.
+		return EmptyClusterMetrics
+	}
+	r.HdfsMetrics = dcl.FlattenKeyValuePairs(m["hdfsMetrics"])
+	r.YarnMetrics = dcl.FlattenKeyValuePairs(m["yarnMetrics"])
+
+	return r
+}
+
+// expandClusterVirtualClusterConfigMap expands the contents of ClusterVirtualClusterConfig into a JSON
+// request object.
+func expandClusterVirtualClusterConfigMap(c *Client, f map[string]ClusterVirtualClusterConfig, res *Cluster) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandClusterVirtualClusterConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandClusterVirtualClusterConfigSlice expands the contents of ClusterVirtualClusterConfig into a JSON
+// request object.
+func expandClusterVirtualClusterConfigSlice(c *Client, f []ClusterVirtualClusterConfig, res *Cluster) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandClusterVirtualClusterConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenClusterVirtualClusterConfigMap flattens the contents of ClusterVirtualClusterConfig from a JSON
+// response object.
+func flattenClusterVirtualClusterConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterVirtualClusterConfig {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]ClusterVirtualClusterConfig{}
+	}
+
+	if len(a) == 0 {
+		return map[string]ClusterVirtualClusterConfig{}
+	}
+
+	items := make(map[string]ClusterVirtualClusterConfig)
+	for k, item := range a {
+		items[k] = *flattenClusterVirtualClusterConfig(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenClusterVirtualClusterConfigSlice flattens the contents of ClusterVirtualClusterConfig from a JSON
+// response object.
+func flattenClusterVirtualClusterConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterVirtualClusterConfig {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []ClusterVirtualClusterConfig{}
+	}
+
+	if len(a) == 0 {
+		return []ClusterVirtualClusterConfig{}
+	}
+
+	items := make([]ClusterVirtualClusterConfig, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenClusterVirtualClusterConfig(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandClusterVirtualClusterConfig expands an instance of ClusterVirtualClusterConfig into a JSON
+// request object.
+func expandClusterVirtualClusterConfig(c *Client, f *ClusterVirtualClusterConfig, res *Cluster) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.StagingBucket; !dcl.IsEmptyValueIndirect(v) {
+		m["stagingBucket"] = v
+	}
+	if v, err := expandClusterVirtualClusterConfigKubernetesClusterConfig(c, f.KubernetesClusterConfig, res); err != nil {
+		return nil, fmt.Errorf("error expanding KubernetesClusterConfig into kubernetesClusterConfig: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["kubernetesClusterConfig"] = v
+	}
+	if v, err := expandClusterVirtualClusterConfigAuxiliaryServicesConfig(c, f.AuxiliaryServicesConfig, res); err != nil {
+		return nil, fmt.Errorf("error expanding AuxiliaryServicesConfig into auxiliaryServicesConfig: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["auxiliaryServicesConfig"] = v
+	}
+
+	return m, nil
+}
+
+// flattenClusterVirtualClusterConfig flattens an instance of ClusterVirtualClusterConfig from a JSON
+// response object.
+func flattenClusterVirtualClusterConfig(c *Client, i interface{}, res *Cluster) *ClusterVirtualClusterConfig {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &ClusterVirtualClusterConfig{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyClusterVirtualClusterConfig
+	}
+	r.StagingBucket = dcl.FlattenString(m["stagingBucket"])
+	r.KubernetesClusterConfig = flattenClusterVirtualClusterConfigKubernetesClusterConfig(c, m["kubernetesClusterConfig"], res)
+	r.AuxiliaryServicesConfig = flattenClusterVirtualClusterConfigAuxiliaryServicesConfig(c, m["auxiliaryServicesConfig"], res)
+
+	return r
+}
+
+// expandClusterVirtualClusterConfigKubernetesClusterConfigMap expands the contents of ClusterVirtualClusterConfigKubernetesClusterConfig into a JSON
+// request object.
+func expandClusterVirtualClusterConfigKubernetesClusterConfigMap(c *Client, f map[string]ClusterVirtualClusterConfigKubernetesClusterConfig, res *Cluster) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandClusterVirtualClusterConfigKubernetesClusterConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandClusterVirtualClusterConfigKubernetesClusterConfigSlice expands the contents of ClusterVirtualClusterConfigKubernetesClusterConfig into a JSON
+// request object.
+func expandClusterVirtualClusterConfigKubernetesClusterConfigSlice(c *Client, f []ClusterVirtualClusterConfigKubernetesClusterConfig, res *Cluster) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandClusterVirtualClusterConfigKubernetesClusterConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenClusterVirtualClusterConfigKubernetesClusterConfigMap flattens the contents of ClusterVirtualClusterConfigKubernetesClusterConfig from a JSON
+// response object.
+func flattenClusterVirtualClusterConfigKubernetesClusterConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterVirtualClusterConfigKubernetesClusterConfig {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]ClusterVirtualClusterConfigKubernetesClusterConfig{}
+	}
+
+	if len(a) == 0 {
+		return map[string]ClusterVirtualClusterConfigKubernetesClusterConfig{}
+	}
+
+	items := make(map[string]ClusterVirtualClusterConfigKubernetesClusterConfig)
+	for k, item := range a {
+		items[k] = *flattenClusterVirtualClusterConfigKubernetesClusterConfig(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenClusterVirtualClusterConfigKubernetesClusterConfigSlice flattens the contents of ClusterVirtualClusterConfigKubernetesClusterConfig from a JSON
+// response object.
+func flattenClusterVirtualClusterConfigKubernetesClusterConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterVirtualClusterConfigKubernetesClusterConfig {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []ClusterVirtualClusterConfigKubernetesClusterConfig{}
+	}
+
+	if len(a) == 0 {
+		return []ClusterVirtualClusterConfigKubernetesClusterConfig{}
+	}
+
+	items := make([]ClusterVirtualClusterConfigKubernetesClusterConfig, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenClusterVirtualClusterConfigKubernetesClusterConfig(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandClusterVirtualClusterConfigKubernetesClusterConfig expands an instance of ClusterVirtualClusterConfigKubernetesClusterConfig into a JSON
+// request object.
+func expandClusterVirtualClusterConfigKubernetesClusterConfig(c *Client, f *ClusterVirtualClusterConfigKubernetesClusterConfig, res *Cluster) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	// Scalar fields are emitted only when non-empty; nested messages are expanded
+	// first and any expansion error is wrapped with the field being expanded.
+	m := make(map[string]interface{})
+	if v := f.KubernetesNamespace; !dcl.IsEmptyValueIndirect(v) {
+		m["kubernetesNamespace"] = v
+	}
+	if v, err := expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig(c, f.GkeClusterConfig, res); err != nil {
+		return nil, fmt.Errorf("error expanding GkeClusterConfig into gkeClusterConfig: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["gkeClusterConfig"] = v
+	}
+	if v, err := expandClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig(c, f.KubernetesSoftwareConfig, res); err != nil {
+		return nil, fmt.Errorf("error expanding KubernetesSoftwareConfig into kubernetesSoftwareConfig: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["kubernetesSoftwareConfig"] = v
+	}
+
+	return m, nil
+}
+
+// flattenClusterVirtualClusterConfigKubernetesClusterConfig flattens an instance of ClusterVirtualClusterConfigKubernetesClusterConfig from a JSON
+// response object.
+func flattenClusterVirtualClusterConfigKubernetesClusterConfig(c *Client, i interface{}, res *Cluster) *ClusterVirtualClusterConfigKubernetesClusterConfig {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &ClusterVirtualClusterConfigKubernetesClusterConfig{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyClusterVirtualClusterConfigKubernetesClusterConfig
+	}
+	r.KubernetesNamespace = dcl.FlattenString(m["kubernetesNamespace"])
+	r.GkeClusterConfig = flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig(c, m["gkeClusterConfig"], res)
+	r.KubernetesSoftwareConfig = flattenClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig(c, m["kubernetesSoftwareConfig"], res)
+
+	return r
+}
+
+// expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigMap expands the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig into a JSON
+// request object.
+func expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigMap(c *Client, f map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig, res *Cluster) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigSlice expands the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig into a JSON
+// request object.
+func expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigSlice(c *Client, f []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig, res *Cluster) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigMap flattens the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig from a JSON
+// response object.
+func flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig{}
+	}
+
+	if len(a) == 0 {
+		return map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig{}
+	}
+
+	items := make(map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig)
+	for k, item := range a {
+		items[k] = *flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigSlice flattens the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig from a JSON
+// response object.
+func flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig{}
+	}
+
+	if len(a) == 0 {
+		return []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig{}
+	}
+
+	items := make([]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig expands an instance of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig into a JSON
+// request object.
+func expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig(c *Client, f *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig, res *Cluster) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.GkeClusterTarget; !dcl.IsEmptyValueIndirect(v) {
+		m["gkeClusterTarget"] = v
+	}
+	if v, err := expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetSlice(c, f.NodePoolTarget, res); err != nil {
+		return nil, fmt.Errorf("error expanding NodePoolTarget into nodePoolTarget: %w", err)
+	} else if v != nil {
+		// Slice fields are emitted whenever non-nil — even when empty — unlike
+		// scalar fields above, which are skipped via IsEmptyValueIndirect.
+		m["nodePoolTarget"] = v
+	}
+
+	return m, nil
+}
+
+// flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig flattens an instance of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig from a JSON
+// response object.
+func flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig(c *Client, i interface{}, res *Cluster) *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig
+	}
+	r.GkeClusterTarget = dcl.FlattenString(m["gkeClusterTarget"])
+	r.NodePoolTarget = flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetSlice(c, m["nodePoolTarget"], res)
+
+	return r
+}
+
+// expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetMap expands the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget into a JSON
+// request object.
+func expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetMap(c *Client, f map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget, res *Cluster) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetSlice expands the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget into a JSON
+// request object.
+func expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetSlice(c *Client, f []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget, res *Cluster) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetMap flattens the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget from a JSON
+// response object.
+func flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetMap(c *Client, i interface{}, res *Cluster) map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget{}
+	}
+
+	if len(a) == 0 {
+		return map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget{}
+	}
+
+	items := make(map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget)
+	for k, item := range a {
+		items[k] = *flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetSlice flattens the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget from a JSON
+// response object.
+func flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetSlice(c *Client, i interface{}, res *Cluster) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget{}
+	}
+
+	if len(a) == 0 {
+		return []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget{}
+	}
+
+	items := make([]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget expands an instance of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget into a JSON
+// request object.
+func expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget(c *Client, f *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget, res *Cluster) (map[string]interface{}, error) {
+	// NOTE(review): this expander guards with a plain nil check, unlike the
+	// IsEmptyValueIndirect guard used by sibling expanders — presumably generator
+	// behavior for messages whose fields must round-trip even when empty; confirm
+	// against the DCL codegen before "normalizing" it.
+	if f == nil {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.NodePool; !dcl.IsEmptyValueIndirect(v) {
+		m["nodePool"] = v
+	}
+	if v := f.Roles; v != nil {
+		m["roles"] = v
+	}
+	if v, err := expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig(c, f.NodePoolConfig, res); err != nil {
+		return nil, fmt.Errorf("error expanding NodePoolConfig into nodePoolConfig: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["nodePoolConfig"] = v
+	}
+
+	return m, nil
+}
+
+// flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget flattens an instance of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget from a JSON
+// response object.
+func flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget(c *Client, i interface{}, res *Cluster) *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget
+	}
+	r.NodePool = dcl.FlattenString(m["nodePool"])
+	r.Roles = flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnumSlice(c, m["roles"], res)
+	r.NodePoolConfig = flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig(c, m["nodePoolConfig"], res)
+
+	return r
+}
+
+// expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigMap expands the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig into a JSON
+// request object.
+func expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigMap(c *Client, f map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig, res *Cluster) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigSlice expands the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig into a JSON
+// request object.
+func expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigSlice(c *Client, f []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig, res *Cluster) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigMap flattens the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig from a JSON
+// response object.
+func flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig{}
+	}
+
+	if len(a) == 0 {
+		return map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig{}
+	}
+
+	items := make(map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig)
+	for k, item := range a {
+		items[k] = *flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigSlice flattens the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig from a JSON
+// response object.
+func flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig{}
+	}
+
+	if len(a) == 0 {
+		return []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig{}
+	}
+
+	items := make([]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig expands an instance of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig into a JSON
+// request object.
+func expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig(c *Client, f *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig, res *Cluster) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v, err := expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig(c, f.Config, res); err != nil {
+		return nil, fmt.Errorf("error expanding Config into config: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["config"] = v
+	}
+	// Locations is a plain string slice: copied through whenever non-nil.
+	if v := f.Locations; v != nil {
+		m["locations"] = v
+	}
+	if v, err := expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling(c, f.Autoscaling, res); err != nil {
+		return nil, fmt.Errorf("error expanding Autoscaling into autoscaling: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["autoscaling"] = v
+	}
+
+	return m, nil
+}
+
+// flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig flattens an instance of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig from a JSON
+// response object.
+func flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig(c *Client, i interface{}, res *Cluster) *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig
+	}
+	r.Config = flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig(c, m["config"], res)
+	r.Locations = dcl.FlattenStringSlice(m["locations"])
+	r.Autoscaling = flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling(c, m["autoscaling"], res)
+
+	return r
+}
+
+// expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigMap expands the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig into a JSON
+// request object.
+func expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigMap(c *Client, f map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig, res *Cluster) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigSlice expands the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig into a JSON
+// request object.
+func expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigSlice(c *Client, f []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig, res *Cluster) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigMap flattens the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig from a JSON
+// response object.
+func flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig{}
+	}
+
+	if len(a) == 0 {
+		return map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig{}
+	}
+
+	items := make(map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig)
+	for k, item := range a {
+		items[k] = *flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigSlice flattens the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig from a JSON
+// response object.
+func flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig{}
+	}
+
+	if len(a) == 0 {
+		return []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig{}
+	}
+
+	items := make([]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig expands an instance of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig into a JSON
+// request object.
+func expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig(c *Client, f *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig, res *Cluster) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.MachineType; !dcl.IsEmptyValueIndirect(v) {
+		m["machineType"] = v
+	}
+	if v := f.LocalSsdCount; !dcl.IsEmptyValueIndirect(v) {
+		m["localSsdCount"] = v
+	}
+	if v := f.Preemptible; !dcl.IsEmptyValueIndirect(v) {
+		m["preemptible"] = v
+	}
+	// Accelerators is a repeated message: emitted whenever the expanded slice is
+	// non-nil, unlike the scalar fields which are skipped when empty.
+	if v, err := expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAcceleratorsSlice(c, f.Accelerators, res); err != nil {
+		return nil, fmt.Errorf("error expanding Accelerators into accelerators: %w", err)
+	} else if v != nil {
+		m["accelerators"] = v
+	}
+	if v := f.MinCpuPlatform; !dcl.IsEmptyValueIndirect(v) {
+		m["minCpuPlatform"] = v
+	}
+	if v := f.BootDiskKmsKey; !dcl.IsEmptyValueIndirect(v) {
+		m["bootDiskKmsKey"] = v
+	}
+	if v, err := expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig(c, f.EphemeralStorageConfig, res); err != nil {
+		return nil, fmt.Errorf("error expanding EphemeralStorageConfig into ephemeralStorageConfig: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["ephemeralStorageConfig"] = v
+	}
+	if v := f.Spot; !dcl.IsEmptyValueIndirect(v) {
+		m["spot"] = v
+	}
+
+	return m, nil
+}
+
+// flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig flattens an instance of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig from a JSON
+// response object.
+func flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig(c *Client, i interface{}, res *Cluster) *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig
+	}
+	r.MachineType = dcl.FlattenString(m["machineType"])
+	r.LocalSsdCount = dcl.FlattenInteger(m["localSsdCount"])
+	r.Preemptible = dcl.FlattenBool(m["preemptible"])
+	r.Accelerators = flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAcceleratorsSlice(c, m["accelerators"], res)
+	r.MinCpuPlatform = dcl.FlattenString(m["minCpuPlatform"])
+	r.BootDiskKmsKey = dcl.FlattenString(m["bootDiskKmsKey"])
+	r.EphemeralStorageConfig = flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig(c, m["ephemeralStorageConfig"], res)
+	r.Spot = dcl.FlattenBool(m["spot"])
+
+	return r
+}
+
+// expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAcceleratorsMap expands the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators into a JSON
+// request object.
+func expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAcceleratorsMap(c *Client, f map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAcceleratorsSlice expands the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators into a JSON +// request object. A nil input slice expands to nil. +func expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAcceleratorsSlice(c *Client, f []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAcceleratorsMap flattens the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators from a JSON +// response object. A nil or non-map input flattens to an empty, non-nil map. 
+func flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAcceleratorsMap(c *Client, i interface{}, res *Cluster) map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators{} + } + + if len(a) == 0 { + return map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators{} + } + + items := make(map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators) + for k, item := range a { + items[k] = *flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAcceleratorsSlice flattens the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators from a JSON +// response object. A nil or non-slice input flattens to an empty, non-nil slice. 
+func flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAcceleratorsSlice(c *Client, i interface{}, res *Cluster) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators { + a, ok := i.([]interface{}) + if !ok { + return []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators{} + } + + if len(a) == 0 { + return []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators{} + } + + items := make([]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators expands an instance of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators into a JSON +// request object. Empty fields are omitted; a nil input expands to nil. 
+func expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators(c *Client, f *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.AcceleratorCount; !dcl.IsEmptyValueIndirect(v) { + m["acceleratorCount"] = v + } + if v := f.AcceleratorType; !dcl.IsEmptyValueIndirect(v) { + m["acceleratorType"] = v + } + if v := f.GpuPartitionSize; !dcl.IsEmptyValueIndirect(v) { + m["gpuPartitionSize"] = v + } + + return m, nil +} + +// flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators flattens an instance of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators from a JSON +// response object. Returns nil for non-map input and the package-level Empty sentinel for an empty value. +func flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators(c *Client, i interface{}, res *Cluster) *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators + } + r.AcceleratorCount = dcl.FlattenInteger(m["acceleratorCount"]) + r.AcceleratorType = dcl.FlattenString(m["acceleratorType"]) + r.GpuPartitionSize = dcl.FlattenString(m["gpuPartitionSize"]) + + return r +} + +// expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfigMap expands the contents of 
ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig into a JSON +// request object. A nil input map expands to nil. +func expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfigMap(c *Client, f map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfigSlice expands the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig into a JSON +// request object. A nil input slice expands to nil. 
+func expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfigSlice(c *Client, f []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfigMap flattens the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig from a JSON +// response object. A nil or non-map input flattens to an empty, non-nil map. 
+func flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig{} + } + + if len(a) == 0 { + return map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig{} + } + + items := make(map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig) + for k, item := range a { + items[k] = *flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfigSlice flattens the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig from a JSON +// response object. A nil or non-slice input flattens to an empty, non-nil slice. 
+func flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig { + a, ok := i.([]interface{}) + if !ok { + return []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig{} + } + + if len(a) == 0 { + return []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig{} + } + + items := make([]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig expands an instance of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig into a JSON +// request object. Empty fields are omitted; an empty input expands to nil. 
+func expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig(c *Client, f *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.LocalSsdCount; !dcl.IsEmptyValueIndirect(v) { + m["localSsdCount"] = v + } + + return m, nil +} + +// flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig flattens an instance of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig from a JSON +// response object. Returns nil for non-map input and the package-level Empty sentinel for an empty value. +func flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig(c *Client, i interface{}, res *Cluster) *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig + } + r.LocalSsdCount = dcl.FlattenInteger(m["localSsdCount"]) + + return r +} + +// expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscalingMap expands the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling into a JSON +// request object. A nil input map expands to nil. 
+func expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscalingMap(c *Client, f map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscalingSlice expands the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling into a JSON +// request object. A nil input slice expands to nil. +func expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscalingSlice(c *Client, f []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscalingMap flattens the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling from a JSON +// response object. A nil or non-map input flattens to an empty, non-nil map. 
+func flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscalingMap(c *Client, i interface{}, res *Cluster) map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling{} + } + + if len(a) == 0 { + return map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling{} + } + + items := make(map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling) + for k, item := range a { + items[k] = *flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscalingSlice flattens the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling from a JSON +// response object. A nil or non-slice input flattens to an empty, non-nil slice. 
+func flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscalingSlice(c *Client, i interface{}, res *Cluster) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling { + a, ok := i.([]interface{}) + if !ok { + return []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling{} + } + + if len(a) == 0 { + return []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling{} + } + + items := make([]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling expands an instance of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling into a JSON +// request object. Empty fields are omitted; an empty input expands to nil. 
+func expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling(c *Client, f *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.MinNodeCount; !dcl.IsEmptyValueIndirect(v) { + m["minNodeCount"] = v + } + if v := f.MaxNodeCount; !dcl.IsEmptyValueIndirect(v) { + m["maxNodeCount"] = v + } + + return m, nil +} + +// flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling flattens an instance of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling from a JSON +// response object. Returns nil for non-map input and the package-level Empty sentinel for an empty value. +func flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling(c *Client, i interface{}, res *Cluster) *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling + } + r.MinNodeCount = dcl.FlattenInteger(m["minNodeCount"]) + r.MaxNodeCount = dcl.FlattenInteger(m["maxNodeCount"]) + + return r +} + +// expandClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfigMap expands the contents of ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig into a JSON +// request object. A nil input map expands to nil. 
+func expandClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfigMap(c *Client, f map[string]ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfigSlice expands the contents of ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig into a JSON +// request object. A nil input slice expands to nil. +func expandClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfigSlice(c *Client, f []ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfigMap flattens the contents of ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig from a JSON +// response object. A nil or non-map input flattens to an empty, non-nil map. 
+func flattenClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig{} + } + + if len(a) == 0 { + return map[string]ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig{} + } + + items := make(map[string]ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig) + for k, item := range a { + items[k] = *flattenClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfigSlice flattens the contents of ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig from a JSON +// response object. A nil or non-slice input flattens to an empty, non-nil slice. +func flattenClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig { + a, ok := i.([]interface{}) + if !ok { + return []ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig{} + } + + if len(a) == 0 { + return []ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig{} + } + + items := make([]ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig expands an instance of ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig into a JSON +// request object. Empty fields are omitted; an empty input expands to nil. 
+func expandClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig(c *Client, f *ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.ComponentVersion; !dcl.IsEmptyValueIndirect(v) { + m["componentVersion"] = v + } + if v := f.Properties; !dcl.IsEmptyValueIndirect(v) { + m["properties"] = v + } + + return m, nil +} + +// flattenClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig flattens an instance of ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig from a JSON +// response object. Returns nil for non-map input and the package-level Empty sentinel for an empty value. +func flattenClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig(c *Client, i interface{}, res *Cluster) *ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig + } + r.ComponentVersion = dcl.FlattenKeyValuePairs(m["componentVersion"]) + r.Properties = dcl.FlattenKeyValuePairs(m["properties"]) + + return r +} + +// expandClusterVirtualClusterConfigAuxiliaryServicesConfigMap expands the contents of ClusterVirtualClusterConfigAuxiliaryServicesConfig into a JSON +// request object. A nil input map expands to nil. 
+func expandClusterVirtualClusterConfigAuxiliaryServicesConfigMap(c *Client, f map[string]ClusterVirtualClusterConfigAuxiliaryServicesConfig, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterVirtualClusterConfigAuxiliaryServicesConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterVirtualClusterConfigAuxiliaryServicesConfigSlice expands the contents of ClusterVirtualClusterConfigAuxiliaryServicesConfig into a JSON +// request object. A nil input slice expands to nil. +func expandClusterVirtualClusterConfigAuxiliaryServicesConfigSlice(c *Client, f []ClusterVirtualClusterConfigAuxiliaryServicesConfig, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterVirtualClusterConfigAuxiliaryServicesConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterVirtualClusterConfigAuxiliaryServicesConfigMap flattens the contents of ClusterVirtualClusterConfigAuxiliaryServicesConfig from a JSON +// response object. A nil or non-map input flattens to an empty, non-nil map. 
+func flattenClusterVirtualClusterConfigAuxiliaryServicesConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterVirtualClusterConfigAuxiliaryServicesConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterVirtualClusterConfigAuxiliaryServicesConfig{} + } + + if len(a) == 0 { + return map[string]ClusterVirtualClusterConfigAuxiliaryServicesConfig{} + } + + items := make(map[string]ClusterVirtualClusterConfigAuxiliaryServicesConfig) + for k, item := range a { + items[k] = *flattenClusterVirtualClusterConfigAuxiliaryServicesConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterVirtualClusterConfigAuxiliaryServicesConfigSlice flattens the contents of ClusterVirtualClusterConfigAuxiliaryServicesConfig from a JSON +// response object. A nil or non-slice input flattens to an empty, non-nil slice. +func flattenClusterVirtualClusterConfigAuxiliaryServicesConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterVirtualClusterConfigAuxiliaryServicesConfig { + a, ok := i.([]interface{}) + if !ok { + return []ClusterVirtualClusterConfigAuxiliaryServicesConfig{} + } + + if len(a) == 0 { + return []ClusterVirtualClusterConfigAuxiliaryServicesConfig{} + } + + items := make([]ClusterVirtualClusterConfigAuxiliaryServicesConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterVirtualClusterConfigAuxiliaryServicesConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterVirtualClusterConfigAuxiliaryServicesConfig expands an instance of ClusterVirtualClusterConfigAuxiliaryServicesConfig into a JSON +// request object. Empty fields are omitted; an empty input expands to nil. 
+func expandClusterVirtualClusterConfigAuxiliaryServicesConfig(c *Client, f *ClusterVirtualClusterConfigAuxiliaryServicesConfig, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v, err := expandClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig(c, f.MetastoreConfig, res); err != nil { + return nil, fmt.Errorf("error expanding MetastoreConfig into metastoreConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["metastoreConfig"] = v + } + if v, err := expandClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig(c, f.SparkHistoryServerConfig, res); err != nil { + return nil, fmt.Errorf("error expanding SparkHistoryServerConfig into sparkHistoryServerConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["sparkHistoryServerConfig"] = v + } + + return m, nil +} + +// flattenClusterVirtualClusterConfigAuxiliaryServicesConfig flattens an instance of ClusterVirtualClusterConfigAuxiliaryServicesConfig from a JSON +// response object. Returns nil for non-map input and the package-level Empty sentinel for an empty value. +func flattenClusterVirtualClusterConfigAuxiliaryServicesConfig(c *Client, i interface{}, res *Cluster) *ClusterVirtualClusterConfigAuxiliaryServicesConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterVirtualClusterConfigAuxiliaryServicesConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterVirtualClusterConfigAuxiliaryServicesConfig + } + r.MetastoreConfig = flattenClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig(c, m["metastoreConfig"], res) + r.SparkHistoryServerConfig = flattenClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig(c, m["sparkHistoryServerConfig"], res) + + return r +} + +// expandClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfigMap expands the contents of ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig into a JSON +// request object. A nil input map expands to nil. 
+func expandClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfigMap(c *Client, f map[string]ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfigSlice expands the contents of ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig into a JSON +// request object. +func expandClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfigSlice(c *Client, f []ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfigMap flattens the contents of ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig from a JSON +// response object. 
+func flattenClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig{} + } + + if len(a) == 0 { + return map[string]ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig{} + } + + items := make(map[string]ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig) + for k, item := range a { + items[k] = *flattenClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfigSlice flattens the contents of ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig from a JSON +// response object. +func flattenClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig { + a, ok := i.([]interface{}) + if !ok { + return []ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig{} + } + + if len(a) == 0 { + return []ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig{} + } + + items := make([]ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig expands an instance of ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig into a JSON +// request object. 
+func expandClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig(c *Client, f *ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.DataprocMetastoreService; !dcl.IsEmptyValueIndirect(v) { + m["dataprocMetastoreService"] = v + } + + return m, nil +} + +// flattenClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig flattens an instance of ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig from a JSON +// response object. +func flattenClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig(c *Client, i interface{}, res *Cluster) *ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig + } + r.DataprocMetastoreService = dcl.FlattenString(m["dataprocMetastoreService"]) + + return r +} + +// expandClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfigMap expands the contents of ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig into a JSON +// request object. 
+func expandClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfigMap(c *Client, f map[string]ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig, res *Cluster) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfigSlice expands the contents of ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig into a JSON +// request object. +func expandClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfigSlice(c *Client, f []ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig, res *Cluster) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfigMap flattens the contents of ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig from a JSON +// response object. 
+func flattenClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig{} + } + + if len(a) == 0 { + return map[string]ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig{} + } + + items := make(map[string]ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig) + for k, item := range a { + items[k] = *flattenClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfigSlice flattens the contents of ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig from a JSON +// response object. +func flattenClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig { + a, ok := i.([]interface{}) + if !ok { + return []ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig{} + } + + if len(a) == 0 { + return []ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig{} + } + + items := make([]ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig expands an instance of ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig into a JSON +// request object. 
+func expandClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig(c *Client, f *ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig, res *Cluster) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.DataprocCluster; !dcl.IsEmptyValueIndirect(v) { + m["dataprocCluster"] = v + } + + return m, nil +} + +// flattenClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig flattens an instance of ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig from a JSON +// response object. +func flattenClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig(c *Client, i interface{}, res *Cluster) *ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig + } + r.DataprocCluster = dcl.FlattenString(m["dataprocCluster"]) + + return r +} + +// flattenClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnumMap flattens the contents of ClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum from a JSON +// response object. 
+func flattenClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnumMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum{} + } + + if len(a) == 0 { + return map[string]ClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum{} + } + + items := make(map[string]ClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum) + for k, item := range a { + items[k] = *flattenClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum(item.(interface{})) + } + + return items +} + +// flattenClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnumSlice flattens the contents of ClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum from a JSON +// response object. +func flattenClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnumSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum { + a, ok := i.([]interface{}) + if !ok { + return []ClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum{} + } + + if len(a) == 0 { + return []ClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum{} + } + + items := make([]ClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum(item.(interface{}))) + } + + return items +} + +// flattenClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum asserts that an interface is a string, and returns a +// pointer to a *ClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum with the same value as that string. 
+func flattenClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum(i interface{}) *ClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return ClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnumRef(s) +} + +// flattenClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnumMap flattens the contents of ClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum from a JSON +// response object. +func flattenClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnumMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum{} + } + + if len(a) == 0 { + return map[string]ClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum{} + } + + items := make(map[string]ClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum) + for k, item := range a { + items[k] = *flattenClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum(item.(interface{})) + } + + return items +} + +// flattenClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnumSlice flattens the contents of ClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum from a JSON +// response object. 
+func flattenClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnumSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum { + a, ok := i.([]interface{}) + if !ok { + return []ClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum{} + } + + if len(a) == 0 { + return []ClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum{} + } + + items := make([]ClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum(item.(interface{}))) + } + + return items +} + +// flattenClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum asserts that an interface is a string, and returns a +// pointer to a *ClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum with the same value as that string. +func flattenClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum(i interface{}) *ClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return ClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnumRef(s) +} + +// flattenClusterConfigMasterConfigPreemptibilityEnumMap flattens the contents of ClusterConfigMasterConfigPreemptibilityEnum from a JSON +// response object. 
+func flattenClusterConfigMasterConfigPreemptibilityEnumMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigMasterConfigPreemptibilityEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterConfigMasterConfigPreemptibilityEnum{} + } + + if len(a) == 0 { + return map[string]ClusterConfigMasterConfigPreemptibilityEnum{} + } + + items := make(map[string]ClusterConfigMasterConfigPreemptibilityEnum) + for k, item := range a { + items[k] = *flattenClusterConfigMasterConfigPreemptibilityEnum(item.(interface{})) + } + + return items +} + +// flattenClusterConfigMasterConfigPreemptibilityEnumSlice flattens the contents of ClusterConfigMasterConfigPreemptibilityEnum from a JSON +// response object. +func flattenClusterConfigMasterConfigPreemptibilityEnumSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigMasterConfigPreemptibilityEnum { + a, ok := i.([]interface{}) + if !ok { + return []ClusterConfigMasterConfigPreemptibilityEnum{} + } + + if len(a) == 0 { + return []ClusterConfigMasterConfigPreemptibilityEnum{} + } + + items := make([]ClusterConfigMasterConfigPreemptibilityEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterConfigMasterConfigPreemptibilityEnum(item.(interface{}))) + } + + return items +} + +// flattenClusterConfigMasterConfigPreemptibilityEnum asserts that an interface is a string, and returns a +// pointer to a *ClusterConfigMasterConfigPreemptibilityEnum with the same value as that string. +func flattenClusterConfigMasterConfigPreemptibilityEnum(i interface{}) *ClusterConfigMasterConfigPreemptibilityEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return ClusterConfigMasterConfigPreemptibilityEnumRef(s) +} + +// flattenClusterConfigWorkerConfigPreemptibilityEnumMap flattens the contents of ClusterConfigWorkerConfigPreemptibilityEnum from a JSON +// response object. 
+func flattenClusterConfigWorkerConfigPreemptibilityEnumMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigWorkerConfigPreemptibilityEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterConfigWorkerConfigPreemptibilityEnum{} + } + + if len(a) == 0 { + return map[string]ClusterConfigWorkerConfigPreemptibilityEnum{} + } + + items := make(map[string]ClusterConfigWorkerConfigPreemptibilityEnum) + for k, item := range a { + items[k] = *flattenClusterConfigWorkerConfigPreemptibilityEnum(item.(interface{})) + } + + return items +} + +// flattenClusterConfigWorkerConfigPreemptibilityEnumSlice flattens the contents of ClusterConfigWorkerConfigPreemptibilityEnum from a JSON +// response object. +func flattenClusterConfigWorkerConfigPreemptibilityEnumSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigWorkerConfigPreemptibilityEnum { + a, ok := i.([]interface{}) + if !ok { + return []ClusterConfigWorkerConfigPreemptibilityEnum{} + } + + if len(a) == 0 { + return []ClusterConfigWorkerConfigPreemptibilityEnum{} + } + + items := make([]ClusterConfigWorkerConfigPreemptibilityEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterConfigWorkerConfigPreemptibilityEnum(item.(interface{}))) + } + + return items +} + +// flattenClusterConfigWorkerConfigPreemptibilityEnum asserts that an interface is a string, and returns a +// pointer to a *ClusterConfigWorkerConfigPreemptibilityEnum with the same value as that string. +func flattenClusterConfigWorkerConfigPreemptibilityEnum(i interface{}) *ClusterConfigWorkerConfigPreemptibilityEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return ClusterConfigWorkerConfigPreemptibilityEnumRef(s) +} + +// flattenClusterConfigSecondaryWorkerConfigPreemptibilityEnumMap flattens the contents of ClusterConfigSecondaryWorkerConfigPreemptibilityEnum from a JSON +// response object. 
+func flattenClusterConfigSecondaryWorkerConfigPreemptibilityEnumMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigSecondaryWorkerConfigPreemptibilityEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterConfigSecondaryWorkerConfigPreemptibilityEnum{} + } + + if len(a) == 0 { + return map[string]ClusterConfigSecondaryWorkerConfigPreemptibilityEnum{} + } + + items := make(map[string]ClusterConfigSecondaryWorkerConfigPreemptibilityEnum) + for k, item := range a { + items[k] = *flattenClusterConfigSecondaryWorkerConfigPreemptibilityEnum(item.(interface{})) + } + + return items +} + +// flattenClusterConfigSecondaryWorkerConfigPreemptibilityEnumSlice flattens the contents of ClusterConfigSecondaryWorkerConfigPreemptibilityEnum from a JSON +// response object. +func flattenClusterConfigSecondaryWorkerConfigPreemptibilityEnumSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigSecondaryWorkerConfigPreemptibilityEnum { + a, ok := i.([]interface{}) + if !ok { + return []ClusterConfigSecondaryWorkerConfigPreemptibilityEnum{} + } + + if len(a) == 0 { + return []ClusterConfigSecondaryWorkerConfigPreemptibilityEnum{} + } + + items := make([]ClusterConfigSecondaryWorkerConfigPreemptibilityEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterConfigSecondaryWorkerConfigPreemptibilityEnum(item.(interface{}))) + } + + return items +} + +// flattenClusterConfigSecondaryWorkerConfigPreemptibilityEnum asserts that an interface is a string, and returns a +// pointer to a *ClusterConfigSecondaryWorkerConfigPreemptibilityEnum with the same value as that string. 
+func flattenClusterConfigSecondaryWorkerConfigPreemptibilityEnum(i interface{}) *ClusterConfigSecondaryWorkerConfigPreemptibilityEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return ClusterConfigSecondaryWorkerConfigPreemptibilityEnumRef(s) +} + +// flattenClusterConfigSoftwareConfigOptionalComponentsEnumMap flattens the contents of ClusterConfigSoftwareConfigOptionalComponentsEnum from a JSON +// response object. +func flattenClusterConfigSoftwareConfigOptionalComponentsEnumMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigSoftwareConfigOptionalComponentsEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterConfigSoftwareConfigOptionalComponentsEnum{} + } + + if len(a) == 0 { + return map[string]ClusterConfigSoftwareConfigOptionalComponentsEnum{} + } + + items := make(map[string]ClusterConfigSoftwareConfigOptionalComponentsEnum) + for k, item := range a { + items[k] = *flattenClusterConfigSoftwareConfigOptionalComponentsEnum(item.(interface{})) + } + + return items +} + +// flattenClusterConfigSoftwareConfigOptionalComponentsEnumSlice flattens the contents of ClusterConfigSoftwareConfigOptionalComponentsEnum from a JSON +// response object. 
+func flattenClusterConfigSoftwareConfigOptionalComponentsEnumSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigSoftwareConfigOptionalComponentsEnum { + a, ok := i.([]interface{}) + if !ok { + return []ClusterConfigSoftwareConfigOptionalComponentsEnum{} + } + + if len(a) == 0 { + return []ClusterConfigSoftwareConfigOptionalComponentsEnum{} + } + + items := make([]ClusterConfigSoftwareConfigOptionalComponentsEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterConfigSoftwareConfigOptionalComponentsEnum(item.(interface{}))) + } + + return items +} + +// flattenClusterConfigSoftwareConfigOptionalComponentsEnum asserts that an interface is a string, and returns a +// pointer to a *ClusterConfigSoftwareConfigOptionalComponentsEnum with the same value as that string. +func flattenClusterConfigSoftwareConfigOptionalComponentsEnum(i interface{}) *ClusterConfigSoftwareConfigOptionalComponentsEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return ClusterConfigSoftwareConfigOptionalComponentsEnumRef(s) +} + +// flattenClusterConfigDataprocMetricConfigMetricsMetricSourceEnumMap flattens the contents of ClusterConfigDataprocMetricConfigMetricsMetricSourceEnum from a JSON +// response object. 
+func flattenClusterConfigDataprocMetricConfigMetricsMetricSourceEnumMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigDataprocMetricConfigMetricsMetricSourceEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterConfigDataprocMetricConfigMetricsMetricSourceEnum{} + } + + if len(a) == 0 { + return map[string]ClusterConfigDataprocMetricConfigMetricsMetricSourceEnum{} + } + + items := make(map[string]ClusterConfigDataprocMetricConfigMetricsMetricSourceEnum) + for k, item := range a { + items[k] = *flattenClusterConfigDataprocMetricConfigMetricsMetricSourceEnum(item.(interface{})) + } + + return items +} + +// flattenClusterConfigDataprocMetricConfigMetricsMetricSourceEnumSlice flattens the contents of ClusterConfigDataprocMetricConfigMetricsMetricSourceEnum from a JSON +// response object. +func flattenClusterConfigDataprocMetricConfigMetricsMetricSourceEnumSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigDataprocMetricConfigMetricsMetricSourceEnum { + a, ok := i.([]interface{}) + if !ok { + return []ClusterConfigDataprocMetricConfigMetricsMetricSourceEnum{} + } + + if len(a) == 0 { + return []ClusterConfigDataprocMetricConfigMetricsMetricSourceEnum{} + } + + items := make([]ClusterConfigDataprocMetricConfigMetricsMetricSourceEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterConfigDataprocMetricConfigMetricsMetricSourceEnum(item.(interface{}))) + } + + return items +} + +// flattenClusterConfigDataprocMetricConfigMetricsMetricSourceEnum asserts that an interface is a string, and returns a +// pointer to a *ClusterConfigDataprocMetricConfigMetricsMetricSourceEnum with the same value as that string. 
+func flattenClusterConfigDataprocMetricConfigMetricsMetricSourceEnum(i interface{}) *ClusterConfigDataprocMetricConfigMetricsMetricSourceEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return ClusterConfigDataprocMetricConfigMetricsMetricSourceEnumRef(s) +} + +// flattenClusterStatusStateEnumMap flattens the contents of ClusterStatusStateEnum from a JSON +// response object. +func flattenClusterStatusStateEnumMap(c *Client, i interface{}, res *Cluster) map[string]ClusterStatusStateEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterStatusStateEnum{} + } + + if len(a) == 0 { + return map[string]ClusterStatusStateEnum{} + } + + items := make(map[string]ClusterStatusStateEnum) + for k, item := range a { + items[k] = *flattenClusterStatusStateEnum(item.(interface{})) + } + + return items +} + +// flattenClusterStatusStateEnumSlice flattens the contents of ClusterStatusStateEnum from a JSON +// response object. +func flattenClusterStatusStateEnumSlice(c *Client, i interface{}, res *Cluster) []ClusterStatusStateEnum { + a, ok := i.([]interface{}) + if !ok { + return []ClusterStatusStateEnum{} + } + + if len(a) == 0 { + return []ClusterStatusStateEnum{} + } + + items := make([]ClusterStatusStateEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterStatusStateEnum(item.(interface{}))) + } + + return items +} + +// flattenClusterStatusStateEnum asserts that an interface is a string, and returns a +// pointer to a *ClusterStatusStateEnum with the same value as that string. +func flattenClusterStatusStateEnum(i interface{}) *ClusterStatusStateEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return ClusterStatusStateEnumRef(s) +} + +// flattenClusterStatusSubstateEnumMap flattens the contents of ClusterStatusSubstateEnum from a JSON +// response object. 
+func flattenClusterStatusSubstateEnumMap(c *Client, i interface{}, res *Cluster) map[string]ClusterStatusSubstateEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterStatusSubstateEnum{} + } + + if len(a) == 0 { + return map[string]ClusterStatusSubstateEnum{} + } + + items := make(map[string]ClusterStatusSubstateEnum) + for k, item := range a { + items[k] = *flattenClusterStatusSubstateEnum(item.(interface{})) + } + + return items +} + +// flattenClusterStatusSubstateEnumSlice flattens the contents of ClusterStatusSubstateEnum from a JSON +// response object. +func flattenClusterStatusSubstateEnumSlice(c *Client, i interface{}, res *Cluster) []ClusterStatusSubstateEnum { + a, ok := i.([]interface{}) + if !ok { + return []ClusterStatusSubstateEnum{} + } + + if len(a) == 0 { + return []ClusterStatusSubstateEnum{} + } + + items := make([]ClusterStatusSubstateEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterStatusSubstateEnum(item.(interface{}))) + } + + return items +} + +// flattenClusterStatusSubstateEnum asserts that an interface is a string, and returns a +// pointer to a *ClusterStatusSubstateEnum with the same value as that string. +func flattenClusterStatusSubstateEnum(i interface{}) *ClusterStatusSubstateEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return ClusterStatusSubstateEnumRef(s) +} + +// flattenClusterStatusHistoryStateEnumMap flattens the contents of ClusterStatusHistoryStateEnum from a JSON +// response object. 
+func flattenClusterStatusHistoryStateEnumMap(c *Client, i interface{}, res *Cluster) map[string]ClusterStatusHistoryStateEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterStatusHistoryStateEnum{} + } + + if len(a) == 0 { + return map[string]ClusterStatusHistoryStateEnum{} + } + + items := make(map[string]ClusterStatusHistoryStateEnum) + for k, item := range a { + items[k] = *flattenClusterStatusHistoryStateEnum(item.(interface{})) + } + + return items +} + +// flattenClusterStatusHistoryStateEnumSlice flattens the contents of ClusterStatusHistoryStateEnum from a JSON +// response object. +func flattenClusterStatusHistoryStateEnumSlice(c *Client, i interface{}, res *Cluster) []ClusterStatusHistoryStateEnum { + a, ok := i.([]interface{}) + if !ok { + return []ClusterStatusHistoryStateEnum{} + } + + if len(a) == 0 { + return []ClusterStatusHistoryStateEnum{} + } + + items := make([]ClusterStatusHistoryStateEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterStatusHistoryStateEnum(item.(interface{}))) + } + + return items +} + +// flattenClusterStatusHistoryStateEnum asserts that an interface is a string, and returns a +// pointer to a *ClusterStatusHistoryStateEnum with the same value as that string. +func flattenClusterStatusHistoryStateEnum(i interface{}) *ClusterStatusHistoryStateEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return ClusterStatusHistoryStateEnumRef(s) +} + +// flattenClusterStatusHistorySubstateEnumMap flattens the contents of ClusterStatusHistorySubstateEnum from a JSON +// response object. 
+func flattenClusterStatusHistorySubstateEnumMap(c *Client, i interface{}, res *Cluster) map[string]ClusterStatusHistorySubstateEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterStatusHistorySubstateEnum{} + } + + if len(a) == 0 { + return map[string]ClusterStatusHistorySubstateEnum{} + } + + items := make(map[string]ClusterStatusHistorySubstateEnum) + for k, item := range a { + items[k] = *flattenClusterStatusHistorySubstateEnum(item.(interface{})) + } + + return items +} + +// flattenClusterStatusHistorySubstateEnumSlice flattens the contents of ClusterStatusHistorySubstateEnum from a JSON +// response object. +func flattenClusterStatusHistorySubstateEnumSlice(c *Client, i interface{}, res *Cluster) []ClusterStatusHistorySubstateEnum { + a, ok := i.([]interface{}) + if !ok { + return []ClusterStatusHistorySubstateEnum{} + } + + if len(a) == 0 { + return []ClusterStatusHistorySubstateEnum{} + } + + items := make([]ClusterStatusHistorySubstateEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterStatusHistorySubstateEnum(item.(interface{}))) + } + + return items +} + +// flattenClusterStatusHistorySubstateEnum asserts that an interface is a string, and returns a +// pointer to a *ClusterStatusHistorySubstateEnum with the same value as that string. +func flattenClusterStatusHistorySubstateEnum(i interface{}) *ClusterStatusHistorySubstateEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return ClusterStatusHistorySubstateEnumRef(s) +} + +// flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnumMap flattens the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum from a JSON +// response object. 
+func flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnumMap(c *Client, i interface{}, res *Cluster) map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum{} + } + + if len(a) == 0 { + return map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum{} + } + + items := make(map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum) + for k, item := range a { + items[k] = *flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum(item.(interface{})) + } + + return items +} + +// flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnumSlice flattens the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum from a JSON +// response object. 
+func flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnumSlice(c *Client, i interface{}, res *Cluster) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum { + a, ok := i.([]interface{}) + if !ok { + return []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum{} + } + + if len(a) == 0 { + return []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum{} + } + + items := make([]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum(item.(interface{}))) + } + + return items +} + +// flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum asserts that an interface is a string, and returns a +// pointer to a *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum with the same value as that string. +func flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum(i interface{}) *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnumRef(s) +} + +// This function returns a matcher that checks whether a serialized resource matches this resource +// in its parameters (as defined by the fields in a Get, which definitionally define resource +// identity). This is useful in extracting the element from a List call. 
+func (r *Cluster) matcher(c *Client) func([]byte) bool { + return func(b []byte) bool { + cr, err := unmarshalCluster(b, c, r) + if err != nil { + c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.") + return false + } + nr := r.urlNormalized() + ncr := cr.urlNormalized() + c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr) + + if nr.Project == nil && ncr.Project == nil { + c.Config.Logger.Info("Both Project fields null - considering equal.") + } else if nr.Project == nil || ncr.Project == nil { + c.Config.Logger.Info("Only one Project field is null - considering unequal.") + return false + } else if *nr.Project != *ncr.Project { + return false + } + if nr.Location == nil && ncr.Location == nil { + c.Config.Logger.Info("Both Location fields null - considering equal.") + } else if nr.Location == nil || ncr.Location == nil { + c.Config.Logger.Info("Only one Location field is null - considering unequal.") + return false + } else if *nr.Location != *ncr.Location { + return false + } + if nr.Name == nil && ncr.Name == nil { + c.Config.Logger.Info("Both Name fields null - considering equal.") + } else if nr.Name == nil || ncr.Name == nil { + c.Config.Logger.Info("Only one Name field is null - considering unequal.") + return false + } else if *nr.Name != *ncr.Name { + return false + } + return true + } +} + +type clusterDiff struct { + // The diff should include one or the other of RequiresRecreate or UpdateOp. + RequiresRecreate bool + UpdateOp clusterApiOperation + FieldName string // used for error logging +} + +func convertFieldDiffsToClusterDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]clusterDiff, error) { + opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff) + // Map each operation name to the field diffs associated with it. 
+ for _, fd := range fds { + for _, ro := range fd.ResultingOperation { + if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok { + fieldDiffs = append(fieldDiffs, fd) + opNamesToFieldDiffs[ro] = fieldDiffs + } else { + config.Logger.Infof("%s required due to diff: %v", ro, fd) + opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd} + } + } + } + var diffs []clusterDiff + // For each operation name, create a clusterDiff which contains the operation. + for opName, fieldDiffs := range opNamesToFieldDiffs { + // Use the first field diff's field name for logging required recreate error. + diff := clusterDiff{FieldName: fieldDiffs[0].FieldName} + if opName == "Recreate" { + diff.RequiresRecreate = true + } else { + apiOp, err := convertOpNameToClusterApiOperation(opName, fieldDiffs, opts...) + if err != nil { + return diffs, err + } + diff.UpdateOp = apiOp + } + diffs = append(diffs, diff) + } + return diffs, nil +} + +func convertOpNameToClusterApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (clusterApiOperation, error) { + switch opName { + + case "updateClusterUpdateClusterOperation": + return &updateClusterUpdateClusterOperation{FieldDiffs: fieldDiffs}, nil + + default: + return nil, fmt.Errorf("no such operation with name: %v", opName) + } +} + +func extractClusterFields(r *Cluster) error { + vConfig := r.Config + if vConfig == nil { + // note: explicitly not the empty object. + vConfig = &ClusterConfig{} + } + if err := extractClusterConfigFields(r, vConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vConfig) { + r.Config = vConfig + } + vStatus := r.Status + if vStatus == nil { + // note: explicitly not the empty object. + vStatus = &ClusterStatus{} + } + if err := extractClusterStatusFields(r, vStatus); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vStatus) { + r.Status = vStatus + } + vMetrics := r.Metrics + if vMetrics == nil { + // note: explicitly not the empty object. 
+ vMetrics = &ClusterMetrics{} + } + if err := extractClusterMetricsFields(r, vMetrics); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMetrics) { + r.Metrics = vMetrics + } + vVirtualClusterConfig := r.VirtualClusterConfig + if vVirtualClusterConfig == nil { + // note: explicitly not the empty object. + vVirtualClusterConfig = &ClusterVirtualClusterConfig{} + } + if err := extractClusterVirtualClusterConfigFields(r, vVirtualClusterConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vVirtualClusterConfig) { + r.VirtualClusterConfig = vVirtualClusterConfig + } + return nil +} +func extractClusterConfigFields(r *Cluster, o *ClusterConfig) error { + vGceClusterConfig := o.GceClusterConfig + if vGceClusterConfig == nil { + // note: explicitly not the empty object. + vGceClusterConfig = &ClusterConfigGceClusterConfig{} + } + if err := extractClusterConfigGceClusterConfigFields(r, vGceClusterConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vGceClusterConfig) { + o.GceClusterConfig = vGceClusterConfig + } + vMasterConfig := o.MasterConfig + if vMasterConfig == nil { + // note: explicitly not the empty object. + vMasterConfig = &ClusterConfigMasterConfig{} + } + if err := extractClusterConfigMasterConfigFields(r, vMasterConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMasterConfig) { + o.MasterConfig = vMasterConfig + } + vWorkerConfig := o.WorkerConfig + if vWorkerConfig == nil { + // note: explicitly not the empty object. + vWorkerConfig = &ClusterConfigWorkerConfig{} + } + if err := extractClusterConfigWorkerConfigFields(r, vWorkerConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vWorkerConfig) { + o.WorkerConfig = vWorkerConfig + } + vSecondaryWorkerConfig := o.SecondaryWorkerConfig + if vSecondaryWorkerConfig == nil { + // note: explicitly not the empty object. 
+ vSecondaryWorkerConfig = &ClusterConfigSecondaryWorkerConfig{} + } + if err := extractClusterConfigSecondaryWorkerConfigFields(r, vSecondaryWorkerConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSecondaryWorkerConfig) { + o.SecondaryWorkerConfig = vSecondaryWorkerConfig + } + vSoftwareConfig := o.SoftwareConfig + if vSoftwareConfig == nil { + // note: explicitly not the empty object. + vSoftwareConfig = &ClusterConfigSoftwareConfig{} + } + if err := extractClusterConfigSoftwareConfigFields(r, vSoftwareConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSoftwareConfig) { + o.SoftwareConfig = vSoftwareConfig + } + vEncryptionConfig := o.EncryptionConfig + if vEncryptionConfig == nil { + // note: explicitly not the empty object. + vEncryptionConfig = &ClusterConfigEncryptionConfig{} + } + if err := extractClusterConfigEncryptionConfigFields(r, vEncryptionConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vEncryptionConfig) { + o.EncryptionConfig = vEncryptionConfig + } + vAutoscalingConfig := o.AutoscalingConfig + if vAutoscalingConfig == nil { + // note: explicitly not the empty object. + vAutoscalingConfig = &ClusterConfigAutoscalingConfig{} + } + if err := extractClusterConfigAutoscalingConfigFields(r, vAutoscalingConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAutoscalingConfig) { + o.AutoscalingConfig = vAutoscalingConfig + } + vSecurityConfig := o.SecurityConfig + if vSecurityConfig == nil { + // note: explicitly not the empty object. + vSecurityConfig = &ClusterConfigSecurityConfig{} + } + if err := extractClusterConfigSecurityConfigFields(r, vSecurityConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSecurityConfig) { + o.SecurityConfig = vSecurityConfig + } + vLifecycleConfig := o.LifecycleConfig + if vLifecycleConfig == nil { + // note: explicitly not the empty object. 
+ vLifecycleConfig = &ClusterConfigLifecycleConfig{} + } + if err := extractClusterConfigLifecycleConfigFields(r, vLifecycleConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vLifecycleConfig) { + o.LifecycleConfig = vLifecycleConfig + } + vEndpointConfig := o.EndpointConfig + if vEndpointConfig == nil { + // note: explicitly not the empty object. + vEndpointConfig = &ClusterConfigEndpointConfig{} + } + if err := extractClusterConfigEndpointConfigFields(r, vEndpointConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vEndpointConfig) { + o.EndpointConfig = vEndpointConfig + } + vGkeClusterConfig := o.GkeClusterConfig + if vGkeClusterConfig == nil { + // note: explicitly not the empty object. + vGkeClusterConfig = &ClusterConfigGkeClusterConfig{} + } + if err := extractClusterConfigGkeClusterConfigFields(r, vGkeClusterConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vGkeClusterConfig) { + o.GkeClusterConfig = vGkeClusterConfig + } + vMetastoreConfig := o.MetastoreConfig + if vMetastoreConfig == nil { + // note: explicitly not the empty object. + vMetastoreConfig = &ClusterConfigMetastoreConfig{} + } + if err := extractClusterConfigMetastoreConfigFields(r, vMetastoreConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMetastoreConfig) { + o.MetastoreConfig = vMetastoreConfig + } + vDataprocMetricConfig := o.DataprocMetricConfig + if vDataprocMetricConfig == nil { + // note: explicitly not the empty object. 
+ vDataprocMetricConfig = &ClusterConfigDataprocMetricConfig{} + } + if err := extractClusterConfigDataprocMetricConfigFields(r, vDataprocMetricConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vDataprocMetricConfig) { + o.DataprocMetricConfig = vDataprocMetricConfig + } + return nil +} +func extractClusterConfigGceClusterConfigFields(r *Cluster, o *ClusterConfigGceClusterConfig) error { + vReservationAffinity := o.ReservationAffinity + if vReservationAffinity == nil { + // note: explicitly not the empty object. + vReservationAffinity = &ClusterConfigGceClusterConfigReservationAffinity{} + } + if err := extractClusterConfigGceClusterConfigReservationAffinityFields(r, vReservationAffinity); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vReservationAffinity) { + o.ReservationAffinity = vReservationAffinity + } + vNodeGroupAffinity := o.NodeGroupAffinity + if vNodeGroupAffinity == nil { + // note: explicitly not the empty object. + vNodeGroupAffinity = &ClusterConfigGceClusterConfigNodeGroupAffinity{} + } + if err := extractClusterConfigGceClusterConfigNodeGroupAffinityFields(r, vNodeGroupAffinity); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vNodeGroupAffinity) { + o.NodeGroupAffinity = vNodeGroupAffinity + } + vShieldedInstanceConfig := o.ShieldedInstanceConfig + if vShieldedInstanceConfig == nil { + // note: explicitly not the empty object. + vShieldedInstanceConfig = &ClusterConfigGceClusterConfigShieldedInstanceConfig{} + } + if err := extractClusterConfigGceClusterConfigShieldedInstanceConfigFields(r, vShieldedInstanceConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vShieldedInstanceConfig) { + o.ShieldedInstanceConfig = vShieldedInstanceConfig + } + vConfidentialInstanceConfig := o.ConfidentialInstanceConfig + if vConfidentialInstanceConfig == nil { + // note: explicitly not the empty object. 
+ vConfidentialInstanceConfig = &ClusterConfigGceClusterConfigConfidentialInstanceConfig{} + } + if err := extractClusterConfigGceClusterConfigConfidentialInstanceConfigFields(r, vConfidentialInstanceConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vConfidentialInstanceConfig) { + o.ConfidentialInstanceConfig = vConfidentialInstanceConfig + } + return nil +} +func extractClusterConfigGceClusterConfigReservationAffinityFields(r *Cluster, o *ClusterConfigGceClusterConfigReservationAffinity) error { + return nil +} +func extractClusterConfigGceClusterConfigNodeGroupAffinityFields(r *Cluster, o *ClusterConfigGceClusterConfigNodeGroupAffinity) error { + return nil +} +func extractClusterConfigGceClusterConfigShieldedInstanceConfigFields(r *Cluster, o *ClusterConfigGceClusterConfigShieldedInstanceConfig) error { + return nil +} +func extractClusterConfigGceClusterConfigConfidentialInstanceConfigFields(r *Cluster, o *ClusterConfigGceClusterConfigConfidentialInstanceConfig) error { + return nil +} +func extractClusterConfigMasterConfigFields(r *Cluster, o *ClusterConfigMasterConfig) error { + vDiskConfig := o.DiskConfig + if vDiskConfig == nil { + // note: explicitly not the empty object. + vDiskConfig = &ClusterConfigMasterConfigDiskConfig{} + } + if err := extractClusterConfigMasterConfigDiskConfigFields(r, vDiskConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vDiskConfig) { + o.DiskConfig = vDiskConfig + } + vManagedGroupConfig := o.ManagedGroupConfig + if vManagedGroupConfig == nil { + // note: explicitly not the empty object. 
+ vManagedGroupConfig = &ClusterConfigMasterConfigManagedGroupConfig{} + } + if err := extractClusterConfigMasterConfigManagedGroupConfigFields(r, vManagedGroupConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vManagedGroupConfig) { + o.ManagedGroupConfig = vManagedGroupConfig + } + return nil +} +func extractClusterConfigMasterConfigDiskConfigFields(r *Cluster, o *ClusterConfigMasterConfigDiskConfig) error { + return nil +} +func extractClusterConfigMasterConfigManagedGroupConfigFields(r *Cluster, o *ClusterConfigMasterConfigManagedGroupConfig) error { + return nil +} +func extractClusterConfigMasterConfigAcceleratorsFields(r *Cluster, o *ClusterConfigMasterConfigAccelerators) error { + return nil +} +func extractClusterConfigMasterConfigInstanceReferencesFields(r *Cluster, o *ClusterConfigMasterConfigInstanceReferences) error { + return nil +} +func extractClusterConfigWorkerConfigFields(r *Cluster, o *ClusterConfigWorkerConfig) error { + vDiskConfig := o.DiskConfig + if vDiskConfig == nil { + // note: explicitly not the empty object. + vDiskConfig = &ClusterConfigWorkerConfigDiskConfig{} + } + if err := extractClusterConfigWorkerConfigDiskConfigFields(r, vDiskConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vDiskConfig) { + o.DiskConfig = vDiskConfig + } + vManagedGroupConfig := o.ManagedGroupConfig + if vManagedGroupConfig == nil { + // note: explicitly not the empty object. 
+ vManagedGroupConfig = &ClusterConfigWorkerConfigManagedGroupConfig{} + } + if err := extractClusterConfigWorkerConfigManagedGroupConfigFields(r, vManagedGroupConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vManagedGroupConfig) { + o.ManagedGroupConfig = vManagedGroupConfig + } + return nil +} +func extractClusterConfigWorkerConfigDiskConfigFields(r *Cluster, o *ClusterConfigWorkerConfigDiskConfig) error { + return nil +} +func extractClusterConfigWorkerConfigManagedGroupConfigFields(r *Cluster, o *ClusterConfigWorkerConfigManagedGroupConfig) error { + return nil +} +func extractClusterConfigWorkerConfigAcceleratorsFields(r *Cluster, o *ClusterConfigWorkerConfigAccelerators) error { + return nil +} +func extractClusterConfigWorkerConfigInstanceReferencesFields(r *Cluster, o *ClusterConfigWorkerConfigInstanceReferences) error { + return nil +} +func extractClusterConfigSecondaryWorkerConfigFields(r *Cluster, o *ClusterConfigSecondaryWorkerConfig) error { + vDiskConfig := o.DiskConfig + if vDiskConfig == nil { + // note: explicitly not the empty object. + vDiskConfig = &ClusterConfigSecondaryWorkerConfigDiskConfig{} + } + if err := extractClusterConfigSecondaryWorkerConfigDiskConfigFields(r, vDiskConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vDiskConfig) { + o.DiskConfig = vDiskConfig + } + vManagedGroupConfig := o.ManagedGroupConfig + if vManagedGroupConfig == nil { + // note: explicitly not the empty object. 
+ vManagedGroupConfig = &ClusterConfigSecondaryWorkerConfigManagedGroupConfig{} + } + if err := extractClusterConfigSecondaryWorkerConfigManagedGroupConfigFields(r, vManagedGroupConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vManagedGroupConfig) { + o.ManagedGroupConfig = vManagedGroupConfig + } + return nil +} +func extractClusterConfigSecondaryWorkerConfigDiskConfigFields(r *Cluster, o *ClusterConfigSecondaryWorkerConfigDiskConfig) error { + return nil +} +func extractClusterConfigSecondaryWorkerConfigManagedGroupConfigFields(r *Cluster, o *ClusterConfigSecondaryWorkerConfigManagedGroupConfig) error { + return nil +} +func extractClusterConfigSecondaryWorkerConfigAcceleratorsFields(r *Cluster, o *ClusterConfigSecondaryWorkerConfigAccelerators) error { + return nil +} +func extractClusterConfigSecondaryWorkerConfigInstanceReferencesFields(r *Cluster, o *ClusterConfigSecondaryWorkerConfigInstanceReferences) error { + return nil +} +func extractClusterConfigSoftwareConfigFields(r *Cluster, o *ClusterConfigSoftwareConfig) error { + return nil +} +func extractClusterConfigInitializationActionsFields(r *Cluster, o *ClusterConfigInitializationActions) error { + return nil +} +func extractClusterConfigEncryptionConfigFields(r *Cluster, o *ClusterConfigEncryptionConfig) error { + return nil +} +func extractClusterConfigAutoscalingConfigFields(r *Cluster, o *ClusterConfigAutoscalingConfig) error { + return nil +} +func extractClusterConfigSecurityConfigFields(r *Cluster, o *ClusterConfigSecurityConfig) error { + vKerberosConfig := o.KerberosConfig + if vKerberosConfig == nil { + // note: explicitly not the empty object. 
+ vKerberosConfig = &ClusterConfigSecurityConfigKerberosConfig{} + } + if err := extractClusterConfigSecurityConfigKerberosConfigFields(r, vKerberosConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vKerberosConfig) { + o.KerberosConfig = vKerberosConfig + } + vIdentityConfig := o.IdentityConfig + if vIdentityConfig == nil { + // note: explicitly not the empty object. + vIdentityConfig = &ClusterConfigSecurityConfigIdentityConfig{} + } + if err := extractClusterConfigSecurityConfigIdentityConfigFields(r, vIdentityConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vIdentityConfig) { + o.IdentityConfig = vIdentityConfig + } + return nil +} +func extractClusterConfigSecurityConfigKerberosConfigFields(r *Cluster, o *ClusterConfigSecurityConfigKerberosConfig) error { + return nil +} +func extractClusterConfigSecurityConfigIdentityConfigFields(r *Cluster, o *ClusterConfigSecurityConfigIdentityConfig) error { + return nil +} +func extractClusterConfigLifecycleConfigFields(r *Cluster, o *ClusterConfigLifecycleConfig) error { + return nil +} +func extractClusterConfigEndpointConfigFields(r *Cluster, o *ClusterConfigEndpointConfig) error { + return nil +} +func extractClusterConfigGkeClusterConfigFields(r *Cluster, o *ClusterConfigGkeClusterConfig) error { + vNamespacedGkeDeploymentTarget := o.NamespacedGkeDeploymentTarget + if vNamespacedGkeDeploymentTarget == nil { + // note: explicitly not the empty object. 
+ vNamespacedGkeDeploymentTarget = &ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget{} + } + if err := extractClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetFields(r, vNamespacedGkeDeploymentTarget); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vNamespacedGkeDeploymentTarget) { + o.NamespacedGkeDeploymentTarget = vNamespacedGkeDeploymentTarget + } + return nil +} +func extractClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetFields(r *Cluster, o *ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) error { + return nil +} +func extractClusterConfigMetastoreConfigFields(r *Cluster, o *ClusterConfigMetastoreConfig) error { + return nil +} +func extractClusterConfigDataprocMetricConfigFields(r *Cluster, o *ClusterConfigDataprocMetricConfig) error { + return nil +} +func extractClusterConfigDataprocMetricConfigMetricsFields(r *Cluster, o *ClusterConfigDataprocMetricConfigMetrics) error { + return nil +} +func extractClusterStatusFields(r *Cluster, o *ClusterStatus) error { + return nil +} +func extractClusterStatusHistoryFields(r *Cluster, o *ClusterStatusHistory) error { + return nil +} +func extractClusterMetricsFields(r *Cluster, o *ClusterMetrics) error { + return nil +} +func extractClusterVirtualClusterConfigFields(r *Cluster, o *ClusterVirtualClusterConfig) error { + vKubernetesClusterConfig := o.KubernetesClusterConfig + if vKubernetesClusterConfig == nil { + // note: explicitly not the empty object. + vKubernetesClusterConfig = &ClusterVirtualClusterConfigKubernetesClusterConfig{} + } + if err := extractClusterVirtualClusterConfigKubernetesClusterConfigFields(r, vKubernetesClusterConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vKubernetesClusterConfig) { + o.KubernetesClusterConfig = vKubernetesClusterConfig + } + vAuxiliaryServicesConfig := o.AuxiliaryServicesConfig + if vAuxiliaryServicesConfig == nil { + // note: explicitly not the empty object. 
+ vAuxiliaryServicesConfig = &ClusterVirtualClusterConfigAuxiliaryServicesConfig{} + } + if err := extractClusterVirtualClusterConfigAuxiliaryServicesConfigFields(r, vAuxiliaryServicesConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAuxiliaryServicesConfig) { + o.AuxiliaryServicesConfig = vAuxiliaryServicesConfig + } + return nil +} +func extractClusterVirtualClusterConfigKubernetesClusterConfigFields(r *Cluster, o *ClusterVirtualClusterConfigKubernetesClusterConfig) error { + vGkeClusterConfig := o.GkeClusterConfig + if vGkeClusterConfig == nil { + // note: explicitly not the empty object. + vGkeClusterConfig = &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig{} + } + if err := extractClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigFields(r, vGkeClusterConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vGkeClusterConfig) { + o.GkeClusterConfig = vGkeClusterConfig + } + vKubernetesSoftwareConfig := o.KubernetesSoftwareConfig + if vKubernetesSoftwareConfig == nil { + // note: explicitly not the empty object. 
+ vKubernetesSoftwareConfig = &ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig{} + } + if err := extractClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfigFields(r, vKubernetesSoftwareConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vKubernetesSoftwareConfig) { + o.KubernetesSoftwareConfig = vKubernetesSoftwareConfig + } + return nil +} +func extractClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigFields(r *Cluster, o *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig) error { + return nil +} +func extractClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetFields(r *Cluster, o *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget) error { + vNodePoolConfig := o.NodePoolConfig + if vNodePoolConfig == nil { + // note: explicitly not the empty object. + vNodePoolConfig = &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig{} + } + if err := extractClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigFields(r, vNodePoolConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vNodePoolConfig) { + o.NodePoolConfig = vNodePoolConfig + } + return nil +} +func extractClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigFields(r *Cluster, o *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig) error { + vConfig := o.Config + if vConfig == nil { + // note: explicitly not the empty object. 
+ vConfig = &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig{} + } + if err := extractClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigFields(r, vConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vConfig) { + o.Config = vConfig + } + vAutoscaling := o.Autoscaling + if vAutoscaling == nil { + // note: explicitly not the empty object. + vAutoscaling = &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling{} + } + if err := extractClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscalingFields(r, vAutoscaling); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAutoscaling) { + o.Autoscaling = vAutoscaling + } + return nil +} +func extractClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigFields(r *Cluster, o *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig) error { + vEphemeralStorageConfig := o.EphemeralStorageConfig + if vEphemeralStorageConfig == nil { + // note: explicitly not the empty object. 
+ vEphemeralStorageConfig = &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig{} + } + if err := extractClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfigFields(r, vEphemeralStorageConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vEphemeralStorageConfig) { + o.EphemeralStorageConfig = vEphemeralStorageConfig + } + return nil +} +func extractClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAcceleratorsFields(r *Cluster, o *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators) error { + return nil +} +func extractClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfigFields(r *Cluster, o *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig) error { + return nil +} +func extractClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscalingFields(r *Cluster, o *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling) error { + return nil +} +func extractClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfigFields(r *Cluster, o *ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig) error { + return nil +} +func extractClusterVirtualClusterConfigAuxiliaryServicesConfigFields(r *Cluster, o *ClusterVirtualClusterConfigAuxiliaryServicesConfig) error { + vMetastoreConfig := o.MetastoreConfig + if vMetastoreConfig == nil { + // note: explicitly not the empty object. 
+ vMetastoreConfig = &ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig{} + } + if err := extractClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfigFields(r, vMetastoreConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMetastoreConfig) { + o.MetastoreConfig = vMetastoreConfig + } + vSparkHistoryServerConfig := o.SparkHistoryServerConfig + if vSparkHistoryServerConfig == nil { + // note: explicitly not the empty object. + vSparkHistoryServerConfig = &ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig{} + } + if err := extractClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfigFields(r, vSparkHistoryServerConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSparkHistoryServerConfig) { + o.SparkHistoryServerConfig = vSparkHistoryServerConfig + } + return nil +} +func extractClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfigFields(r *Cluster, o *ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig) error { + return nil +} +func extractClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfigFields(r *Cluster, o *ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig) error { + return nil +} + +func postReadExtractClusterFields(r *Cluster) error { + vConfig := r.Config + if vConfig == nil { + // note: explicitly not the empty object. + vConfig = &ClusterConfig{} + } + if err := postReadExtractClusterConfigFields(r, vConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vConfig) { + r.Config = vConfig + } + vStatus := r.Status + if vStatus == nil { + // note: explicitly not the empty object. + vStatus = &ClusterStatus{} + } + if err := postReadExtractClusterStatusFields(r, vStatus); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vStatus) { + r.Status = vStatus + } + vMetrics := r.Metrics + if vMetrics == nil { + // note: explicitly not the empty object. 
+ vMetrics = &ClusterMetrics{} + } + if err := postReadExtractClusterMetricsFields(r, vMetrics); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMetrics) { + r.Metrics = vMetrics + } + vVirtualClusterConfig := r.VirtualClusterConfig + if vVirtualClusterConfig == nil { + // note: explicitly not the empty object. + vVirtualClusterConfig = &ClusterVirtualClusterConfig{} + } + if err := postReadExtractClusterVirtualClusterConfigFields(r, vVirtualClusterConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vVirtualClusterConfig) { + r.VirtualClusterConfig = vVirtualClusterConfig + } + return nil +} +func postReadExtractClusterConfigFields(r *Cluster, o *ClusterConfig) error { + vGceClusterConfig := o.GceClusterConfig + if vGceClusterConfig == nil { + // note: explicitly not the empty object. + vGceClusterConfig = &ClusterConfigGceClusterConfig{} + } + if err := extractClusterConfigGceClusterConfigFields(r, vGceClusterConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vGceClusterConfig) { + o.GceClusterConfig = vGceClusterConfig + } + vMasterConfig := o.MasterConfig + if vMasterConfig == nil { + // note: explicitly not the empty object. + vMasterConfig = &ClusterConfigMasterConfig{} + } + if err := extractClusterConfigMasterConfigFields(r, vMasterConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMasterConfig) { + o.MasterConfig = vMasterConfig + } + vWorkerConfig := o.WorkerConfig + if vWorkerConfig == nil { + // note: explicitly not the empty object. + vWorkerConfig = &ClusterConfigWorkerConfig{} + } + if err := extractClusterConfigWorkerConfigFields(r, vWorkerConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vWorkerConfig) { + o.WorkerConfig = vWorkerConfig + } + vSecondaryWorkerConfig := o.SecondaryWorkerConfig + if vSecondaryWorkerConfig == nil { + // note: explicitly not the empty object. 
+ vSecondaryWorkerConfig = &ClusterConfigSecondaryWorkerConfig{} + } + if err := extractClusterConfigSecondaryWorkerConfigFields(r, vSecondaryWorkerConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSecondaryWorkerConfig) { + o.SecondaryWorkerConfig = vSecondaryWorkerConfig + } + vSoftwareConfig := o.SoftwareConfig + if vSoftwareConfig == nil { + // note: explicitly not the empty object. + vSoftwareConfig = &ClusterConfigSoftwareConfig{} + } + if err := extractClusterConfigSoftwareConfigFields(r, vSoftwareConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSoftwareConfig) { + o.SoftwareConfig = vSoftwareConfig + } + vEncryptionConfig := o.EncryptionConfig + if vEncryptionConfig == nil { + // note: explicitly not the empty object. + vEncryptionConfig = &ClusterConfigEncryptionConfig{} + } + if err := extractClusterConfigEncryptionConfigFields(r, vEncryptionConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vEncryptionConfig) { + o.EncryptionConfig = vEncryptionConfig + } + vAutoscalingConfig := o.AutoscalingConfig + if vAutoscalingConfig == nil { + // note: explicitly not the empty object. + vAutoscalingConfig = &ClusterConfigAutoscalingConfig{} + } + if err := extractClusterConfigAutoscalingConfigFields(r, vAutoscalingConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAutoscalingConfig) { + o.AutoscalingConfig = vAutoscalingConfig + } + vSecurityConfig := o.SecurityConfig + if vSecurityConfig == nil { + // note: explicitly not the empty object. + vSecurityConfig = &ClusterConfigSecurityConfig{} + } + if err := extractClusterConfigSecurityConfigFields(r, vSecurityConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSecurityConfig) { + o.SecurityConfig = vSecurityConfig + } + vLifecycleConfig := o.LifecycleConfig + if vLifecycleConfig == nil { + // note: explicitly not the empty object. 
+ vLifecycleConfig = &ClusterConfigLifecycleConfig{} + } + if err := extractClusterConfigLifecycleConfigFields(r, vLifecycleConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vLifecycleConfig) { + o.LifecycleConfig = vLifecycleConfig + } + vEndpointConfig := o.EndpointConfig + if vEndpointConfig == nil { + // note: explicitly not the empty object. + vEndpointConfig = &ClusterConfigEndpointConfig{} + } + if err := extractClusterConfigEndpointConfigFields(r, vEndpointConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vEndpointConfig) { + o.EndpointConfig = vEndpointConfig + } + vGkeClusterConfig := o.GkeClusterConfig + if vGkeClusterConfig == nil { + // note: explicitly not the empty object. + vGkeClusterConfig = &ClusterConfigGkeClusterConfig{} + } + if err := extractClusterConfigGkeClusterConfigFields(r, vGkeClusterConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vGkeClusterConfig) { + o.GkeClusterConfig = vGkeClusterConfig + } + vMetastoreConfig := o.MetastoreConfig + if vMetastoreConfig == nil { + // note: explicitly not the empty object. + vMetastoreConfig = &ClusterConfigMetastoreConfig{} + } + if err := extractClusterConfigMetastoreConfigFields(r, vMetastoreConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMetastoreConfig) { + o.MetastoreConfig = vMetastoreConfig + } + vDataprocMetricConfig := o.DataprocMetricConfig + if vDataprocMetricConfig == nil { + // note: explicitly not the empty object. 
+ vDataprocMetricConfig = &ClusterConfigDataprocMetricConfig{} + } + if err := extractClusterConfigDataprocMetricConfigFields(r, vDataprocMetricConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vDataprocMetricConfig) { + o.DataprocMetricConfig = vDataprocMetricConfig + } + return nil +} +func postReadExtractClusterConfigGceClusterConfigFields(r *Cluster, o *ClusterConfigGceClusterConfig) error { + vReservationAffinity := o.ReservationAffinity + if vReservationAffinity == nil { + // note: explicitly not the empty object. + vReservationAffinity = &ClusterConfigGceClusterConfigReservationAffinity{} + } + if err := extractClusterConfigGceClusterConfigReservationAffinityFields(r, vReservationAffinity); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vReservationAffinity) { + o.ReservationAffinity = vReservationAffinity + } + vNodeGroupAffinity := o.NodeGroupAffinity + if vNodeGroupAffinity == nil { + // note: explicitly not the empty object. + vNodeGroupAffinity = &ClusterConfigGceClusterConfigNodeGroupAffinity{} + } + if err := extractClusterConfigGceClusterConfigNodeGroupAffinityFields(r, vNodeGroupAffinity); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vNodeGroupAffinity) { + o.NodeGroupAffinity = vNodeGroupAffinity + } + vShieldedInstanceConfig := o.ShieldedInstanceConfig + if vShieldedInstanceConfig == nil { + // note: explicitly not the empty object. + vShieldedInstanceConfig = &ClusterConfigGceClusterConfigShieldedInstanceConfig{} + } + if err := extractClusterConfigGceClusterConfigShieldedInstanceConfigFields(r, vShieldedInstanceConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vShieldedInstanceConfig) { + o.ShieldedInstanceConfig = vShieldedInstanceConfig + } + vConfidentialInstanceConfig := o.ConfidentialInstanceConfig + if vConfidentialInstanceConfig == nil { + // note: explicitly not the empty object. 
+ vConfidentialInstanceConfig = &ClusterConfigGceClusterConfigConfidentialInstanceConfig{} + } + if err := extractClusterConfigGceClusterConfigConfidentialInstanceConfigFields(r, vConfidentialInstanceConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vConfidentialInstanceConfig) { + o.ConfidentialInstanceConfig = vConfidentialInstanceConfig + } + return nil +} +func postReadExtractClusterConfigGceClusterConfigReservationAffinityFields(r *Cluster, o *ClusterConfigGceClusterConfigReservationAffinity) error { + return nil +} +func postReadExtractClusterConfigGceClusterConfigNodeGroupAffinityFields(r *Cluster, o *ClusterConfigGceClusterConfigNodeGroupAffinity) error { + return nil +} +func postReadExtractClusterConfigGceClusterConfigShieldedInstanceConfigFields(r *Cluster, o *ClusterConfigGceClusterConfigShieldedInstanceConfig) error { + return nil +} +func postReadExtractClusterConfigGceClusterConfigConfidentialInstanceConfigFields(r *Cluster, o *ClusterConfigGceClusterConfigConfidentialInstanceConfig) error { + return nil +} +func postReadExtractClusterConfigMasterConfigFields(r *Cluster, o *ClusterConfigMasterConfig) error { + vDiskConfig := o.DiskConfig + if vDiskConfig == nil { + // note: explicitly not the empty object. + vDiskConfig = &ClusterConfigMasterConfigDiskConfig{} + } + if err := extractClusterConfigMasterConfigDiskConfigFields(r, vDiskConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vDiskConfig) { + o.DiskConfig = vDiskConfig + } + vManagedGroupConfig := o.ManagedGroupConfig + if vManagedGroupConfig == nil { + // note: explicitly not the empty object. 
+ vManagedGroupConfig = &ClusterConfigMasterConfigManagedGroupConfig{} + } + if err := extractClusterConfigMasterConfigManagedGroupConfigFields(r, vManagedGroupConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vManagedGroupConfig) { + o.ManagedGroupConfig = vManagedGroupConfig + } + return nil +} +func postReadExtractClusterConfigMasterConfigDiskConfigFields(r *Cluster, o *ClusterConfigMasterConfigDiskConfig) error { + return nil +} +func postReadExtractClusterConfigMasterConfigManagedGroupConfigFields(r *Cluster, o *ClusterConfigMasterConfigManagedGroupConfig) error { + return nil +} +func postReadExtractClusterConfigMasterConfigAcceleratorsFields(r *Cluster, o *ClusterConfigMasterConfigAccelerators) error { + return nil +} +func postReadExtractClusterConfigMasterConfigInstanceReferencesFields(r *Cluster, o *ClusterConfigMasterConfigInstanceReferences) error { + return nil +} +func postReadExtractClusterConfigWorkerConfigFields(r *Cluster, o *ClusterConfigWorkerConfig) error { + vDiskConfig := o.DiskConfig + if vDiskConfig == nil { + // note: explicitly not the empty object. + vDiskConfig = &ClusterConfigWorkerConfigDiskConfig{} + } + if err := extractClusterConfigWorkerConfigDiskConfigFields(r, vDiskConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vDiskConfig) { + o.DiskConfig = vDiskConfig + } + vManagedGroupConfig := o.ManagedGroupConfig + if vManagedGroupConfig == nil { + // note: explicitly not the empty object. 
+ vManagedGroupConfig = &ClusterConfigWorkerConfigManagedGroupConfig{} + } + if err := extractClusterConfigWorkerConfigManagedGroupConfigFields(r, vManagedGroupConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vManagedGroupConfig) { + o.ManagedGroupConfig = vManagedGroupConfig + } + return nil +} +func postReadExtractClusterConfigWorkerConfigDiskConfigFields(r *Cluster, o *ClusterConfigWorkerConfigDiskConfig) error { + return nil +} +func postReadExtractClusterConfigWorkerConfigManagedGroupConfigFields(r *Cluster, o *ClusterConfigWorkerConfigManagedGroupConfig) error { + return nil +} +func postReadExtractClusterConfigWorkerConfigAcceleratorsFields(r *Cluster, o *ClusterConfigWorkerConfigAccelerators) error { + return nil +} +func postReadExtractClusterConfigWorkerConfigInstanceReferencesFields(r *Cluster, o *ClusterConfigWorkerConfigInstanceReferences) error { + return nil +} +func postReadExtractClusterConfigSecondaryWorkerConfigFields(r *Cluster, o *ClusterConfigSecondaryWorkerConfig) error { + vDiskConfig := o.DiskConfig + if vDiskConfig == nil { + // note: explicitly not the empty object. + vDiskConfig = &ClusterConfigSecondaryWorkerConfigDiskConfig{} + } + if err := extractClusterConfigSecondaryWorkerConfigDiskConfigFields(r, vDiskConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vDiskConfig) { + o.DiskConfig = vDiskConfig + } + vManagedGroupConfig := o.ManagedGroupConfig + if vManagedGroupConfig == nil { + // note: explicitly not the empty object. 
+ vManagedGroupConfig = &ClusterConfigSecondaryWorkerConfigManagedGroupConfig{} + } + if err := extractClusterConfigSecondaryWorkerConfigManagedGroupConfigFields(r, vManagedGroupConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vManagedGroupConfig) { + o.ManagedGroupConfig = vManagedGroupConfig + } + return nil +} +func postReadExtractClusterConfigSecondaryWorkerConfigDiskConfigFields(r *Cluster, o *ClusterConfigSecondaryWorkerConfigDiskConfig) error { + return nil +} +func postReadExtractClusterConfigSecondaryWorkerConfigManagedGroupConfigFields(r *Cluster, o *ClusterConfigSecondaryWorkerConfigManagedGroupConfig) error { + return nil +} +func postReadExtractClusterConfigSecondaryWorkerConfigAcceleratorsFields(r *Cluster, o *ClusterConfigSecondaryWorkerConfigAccelerators) error { + return nil +} +func postReadExtractClusterConfigSecondaryWorkerConfigInstanceReferencesFields(r *Cluster, o *ClusterConfigSecondaryWorkerConfigInstanceReferences) error { + return nil +} +func postReadExtractClusterConfigSoftwareConfigFields(r *Cluster, o *ClusterConfigSoftwareConfig) error { + return nil +} +func postReadExtractClusterConfigInitializationActionsFields(r *Cluster, o *ClusterConfigInitializationActions) error { + return nil +} +func postReadExtractClusterConfigEncryptionConfigFields(r *Cluster, o *ClusterConfigEncryptionConfig) error { + return nil +} +func postReadExtractClusterConfigAutoscalingConfigFields(r *Cluster, o *ClusterConfigAutoscalingConfig) error { + return nil +} +func postReadExtractClusterConfigSecurityConfigFields(r *Cluster, o *ClusterConfigSecurityConfig) error { + vKerberosConfig := o.KerberosConfig + if vKerberosConfig == nil { + // note: explicitly not the empty object. 
+ vKerberosConfig = &ClusterConfigSecurityConfigKerberosConfig{} + } + if err := extractClusterConfigSecurityConfigKerberosConfigFields(r, vKerberosConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vKerberosConfig) { + o.KerberosConfig = vKerberosConfig + } + vIdentityConfig := o.IdentityConfig + if vIdentityConfig == nil { + // note: explicitly not the empty object. + vIdentityConfig = &ClusterConfigSecurityConfigIdentityConfig{} + } + if err := extractClusterConfigSecurityConfigIdentityConfigFields(r, vIdentityConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vIdentityConfig) { + o.IdentityConfig = vIdentityConfig + } + return nil +} +func postReadExtractClusterConfigSecurityConfigKerberosConfigFields(r *Cluster, o *ClusterConfigSecurityConfigKerberosConfig) error { + return nil +} +func postReadExtractClusterConfigSecurityConfigIdentityConfigFields(r *Cluster, o *ClusterConfigSecurityConfigIdentityConfig) error { + return nil +} +func postReadExtractClusterConfigLifecycleConfigFields(r *Cluster, o *ClusterConfigLifecycleConfig) error { + return nil +} +func postReadExtractClusterConfigEndpointConfigFields(r *Cluster, o *ClusterConfigEndpointConfig) error { + return nil +} +func postReadExtractClusterConfigGkeClusterConfigFields(r *Cluster, o *ClusterConfigGkeClusterConfig) error { + vNamespacedGkeDeploymentTarget := o.NamespacedGkeDeploymentTarget + if vNamespacedGkeDeploymentTarget == nil { + // note: explicitly not the empty object. 
+ vNamespacedGkeDeploymentTarget = &ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget{} + } + if err := extractClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetFields(r, vNamespacedGkeDeploymentTarget); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vNamespacedGkeDeploymentTarget) { + o.NamespacedGkeDeploymentTarget = vNamespacedGkeDeploymentTarget + } + return nil +} +func postReadExtractClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetFields(r *Cluster, o *ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) error { + return nil +} +func postReadExtractClusterConfigMetastoreConfigFields(r *Cluster, o *ClusterConfigMetastoreConfig) error { + return nil +} +func postReadExtractClusterConfigDataprocMetricConfigFields(r *Cluster, o *ClusterConfigDataprocMetricConfig) error { + return nil +} +func postReadExtractClusterConfigDataprocMetricConfigMetricsFields(r *Cluster, o *ClusterConfigDataprocMetricConfigMetrics) error { + return nil +} +func postReadExtractClusterStatusFields(r *Cluster, o *ClusterStatus) error { + return nil +} +func postReadExtractClusterStatusHistoryFields(r *Cluster, o *ClusterStatusHistory) error { + return nil +} +func postReadExtractClusterMetricsFields(r *Cluster, o *ClusterMetrics) error { + return nil +} +func postReadExtractClusterVirtualClusterConfigFields(r *Cluster, o *ClusterVirtualClusterConfig) error { + vKubernetesClusterConfig := o.KubernetesClusterConfig + if vKubernetesClusterConfig == nil { + // note: explicitly not the empty object. 
+ vKubernetesClusterConfig = &ClusterVirtualClusterConfigKubernetesClusterConfig{} + } + if err := extractClusterVirtualClusterConfigKubernetesClusterConfigFields(r, vKubernetesClusterConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vKubernetesClusterConfig) { + o.KubernetesClusterConfig = vKubernetesClusterConfig + } + vAuxiliaryServicesConfig := o.AuxiliaryServicesConfig + if vAuxiliaryServicesConfig == nil { + // note: explicitly not the empty object. + vAuxiliaryServicesConfig = &ClusterVirtualClusterConfigAuxiliaryServicesConfig{} + } + if err := extractClusterVirtualClusterConfigAuxiliaryServicesConfigFields(r, vAuxiliaryServicesConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAuxiliaryServicesConfig) { + o.AuxiliaryServicesConfig = vAuxiliaryServicesConfig + } + return nil +} +func postReadExtractClusterVirtualClusterConfigKubernetesClusterConfigFields(r *Cluster, o *ClusterVirtualClusterConfigKubernetesClusterConfig) error { + vGkeClusterConfig := o.GkeClusterConfig + if vGkeClusterConfig == nil { + // note: explicitly not the empty object. + vGkeClusterConfig = &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig{} + } + if err := extractClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigFields(r, vGkeClusterConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vGkeClusterConfig) { + o.GkeClusterConfig = vGkeClusterConfig + } + vKubernetesSoftwareConfig := o.KubernetesSoftwareConfig + if vKubernetesSoftwareConfig == nil { + // note: explicitly not the empty object. 
+ vKubernetesSoftwareConfig = &ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig{} + } + if err := extractClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfigFields(r, vKubernetesSoftwareConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vKubernetesSoftwareConfig) { + o.KubernetesSoftwareConfig = vKubernetesSoftwareConfig + } + return nil +} +func postReadExtractClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigFields(r *Cluster, o *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig) error { + return nil +} +func postReadExtractClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetFields(r *Cluster, o *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget) error { + vNodePoolConfig := o.NodePoolConfig + if vNodePoolConfig == nil { + // note: explicitly not the empty object. + vNodePoolConfig = &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig{} + } + if err := extractClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigFields(r, vNodePoolConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vNodePoolConfig) { + o.NodePoolConfig = vNodePoolConfig + } + return nil +} +func postReadExtractClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigFields(r *Cluster, o *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig) error { + vConfig := o.Config + if vConfig == nil { + // note: explicitly not the empty object. 
+ vConfig = &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig{} + } + if err := extractClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigFields(r, vConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vConfig) { + o.Config = vConfig + } + vAutoscaling := o.Autoscaling + if vAutoscaling == nil { + // note: explicitly not the empty object. + vAutoscaling = &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling{} + } + if err := extractClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscalingFields(r, vAutoscaling); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAutoscaling) { + o.Autoscaling = vAutoscaling + } + return nil +} +func postReadExtractClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigFields(r *Cluster, o *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig) error { + vEphemeralStorageConfig := o.EphemeralStorageConfig + if vEphemeralStorageConfig == nil { + // note: explicitly not the empty object. 
+ vEphemeralStorageConfig = &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig{} + } + if err := extractClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfigFields(r, vEphemeralStorageConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vEphemeralStorageConfig) { + o.EphemeralStorageConfig = vEphemeralStorageConfig + } + return nil +} +func postReadExtractClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAcceleratorsFields(r *Cluster, o *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators) error { + return nil +} +func postReadExtractClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfigFields(r *Cluster, o *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig) error { + return nil +} +func postReadExtractClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscalingFields(r *Cluster, o *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling) error { + return nil +} +func postReadExtractClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfigFields(r *Cluster, o *ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig) error { + return nil +} +func postReadExtractClusterVirtualClusterConfigAuxiliaryServicesConfigFields(r *Cluster, o *ClusterVirtualClusterConfigAuxiliaryServicesConfig) error { + vMetastoreConfig := o.MetastoreConfig + if vMetastoreConfig == nil { + // note: explicitly not the empty object. 
+ vMetastoreConfig = &ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig{} + } + if err := extractClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfigFields(r, vMetastoreConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMetastoreConfig) { + o.MetastoreConfig = vMetastoreConfig + } + vSparkHistoryServerConfig := o.SparkHistoryServerConfig + if vSparkHistoryServerConfig == nil { + // note: explicitly not the empty object. + vSparkHistoryServerConfig = &ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig{} + } + if err := extractClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfigFields(r, vSparkHistoryServerConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSparkHistoryServerConfig) { + o.SparkHistoryServerConfig = vSparkHistoryServerConfig + } + return nil +} +func postReadExtractClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfigFields(r *Cluster, o *ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig) error { + return nil +} +func postReadExtractClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfigFields(r *Cluster, o *ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig) error { + return nil +} diff --git a/mmv1/third_party/terraform/services/dataproc/cluster_schema.go.tmpl b/mmv1/third_party/terraform/services/dataproc/cluster_schema.go.tmpl new file mode 100644 index 000000000000..962286ba33e1 --- /dev/null +++ b/mmv1/third_party/terraform/services/dataproc/cluster_schema.go.tmpl @@ -0,0 +1,1941 @@ +package dataproc + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +func DCLClusterSchema() *dcl.Schema { + return &dcl.Schema{ + Info: &dcl.Info{ + Title: "Dataproc/Cluster", + Description: "The Dataproc Cluster resource", + StructName: "Cluster", + HasIAM: true, + }, + Paths: &dcl.Paths{ + Get: &dcl.Path{ + Description: "The function used 
to get information about a Cluster", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "cluster", + Required: true, + Description: "A full instance of a Cluster", + }, + }, + }, + Apply: &dcl.Path{ + Description: "The function used to apply information about a Cluster", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "cluster", + Required: true, + Description: "A full instance of a Cluster", + }, + }, + }, + Delete: &dcl.Path{ + Description: "The function used to delete a Cluster", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "cluster", + Required: true, + Description: "A full instance of a Cluster", + }, + }, + }, + DeleteAll: &dcl.Path{ + Description: "The function used to delete all Cluster", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "project", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + dcl.PathParameters{ + Name: "location", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + }, + }, + List: &dcl.Path{ + Description: "The function used to list information about many Cluster", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "project", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + dcl.PathParameters{ + Name: "location", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + }, + }, + }, + Components: &dcl.Components{ + Schemas: map[string]*dcl.Component{ + "Cluster": &dcl.Component{ + Title: "Cluster", + ID: "projects/{{ "{{" }}project{{ "}}" }}/regions/{{ "{{" }}location{{ "}}" }}/clusters/{{ "{{" }}name{{ "}}" }}", + UsesStateHint: true, + ParentContainer: "project", + LabelsField: "labels", + HasCreate: true, + HasIAM: true, + SchemaProperty: dcl.Property{ + Type: "object", + Required: []string{ + "project", + "name", + "location", + }, + Properties: map[string]*dcl.Property{ + "clusterUuid": &dcl.Property{ + Type: 
"string", + GoName: "ClusterUuid", + ReadOnly: true, + Description: "Output only. A cluster UUID (Unique Universal Identifier). Dataproc generates this value when it creates the cluster.", + Immutable: true, + }, + "config": &dcl.Property{ + Type: "object", + GoName: "Config", + GoType: "ClusterConfig", + Description: "The cluster config. Note that Dataproc may set default values, and values may change when clusters are updated.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "autoscalingConfig": &dcl.Property{ + Type: "object", + GoName: "AutoscalingConfig", + GoType: "ClusterConfigAutoscalingConfig", + Description: "Optional. Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "policy": &dcl.Property{ + Type: "string", + GoName: "Policy", + Description: "Optional. The autoscaling policy used by the cluster. Only resource names including projectid and location (region) are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]` * `projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]` Note that the policy must be in the same project and Dataproc region.", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Dataproc/AutoscalingPolicy", + Field: "name", + }, + }, + }, + }, + }, + "dataprocMetricConfig": &dcl.Property{ + Type: "object", + GoName: "DataprocMetricConfig", + GoType: "ClusterConfigDataprocMetricConfig", + Description: "Optional. The config for Dataproc metrics.", + Immutable: true, + Required: []string{ + "metrics", + }, + Properties: map[string]*dcl.Property{ + "metrics": &dcl.Property{ + Type: "array", + GoName: "Metrics", + Description: "Required. 
Metrics sources to enable.", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "object", + GoType: "ClusterConfigDataprocMetricConfigMetrics", + Required: []string{ + "metricSource", + }, + Properties: map[string]*dcl.Property{ + "metricOverrides": &dcl.Property{ + Type: "array", + GoName: "MetricOverrides", + Description: "Optional. Specify one or more [available OSS metrics] (https://cloud.google.com/dataproc/docs/guides/monitoring#available_oss_metrics) to collect for the metric course (for the `SPARK` metric source, any [Spark metric] (https://spark.apache.org/docs/latest/monitoring.html#metrics) can be specified). Provide metrics in the following format: `METRIC_SOURCE:INSTANCE:GROUP:METRIC` Use camelcase as appropriate. Examples: ``` yarn:ResourceManager:QueueMetrics:AppsCompleted spark:driver:DAGScheduler:job.allJobs sparkHistoryServer:JVM:Memory:NonHeapMemoryUsage.committed hiveserver2:JVM:Memory:NonHeapMemoryUsage.used ``` Notes: * Only the specified overridden metrics will be collected for the metric source. For example, if one or more `spark:executive` metrics are listed as metric overrides, other `SPARK` metrics will not be collected. The collection of the default metrics for other OSS metric sources is unaffected. For example, if both `SPARK` andd `YARN` metric sources are enabled, and overrides are provided for Spark metrics only, all default YARN metrics will be collected.", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + "metricSource": &dcl.Property{ + Type: "string", + GoName: "MetricSource", + GoType: "ClusterConfigDataprocMetricConfigMetricsMetricSourceEnum", + Description: "Required. Default metrics are collected unless `metricOverrides` are specified for the metric source (see [Available OSS metrics] (https://cloud.google.com/dataproc/docs/guides/monitoring#available_oss_metrics) for more information). 
Possible values: METRIC_SOURCE_UNSPECIFIED, MONITORING_AGENT_DEFAULTS, HDFS, SPARK, YARN, SPARK_HISTORY_SERVER, HIVESERVER2", + Immutable: true, + Enum: []string{ + "METRIC_SOURCE_UNSPECIFIED", + "MONITORING_AGENT_DEFAULTS", + "HDFS", + "SPARK", + "YARN", + "SPARK_HISTORY_SERVER", + "HIVESERVER2", + }, + }, + }, + }, + }, + }, + }, + "encryptionConfig": &dcl.Property{ + Type: "object", + GoName: "EncryptionConfig", + GoType: "ClusterConfigEncryptionConfig", + Description: "Optional. Encryption settings for the cluster.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "gcePdKmsKeyName": &dcl.Property{ + Type: "string", + GoName: "GcePdKmsKeyName", + Description: "Optional. The Cloud KMS key name to use for PD disk encryption for all instances in the cluster.", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Cloudkms/CryptoKey", + Field: "selfLink", + }, + }, + }, + }, + }, + "endpointConfig": &dcl.Property{ + Type: "object", + GoName: "EndpointConfig", + GoType: "ClusterConfigEndpointConfig", + Description: "Optional. Port/endpoint configuration for this cluster", + Immutable: true, + ServerDefault: true, + Properties: map[string]*dcl.Property{ + "enableHttpPortAccess": &dcl.Property{ + Type: "boolean", + GoName: "EnableHttpPortAccess", + Description: "Optional. If true, enable http access to specific ports on the cluster from external sources. Defaults to false.", + Immutable: true, + }, + "httpPorts": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "HttpPorts", + ReadOnly: true, + Description: "Output only. The map of port descriptions to URLs. Will only be populated if enable_http_port_access is true.", + Immutable: true, + }, + }, + }, + "gceClusterConfig": &dcl.Property{ + Type: "object", + GoName: "GceClusterConfig", + GoType: "ClusterConfigGceClusterConfig", + Description: "Optional. 
The shared Compute Engine config settings for all instances in a cluster.", + Immutable: true, + ServerDefault: true, + Properties: map[string]*dcl.Property{ + "confidentialInstanceConfig": &dcl.Property{ + Type: "object", + GoName: "ConfidentialInstanceConfig", + GoType: "ClusterConfigGceClusterConfigConfidentialInstanceConfig", + Description: "Optional. Confidential Instance Config for clusters using [Confidential VMs](https://cloud.google.com/compute/confidential-vm/docs).", + Immutable: true, + Properties: map[string]*dcl.Property{ + "enableConfidentialCompute": &dcl.Property{ + Type: "boolean", + GoName: "EnableConfidentialCompute", + Description: "Optional. Defines whether the instance should have confidential compute enabled.", + Immutable: true, + }, + }, + }, + "internalIPOnly": &dcl.Property{ + Type: "boolean", + GoName: "InternalIPOnly", + Description: "Optional. If true, all instances in the cluster will only have internal IP addresses. By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. This `internal_ip_only` restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses.", + Immutable: true, + ServerDefault: true, + }, + "metadata": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "Metadata", + Description: "The Compute Engine metadata entries to add to all instances (see [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).", + Immutable: true, + }, + "network": &dcl.Property{ + Type: "string", + GoName: "Network", + Description: "Optional. The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. 
If neither `network_uri` nor `subnetwork_uri` is specified, the \"default\" network of the project is used, if it exists. Cannot be a \"Custom Subnet Network\" (see [Using Subnetworks](https://cloud.google.com/compute/docs/subnetworks) for more information). A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default` * `projects/[project_id]/regions/global/default` * `default`", + Immutable: true, + ServerDefault: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Compute/Network", + Field: "selfLink", + }, + }, + }, + "nodeGroupAffinity": &dcl.Property{ + Type: "object", + GoName: "NodeGroupAffinity", + GoType: "ClusterConfigGceClusterConfigNodeGroupAffinity", + Description: "Optional. Node Group Affinity for sole-tenant clusters.", + Immutable: true, + Required: []string{ + "nodeGroup", + }, + Properties: map[string]*dcl.Property{ + "nodeGroup": &dcl.Property{ + Type: "string", + GoName: "NodeGroup", + Description: "Required. The URI of a sole-tenant [node group resource](https://cloud.google.com/compute/docs/reference/rest/v1/nodeGroups) that the cluster will be created on. A full URL, partial URI, or node group name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1` * `projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1` * `node-group-1`", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Compute/NodeGroup", + Field: "selfLink", + }, + }, + }, + }, + }, + "privateIPv6GoogleAccess": &dcl.Property{ + Type: "string", + GoName: "PrivateIPv6GoogleAccess", + GoType: "ClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum", + Description: "Optional. The type of IPv6 access for a cluster. 
Possible values: PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED, INHERIT_FROM_SUBNETWORK, OUTBOUND, BIDIRECTIONAL", + Immutable: true, + Enum: []string{ + "PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED", + "INHERIT_FROM_SUBNETWORK", + "OUTBOUND", + "BIDIRECTIONAL", + }, + }, + "reservationAffinity": &dcl.Property{ + Type: "object", + GoName: "ReservationAffinity", + GoType: "ClusterConfigGceClusterConfigReservationAffinity", + Description: "Optional. Reservation Affinity for consuming Zonal reservation.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "consumeReservationType": &dcl.Property{ + Type: "string", + GoName: "ConsumeReservationType", + GoType: "ClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum", + Description: "Optional. Type of reservation to consume Possible values: TYPE_UNSPECIFIED, NO_RESERVATION, ANY_RESERVATION, SPECIFIC_RESERVATION", + Immutable: true, + Enum: []string{ + "TYPE_UNSPECIFIED", + "NO_RESERVATION", + "ANY_RESERVATION", + "SPECIFIC_RESERVATION", + }, + }, + "key": &dcl.Property{ + Type: "string", + GoName: "Key", + Description: "Optional. Corresponds to the label key of reservation resource.", + Immutable: true, + }, + "values": &dcl.Property{ + Type: "array", + GoName: "Values", + Description: "Optional. Corresponds to the label values of reservation resource.", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + }, + }, + "serviceAccount": &dcl.Property{ + Type: "string", + GoName: "ServiceAccount", + Description: "Optional. The [Dataproc service account](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_dataproc) (also see [VM Data Plane identity](https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity)) used by Dataproc cluster VM instances to access Google Cloud Platform services. 
If not specified, the [Compute Engine default service account](https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) is used.", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Iam/ServiceAccount", + Field: "email", + }, + }, + }, + "serviceAccountScopes": &dcl.Property{ + Type: "array", + GoName: "ServiceAccountScopes", + Description: "Optional. The URIs of service account scopes to be included in Compute Engine instances. The following base set of scopes is always included: * https://www.googleapis.com/auth/cloud.useraccounts.readonly * https://www.googleapis.com/auth/devstorage.read_write * https://www.googleapis.com/auth/logging.write If no scopes are specified, the following defaults are also provided: * https://www.googleapis.com/auth/bigquery * https://www.googleapis.com/auth/bigtable.admin.table * https://www.googleapis.com/auth/bigtable.data * https://www.googleapis.com/auth/devstorage.full_control", + Immutable: true, + ServerDefault: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + "shieldedInstanceConfig": &dcl.Property{ + Type: "object", + GoName: "ShieldedInstanceConfig", + GoType: "ClusterConfigGceClusterConfigShieldedInstanceConfig", + Description: "Optional. Shielded Instance Config for clusters using [Compute Engine Shielded VMs](https://cloud.google.com/security/shielded-cloud/shielded-vm).", + Immutable: true, + Properties: map[string]*dcl.Property{ + "enableIntegrityMonitoring": &dcl.Property{ + Type: "boolean", + GoName: "EnableIntegrityMonitoring", + Description: "Optional. Defines whether instances have integrity monitoring enabled.", + Immutable: true, + }, + "enableSecureBoot": &dcl.Property{ + Type: "boolean", + GoName: "EnableSecureBoot", + Description: "Optional. 
Defines whether instances have Secure Boot enabled.", + Immutable: true, + }, + "enableVtpm": &dcl.Property{ + Type: "boolean", + GoName: "EnableVtpm", + Description: "Optional. Defines whether instances have the vTPM enabled.", + Immutable: true, + }, + }, + }, + "subnetwork": &dcl.Property{ + Type: "string", + GoName: "Subnetwork", + Description: "Optional. The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/subnetworks/sub0` * `projects/[project_id]/regions/us-east1/subnetworks/sub0` * `sub0`", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Compute/Subnetwork", + Field: "selfLink", + }, + }, + }, + "tags": &dcl.Property{ + Type: "array", + GoName: "Tags", + Description: "The Compute Engine tags to add to all instances (see [Tagging instances](https://cloud.google.com/compute/docs/label-or-tag-resources#tags)).", + Immutable: true, + SendEmpty: true, + ListType: "set", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + "zone": &dcl.Property{ + Type: "string", + GoName: "Zone", + Description: "Optional. The zone where the Compute Engine cluster will be located. On a create request, it is required in the \"global\" region. If omitted in a non-global Dataproc region, the service will pick a zone in the corresponding Compute Engine region. On a get request, zone will always be present. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]` * `projects/[project_id]/zones/[zone]` * `us-central1-f`", + Immutable: true, + }, + }, + }, + "gkeClusterConfig": &dcl.Property{ + Type: "object", + GoName: "GkeClusterConfig", + GoType: "ClusterConfigGkeClusterConfig", + Description: "Optional. BETA. 
The Kubernetes Engine config for Dataproc clusters deployed to Kubernetes. Setting this is considered mutually exclusive with Compute Engine-based options such as `gce_cluster_config`, `master_config`, `worker_config`, `secondary_worker_config`, and `autoscaling_config`.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "namespacedGkeDeploymentTarget": &dcl.Property{ + Type: "object", + GoName: "NamespacedGkeDeploymentTarget", + GoType: "ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget", + Description: "Optional. A target for the deployment.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "clusterNamespace": &dcl.Property{ + Type: "string", + GoName: "ClusterNamespace", + Description: "Optional. A namespace within the GKE cluster to deploy into.", + Immutable: true, + }, + "targetGkeCluster": &dcl.Property{ + Type: "string", + GoName: "TargetGkeCluster", + Description: "Optional. The target GKE cluster to deploy to. Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Container/Cluster", + Field: "name", + }, + }, + }, + }, + }, + }, + }, + "initializationActions": &dcl.Property{ + Type: "array", + GoName: "InitializationActions", + Description: "Optional. Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's `role` metadata to run an executable on a master or worker node, as shown below using `curl` (you can also use `wget`): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if [[ \"${ROLE}\" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... 
fi", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "object", + GoType: "ClusterConfigInitializationActions", + Required: []string{ + "executableFile", + }, + Properties: map[string]*dcl.Property{ + "executableFile": &dcl.Property{ + Type: "string", + GoName: "ExecutableFile", + Description: "Required. Cloud Storage URI of executable file.", + Immutable: true, + }, + "executionTimeout": &dcl.Property{ + Type: "string", + GoName: "ExecutionTimeout", + Description: "Optional. Amount of time executable has to complete. Default is 10 minutes (see JSON representation of [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period.", + Immutable: true, + }, + }, + }, + }, + "lifecycleConfig": &dcl.Property{ + Type: "object", + GoName: "LifecycleConfig", + GoType: "ClusterConfigLifecycleConfig", + Description: "Optional. Lifecycle setting for the cluster.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "autoDeleteTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "AutoDeleteTime", + Description: "Optional. The time when cluster will be auto-deleted (see JSON representation of [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).", + Immutable: true, + }, + "autoDeleteTtl": &dcl.Property{ + Type: "string", + GoName: "AutoDeleteTtl", + Description: "Optional. The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).", + Immutable: true, + }, + "idleDeleteTtl": &dcl.Property{ + Type: "string", + GoName: "IdleDeleteTtl", + Description: "Optional. 
The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).", + Immutable: true, + }, + "idleStartTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "IdleStartTime", + ReadOnly: true, + Description: "Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).", + Immutable: true, + }, + }, + }, + "masterConfig": &dcl.Property{ + Type: "object", + GoName: "MasterConfig", + GoType: "ClusterConfigMasterConfig", + Description: "Optional. The Compute Engine config settings for the master instance in a cluster.", + Immutable: true, + ServerDefault: true, + Properties: map[string]*dcl.Property{ + "accelerators": &dcl.Property{ + Type: "array", + GoName: "Accelerators", + Description: "Optional. The Compute Engine accelerator configuration for these instances.", + Immutable: true, + ServerDefault: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "object", + GoType: "ClusterConfigMasterConfigAccelerators", + Properties: map[string]*dcl.Property{ + "acceleratorCount": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "AcceleratorCount", + Description: "The number of the accelerator cards of this type exposed to this instance.", + Immutable: true, + }, + "acceleratorType": &dcl.Property{ + Type: "string", + GoName: "AcceleratorType", + Description: "Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See [Compute Engine AcceleratorTypes](https://cloud.google.com/compute/docs/reference/beta/acceleratorTypes). 
Examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * `nvidia-tesla-k80` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, `nvidia-tesla-k80`.", + Immutable: true, + }, + }, + }, + }, + "diskConfig": &dcl.Property{ + Type: "object", + GoName: "DiskConfig", + GoType: "ClusterConfigMasterConfigDiskConfig", + Description: "Optional. Disk option config settings.", + Immutable: true, + ServerDefault: true, + Properties: map[string]*dcl.Property{ + "bootDiskSizeGb": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "BootDiskSizeGb", + Description: "Optional. Size in GB of the boot disk (default is 500GB).", + Immutable: true, + }, + "bootDiskType": &dcl.Property{ + Type: "string", + GoName: "BootDiskType", + Description: "Optional. Type of the boot disk (default is \"pd-standard\"). Valid values: \"pd-balanced\" (Persistent Disk Balanced Solid State Drive), \"pd-ssd\" (Persistent Disk Solid State Drive), or \"pd-standard\" (Persistent Disk Hard Disk Drive). See [Disk types](https://cloud.google.com/compute/docs/disks#disk-types).", + Immutable: true, + }, + "localSsdInterface": &dcl.Property{ + Type: "string", + GoName: "LocalSsdInterface", + Description: "Optional. Interface type of local SSDs (default is \"scsi\"). Valid values: \"scsi\" (Small Computer System Interface), \"nvme\" (Non-Volatile Memory Express). See [local SSD performance](https://cloud.google.com/compute/docs/disks/local-ssd#performance).", + Immutable: true, + }, + "numLocalSsds": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "NumLocalSsds", + Description: "Optional. 
Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.", + Immutable: true, + ServerDefault: true, + }, + }, + }, + "image": &dcl.Property{ + Type: "string", + GoName: "Image", + Description: "Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id]` * `projects/[project_id]/global/images/[image-id]` * `image-id` Image family examples. Dataproc will use the most recent image from the family: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name]` * `projects/[project_id]/global/images/family/[custom-image-family-name]` If the URI is unspecified, it will be inferred from `SoftwareConfig.image_version` or the system default.", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Compute/Image", + Field: "selfLink", + }, + }, + }, + "instanceNames": &dcl.Property{ + Type: "array", + GoName: "InstanceNames", + ReadOnly: true, + Description: "Output only. The list of instance names. Dataproc derives the names from `cluster_name`, `num_instances`, and the instance group.", + Immutable: true, + ServerDefault: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Compute/Instance", + Field: "selfLink", + }, + }, + }, + }, + "instanceReferences": &dcl.Property{ + Type: "array", + GoName: "InstanceReferences", + ReadOnly: true, + Description: "Output only. 
List of references to Compute Engine instances.", + Immutable: true, + ListType: "list", + Items: &dcl.Property{ + Type: "object", + GoType: "ClusterConfigMasterConfigInstanceReferences", + Properties: map[string]*dcl.Property{ + "instanceId": &dcl.Property{ + Type: "string", + GoName: "InstanceId", + Description: "The unique identifier of the Compute Engine instance.", + Immutable: true, + }, + "instanceName": &dcl.Property{ + Type: "string", + GoName: "InstanceName", + Description: "The user-friendly name of the Compute Engine instance.", + Immutable: true, + }, + "publicEciesKey": &dcl.Property{ + Type: "string", + GoName: "PublicEciesKey", + Description: "The public ECIES key used for sharing data with this instance.", + Immutable: true, + }, + "publicKey": &dcl.Property{ + Type: "string", + GoName: "PublicKey", + Description: "The public RSA key used for sharing data with this instance.", + Immutable: true, + }, + }, + }, + }, + "isPreemptible": &dcl.Property{ + Type: "boolean", + GoName: "IsPreemptible", + ReadOnly: true, + Description: "Output only. Specifies that this instance group contains preemptible instances.", + Immutable: true, + }, + "machineType": &dcl.Property{ + Type: "string", + GoName: "MachineType", + Description: "Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. 
Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `n1-standard-2` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, `n1-standard-2`.", + Immutable: true, + }, + "managedGroupConfig": &dcl.Property{ + Type: "object", + GoName: "ManagedGroupConfig", + GoType: "ClusterConfigMasterConfigManagedGroupConfig", + ReadOnly: true, + Description: "Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.", + Immutable: true, + ServerDefault: true, + Properties: map[string]*dcl.Property{ + "instanceGroupManagerName": &dcl.Property{ + Type: "string", + GoName: "InstanceGroupManagerName", + ReadOnly: true, + Description: "Output only. The name of the Instance Group Manager for this group.", + Immutable: true, + }, + "instanceTemplateName": &dcl.Property{ + Type: "string", + GoName: "InstanceTemplateName", + ReadOnly: true, + Description: "Output only. The name of the Instance Template used for the Managed Instance Group.", + Immutable: true, + }, + }, + }, + "minCpuPlatform": &dcl.Property{ + Type: "string", + GoName: "MinCpuPlatform", + Description: "Optional. Specifies the minimum cpu platform for the Instance Group. See [Dataproc -> Minimum CPU Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).", + Immutable: true, + ServerDefault: true, + }, + "numInstances": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "NumInstances", + Description: "Optional. The number of VM instances in the instance group. 
For [HA cluster](/dataproc/docs/concepts/configuring-clusters/high-availability) [master_config](#FIELDS.master_config) groups, **must be set to 3**. For standard cluster [master_config](#FIELDS.master_config) groups, **must be set to 1**.", + Immutable: true, + }, + "preemptibility": &dcl.Property{ + Type: "string", + GoName: "Preemptibility", + GoType: "ClusterConfigMasterConfigPreemptibilityEnum", + Description: "Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is `NON_PREEMPTIBLE`. This default cannot be changed. The default value for secondary instances is `PREEMPTIBLE`. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE", + Immutable: true, + Enum: []string{ + "PREEMPTIBILITY_UNSPECIFIED", + "NON_PREEMPTIBLE", + "PREEMPTIBLE", + }, + }, + }, + }, + "metastoreConfig": &dcl.Property{ + Type: "object", + GoName: "MetastoreConfig", + GoType: "ClusterConfigMetastoreConfig", + Description: "Optional. Metastore configuration.", + Immutable: true, + Required: []string{ + "dataprocMetastoreService", + }, + Properties: map[string]*dcl.Property{ + "dataprocMetastoreService": &dcl.Property{ + Type: "string", + GoName: "DataprocMetastoreService", + Description: "Required. Resource name of an existing Dataproc Metastore service. Example: * `projects/[project_id]/locations/[dataproc_region]/services/[service-name]`", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Metastore/Service", + Field: "selfLink", + }, + }, + }, + }, + }, + "secondaryWorkerConfig": &dcl.Property{ + Type: "object", + GoName: "SecondaryWorkerConfig", + GoType: "ClusterConfigSecondaryWorkerConfig", + Description: "Optional. 
The Compute Engine config settings for additional worker instances in a cluster.", + Immutable: true, + ServerDefault: true, + Properties: map[string]*dcl.Property{ + "accelerators": &dcl.Property{ + Type: "array", + GoName: "Accelerators", + Description: "Optional. The Compute Engine accelerator configuration for these instances.", + Immutable: true, + ServerDefault: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "object", + GoType: "ClusterConfigSecondaryWorkerConfigAccelerators", + Properties: map[string]*dcl.Property{ + "acceleratorCount": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "AcceleratorCount", + Description: "The number of the accelerator cards of this type exposed to this instance.", + Immutable: true, + }, + "acceleratorType": &dcl.Property{ + Type: "string", + GoName: "AcceleratorType", + Description: "Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See [Compute Engine AcceleratorTypes](https://cloud.google.com/compute/docs/reference/beta/acceleratorTypes). Examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * `nvidia-tesla-k80` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, `nvidia-tesla-k80`.", + Immutable: true, + }, + }, + }, + }, + "diskConfig": &dcl.Property{ + Type: "object", + GoName: "DiskConfig", + GoType: "ClusterConfigSecondaryWorkerConfigDiskConfig", + Description: "Optional. 
Disk option config settings.", + Immutable: true, + ServerDefault: true, + Properties: map[string]*dcl.Property{ + "bootDiskSizeGb": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "BootDiskSizeGb", + Description: "Optional. Size in GB of the boot disk (default is 500GB).", + Immutable: true, + }, + "bootDiskType": &dcl.Property{ + Type: "string", + GoName: "BootDiskType", + Description: "Optional. Type of the boot disk (default is \"pd-standard\"). Valid values: \"pd-balanced\" (Persistent Disk Balanced Solid State Drive), \"pd-ssd\" (Persistent Disk Solid State Drive), or \"pd-standard\" (Persistent Disk Hard Disk Drive). See [Disk types](https://cloud.google.com/compute/docs/disks#disk-types).", + Immutable: true, + }, + "localSsdInterface": &dcl.Property{ + Type: "string", + GoName: "LocalSsdInterface", + Description: "Optional. Interface type of local SSDs (default is \"scsi\"). Valid values: \"scsi\" (Small Computer System Interface), \"nvme\" (Non-Volatile Memory Express). See [local SSD performance](https://cloud.google.com/compute/docs/disks/local-ssd#performance).", + Immutable: true, + }, + "numLocalSsds": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "NumLocalSsds", + Description: "Optional. Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.", + Immutable: true, + ServerDefault: true, + }, + }, + }, + "image": &dcl.Property{ + Type: "string", + GoName: "Image", + Description: "Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. 
Image examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id]` * `projects/[project_id]/global/images/[image-id]` * `image-id` Image family examples. Dataproc will use the most recent image from the family: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name]` * `projects/[project_id]/global/images/family/[custom-image-family-name]` If the URI is unspecified, it will be inferred from `SoftwareConfig.image_version` or the system default.", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Compute/Image", + Field: "selfLink", + }, + }, + }, + "instanceNames": &dcl.Property{ + Type: "array", + GoName: "InstanceNames", + ReadOnly: true, + Description: "Output only. The list of instance names. Dataproc derives the names from `cluster_name`, `num_instances`, and the instance group.", + Immutable: true, + ServerDefault: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Compute/Instance", + Field: "selfLink", + }, + }, + }, + }, + "instanceReferences": &dcl.Property{ + Type: "array", + GoName: "InstanceReferences", + ReadOnly: true, + Description: "Output only. 
List of references to Compute Engine instances.", + Immutable: true, + ListType: "list", + Items: &dcl.Property{ + Type: "object", + GoType: "ClusterConfigSecondaryWorkerConfigInstanceReferences", + Properties: map[string]*dcl.Property{ + "instanceId": &dcl.Property{ + Type: "string", + GoName: "InstanceId", + Description: "The unique identifier of the Compute Engine instance.", + Immutable: true, + }, + "instanceName": &dcl.Property{ + Type: "string", + GoName: "InstanceName", + Description: "The user-friendly name of the Compute Engine instance.", + Immutable: true, + }, + "publicEciesKey": &dcl.Property{ + Type: "string", + GoName: "PublicEciesKey", + Description: "The public ECIES key used for sharing data with this instance.", + Immutable: true, + }, + "publicKey": &dcl.Property{ + Type: "string", + GoName: "PublicKey", + Description: "The public RSA key used for sharing data with this instance.", + Immutable: true, + }, + }, + }, + }, + "isPreemptible": &dcl.Property{ + Type: "boolean", + GoName: "IsPreemptible", + ReadOnly: true, + Description: "Output only. Specifies that this instance group contains preemptible instances.", + Immutable: true, + }, + "machineType": &dcl.Property{ + Type: "string", + GoName: "MachineType", + Description: "Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. 
Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `n1-standard-2` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, `n1-standard-2`.", + Immutable: true, + }, + "managedGroupConfig": &dcl.Property{ + Type: "object", + GoName: "ManagedGroupConfig", + GoType: "ClusterConfigSecondaryWorkerConfigManagedGroupConfig", + ReadOnly: true, + Description: "Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.", + Immutable: true, + ServerDefault: true, + Properties: map[string]*dcl.Property{ + "instanceGroupManagerName": &dcl.Property{ + Type: "string", + GoName: "InstanceGroupManagerName", + ReadOnly: true, + Description: "Output only. The name of the Instance Group Manager for this group.", + Immutable: true, + }, + "instanceTemplateName": &dcl.Property{ + Type: "string", + GoName: "InstanceTemplateName", + ReadOnly: true, + Description: "Output only. The name of the Instance Template used for the Managed Instance Group.", + Immutable: true, + }, + }, + }, + "minCpuPlatform": &dcl.Property{ + Type: "string", + GoName: "MinCpuPlatform", + Description: "Optional. Specifies the minimum cpu platform for the Instance Group. See [Dataproc -> Minimum CPU Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).", + Immutable: true, + ServerDefault: true, + }, + "numInstances": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "NumInstances", + Description: "Optional. The number of VM instances in the instance group. 
For [HA cluster](/dataproc/docs/concepts/configuring-clusters/high-availability) [master_config](#FIELDS.master_config) groups, **must be set to 3**. For standard cluster [master_config](#FIELDS.master_config) groups, **must be set to 1**.", + Immutable: true, + }, + "preemptibility": &dcl.Property{ + Type: "string", + GoName: "Preemptibility", + GoType: "ClusterConfigSecondaryWorkerConfigPreemptibilityEnum", + Description: "Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is `NON_PREEMPTIBLE`. This default cannot be changed. The default value for secondary instances is `PREEMPTIBLE`. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE", + Immutable: true, + Enum: []string{ + "PREEMPTIBILITY_UNSPECIFIED", + "NON_PREEMPTIBLE", + "PREEMPTIBLE", + }, + }, + }, + }, + "securityConfig": &dcl.Property{ + Type: "object", + GoName: "SecurityConfig", + GoType: "ClusterConfigSecurityConfig", + Description: "Optional. Security settings for the cluster.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "identityConfig": &dcl.Property{ + Type: "object", + GoName: "IdentityConfig", + GoType: "ClusterConfigSecurityConfigIdentityConfig", + Description: "Optional. Identity related configuration, including service account based secure multi-tenancy user mappings.", + Immutable: true, + Required: []string{ + "userServiceAccountMapping", + }, + Properties: map[string]*dcl.Property{ + "userServiceAccountMapping": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "UserServiceAccountMapping", + Description: "Required. Map of user to service account.", + Immutable: true, + }, + }, + }, + "kerberosConfig": &dcl.Property{ + Type: "object", + GoName: "KerberosConfig", + GoType: "ClusterConfigSecurityConfigKerberosConfig", + Description: "Optional. 
Kerberos related configuration.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "crossRealmTrustAdminServer": &dcl.Property{ + Type: "string", + GoName: "CrossRealmTrustAdminServer", + Description: "Optional. The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship.", + Immutable: true, + }, + "crossRealmTrustKdc": &dcl.Property{ + Type: "string", + GoName: "CrossRealmTrustKdc", + Description: "Optional. The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship.", + Immutable: true, + }, + "crossRealmTrustRealm": &dcl.Property{ + Type: "string", + GoName: "CrossRealmTrustRealm", + Description: "Optional. The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust.", + Immutable: true, + }, + "crossRealmTrustSharedPassword": &dcl.Property{ + Type: "string", + GoName: "CrossRealmTrustSharedPassword", + Description: "Optional. The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship.", + Immutable: true, + }, + "enableKerberos": &dcl.Property{ + Type: "boolean", + GoName: "EnableKerberos", + Description: "Optional. Flag to indicate whether to Kerberize the cluster (default: false). Set this field to true to enable Kerberos on a cluster.", + Immutable: true, + }, + "kdcDbKey": &dcl.Property{ + Type: "string", + GoName: "KdcDbKey", + Description: "Optional. The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database.", + Immutable: true, + }, + "keyPassword": &dcl.Property{ + Type: "string", + GoName: "KeyPassword", + Description: "Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. 
For the self-signed certificate, this password is generated by Dataproc.", + Immutable: true, + }, + "keystore": &dcl.Property{ + Type: "string", + GoName: "Keystore", + Description: "Optional. The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.", + Immutable: true, + }, + "keystorePassword": &dcl.Property{ + Type: "string", + GoName: "KeystorePassword", + Description: "Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, this password is generated by Dataproc.", + Immutable: true, + }, + "kmsKey": &dcl.Property{ + Type: "string", + GoName: "KmsKey", + Description: "Optional. The uri of the KMS key used to encrypt various sensitive files.", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Cloudkms/CryptoKey", + Field: "selfLink", + }, + }, + }, + "realm": &dcl.Property{ + Type: "string", + GoName: "Realm", + Description: "Optional. The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm.", + Immutable: true, + }, + "rootPrincipalPassword": &dcl.Property{ + Type: "string", + GoName: "RootPrincipalPassword", + Description: "Optional. The Cloud Storage URI of a KMS encrypted file containing the root principal password.", + Immutable: true, + }, + "tgtLifetimeHours": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "TgtLifetimeHours", + Description: "Optional. The lifetime of the ticket granting ticket, in hours. If not specified, or user specifies 0, then default value 10 will be used.", + Immutable: true, + }, + "truststore": &dcl.Property{ + Type: "string", + GoName: "Truststore", + Description: "Optional. The Cloud Storage URI of the truststore file used for SSL encryption. 
If not provided, Dataproc will provide a self-signed certificate.", + Immutable: true, + }, + "truststorePassword": &dcl.Property{ + Type: "string", + GoName: "TruststorePassword", + Description: "Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc.", + Immutable: true, + }, + }, + }, + }, + }, + "softwareConfig": &dcl.Property{ + Type: "object", + GoName: "SoftwareConfig", + GoType: "ClusterConfigSoftwareConfig", + Description: "Optional. The config settings for software inside the cluster.", + Immutable: true, + ServerDefault: true, + Properties: map[string]*dcl.Property{ + "imageVersion": &dcl.Property{ + Type: "string", + GoName: "ImageVersion", + Description: "Optional. The version of software inside the cluster. It must be one of the supported [Dataproc Versions](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_dataproc_versions), such as \"1.2\" (including a subminor version, such as \"1.2.29\"), or the [\"preview\" version](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). If unspecified, it defaults to the latest Debian version.", + Immutable: true, + }, + "optionalComponents": &dcl.Property{ + Type: "array", + GoName: "OptionalComponents", + Description: "Optional. 
The set of components to activate on the cluster.", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "ClusterConfigSoftwareConfigOptionalComponentsEnum", + Enum: []string{ + "COMPONENT_UNSPECIFIED", + "ANACONDA", + "DOCKER", + "DRUID", + "FLINK", + "HBASE", + "HIVE_WEBHCAT", + "JUPYTER", + "KERBEROS", + "PRESTO", + "RANGER", + "SOLR", + "ZEPPELIN", + "ZOOKEEPER", + }, + }, + }, + "properties": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "Properties", + Description: "Optional. The properties to set on daemon config files. Property keys are specified in `prefix:property` format, for example `core:hadoop.tmp.dir`. The following are supported prefixes and their mappings: * capacity-scheduler: `capacity-scheduler.xml` * core: `core-site.xml` * distcp: `distcp-default.xml` * hdfs: `hdfs-site.xml` * hive: `hive-site.xml` * mapred: `mapred-site.xml` * pig: `pig.properties` * spark: `spark-defaults.conf` * yarn: `yarn-site.xml` For more information, see [Cluster properties](https://cloud.google.com/dataproc/docs/concepts/cluster-properties).", + Immutable: true, + }, + }, + }, + "stagingBucket": &dcl.Property{ + Type: "string", + GoName: "StagingBucket", + Description: "Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see [Dataproc staging bucket](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). 
**This field requires a Cloud Storage bucket name, not a URI to a Cloud Storage bucket.**", + Immutable: true, + ServerDefault: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Storage/Bucket", + Field: "name", + }, + }, + }, + "tempBucket": &dcl.Property{ + Type: "string", + GoName: "TempBucket", + Description: "Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket. **This field requires a Cloud Storage bucket name, not a URI to a Cloud Storage bucket.**", + Immutable: true, + ServerDefault: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Storage/Bucket", + Field: "name", + }, + }, + }, + "workerConfig": &dcl.Property{ + Type: "object", + GoName: "WorkerConfig", + GoType: "ClusterConfigWorkerConfig", + Description: "Optional. The Compute Engine config settings for worker instances in a cluster.", + Immutable: true, + ServerDefault: true, + Properties: map[string]*dcl.Property{ + "accelerators": &dcl.Property{ + Type: "array", + GoName: "Accelerators", + Description: "Optional. 
The Compute Engine accelerator configuration for these instances.", + Immutable: true, + ServerDefault: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "object", + GoType: "ClusterConfigWorkerConfigAccelerators", + Properties: map[string]*dcl.Property{ + "acceleratorCount": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "AcceleratorCount", + Description: "The number of the accelerator cards of this type exposed to this instance.", + Immutable: true, + }, + "acceleratorType": &dcl.Property{ + Type: "string", + GoName: "AcceleratorType", + Description: "Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See [Compute Engine AcceleratorTypes](https://cloud.google.com/compute/docs/reference/beta/acceleratorTypes). Examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * `nvidia-tesla-k80` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, `nvidia-tesla-k80`.", + Immutable: true, + }, + }, + }, + }, + "diskConfig": &dcl.Property{ + Type: "object", + GoName: "DiskConfig", + GoType: "ClusterConfigWorkerConfigDiskConfig", + Description: "Optional. Disk option config settings.", + Immutable: true, + ServerDefault: true, + Properties: map[string]*dcl.Property{ + "bootDiskSizeGb": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "BootDiskSizeGb", + Description: "Optional. Size in GB of the boot disk (default is 500GB).", + Immutable: true, + }, + "bootDiskType": &dcl.Property{ + Type: "string", + GoName: "BootDiskType", + Description: "Optional. Type of the boot disk (default is \"pd-standard\"). 
Valid values: \"pd-balanced\" (Persistent Disk Balanced Solid State Drive), \"pd-ssd\" (Persistent Disk Solid State Drive), or \"pd-standard\" (Persistent Disk Hard Disk Drive). See [Disk types](https://cloud.google.com/compute/docs/disks#disk-types).", + Immutable: true, + }, + "localSsdInterface": &dcl.Property{ + Type: "string", + GoName: "LocalSsdInterface", + Description: "Optional. Interface type of local SSDs (default is \"scsi\"). Valid values: \"scsi\" (Small Computer System Interface), \"nvme\" (Non-Volatile Memory Express). See [local SSD performance](https://cloud.google.com/compute/docs/disks/local-ssd#performance).", + Immutable: true, + }, + "numLocalSsds": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "NumLocalSsds", + Description: "Optional. Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.", + Immutable: true, + ServerDefault: true, + }, + }, + }, + "image": &dcl.Property{ + Type: "string", + GoName: "Image", + Description: "Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id]` * `projects/[project_id]/global/images/[image-id]` * `image-id` Image family examples. 
Dataproc will use the most recent image from the family: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name]` * `projects/[project_id]/global/images/family/[custom-image-family-name]` If the URI is unspecified, it will be inferred from `SoftwareConfig.image_version` or the system default.", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Compute/Image", + Field: "selfLink", + }, + }, + }, + "instanceNames": &dcl.Property{ + Type: "array", + GoName: "InstanceNames", + ReadOnly: true, + Description: "Output only. The list of instance names. Dataproc derives the names from `cluster_name`, `num_instances`, and the instance group.", + Immutable: true, + ServerDefault: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Compute/Instance", + Field: "selfLink", + }, + }, + }, + }, + "instanceReferences": &dcl.Property{ + Type: "array", + GoName: "InstanceReferences", + ReadOnly: true, + Description: "Output only. 
List of references to Compute Engine instances.", + Immutable: true, + ListType: "list", + Items: &dcl.Property{ + Type: "object", + GoType: "ClusterConfigWorkerConfigInstanceReferences", + Properties: map[string]*dcl.Property{ + "instanceId": &dcl.Property{ + Type: "string", + GoName: "InstanceId", + Description: "The unique identifier of the Compute Engine instance.", + Immutable: true, + }, + "instanceName": &dcl.Property{ + Type: "string", + GoName: "InstanceName", + Description: "The user-friendly name of the Compute Engine instance.", + Immutable: true, + }, + "publicEciesKey": &dcl.Property{ + Type: "string", + GoName: "PublicEciesKey", + Description: "The public ECIES key used for sharing data with this instance.", + Immutable: true, + }, + "publicKey": &dcl.Property{ + Type: "string", + GoName: "PublicKey", + Description: "The public RSA key used for sharing data with this instance.", + Immutable: true, + }, + }, + }, + }, + "isPreemptible": &dcl.Property{ + Type: "boolean", + GoName: "IsPreemptible", + ReadOnly: true, + Description: "Output only. Specifies that this instance group contains preemptible instances.", + Immutable: true, + }, + "machineType": &dcl.Property{ + Type: "string", + GoName: "MachineType", + Description: "Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. 
Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `n1-standard-2` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, `n1-standard-2`.", + Immutable: true, + }, + "managedGroupConfig": &dcl.Property{ + Type: "object", + GoName: "ManagedGroupConfig", + GoType: "ClusterConfigWorkerConfigManagedGroupConfig", + ReadOnly: true, + Description: "Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.", + Immutable: true, + ServerDefault: true, + Properties: map[string]*dcl.Property{ + "instanceGroupManagerName": &dcl.Property{ + Type: "string", + GoName: "InstanceGroupManagerName", + ReadOnly: true, + Description: "Output only. The name of the Instance Group Manager for this group.", + Immutable: true, + }, + "instanceTemplateName": &dcl.Property{ + Type: "string", + GoName: "InstanceTemplateName", + ReadOnly: true, + Description: "Output only. The name of the Instance Template used for the Managed Instance Group.", + Immutable: true, + }, + }, + }, + "minCpuPlatform": &dcl.Property{ + Type: "string", + GoName: "MinCpuPlatform", + Description: "Optional. Specifies the minimum cpu platform for the Instance Group. See [Dataproc -> Minimum CPU Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).", + Immutable: true, + ServerDefault: true, + }, + "numInstances": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "NumInstances", + Description: "Optional. The number of VM instances in the instance group. 
For [HA cluster](/dataproc/docs/concepts/configuring-clusters/high-availability) [master_config](#FIELDS.master_config) groups, **must be set to 3**. For standard cluster [master_config](#FIELDS.master_config) groups, **must be set to 1**.", + Immutable: true, + }, + "preemptibility": &dcl.Property{ + Type: "string", + GoName: "Preemptibility", + GoType: "ClusterConfigWorkerConfigPreemptibilityEnum", + Description: "Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is `NON_PREEMPTIBLE`. This default cannot be changed. The default value for secondary instances is `PREEMPTIBLE`. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE", + Immutable: true, + Enum: []string{ + "PREEMPTIBILITY_UNSPECIFIED", + "NON_PREEMPTIBLE", + "PREEMPTIBLE", + }, + }, + }, + }, + }, + }, + "labels": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "Labels", + Description: "Optional. The labels to associate with this cluster. Label **keys** must contain 1 to 63 characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). Label **values** may be empty, but, if present, must contain 1 to 63 characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a cluster.", + }, + "location": &dcl.Property{ + Type: "string", + GoName: "Location", + Description: "The location for the resource, usually a GCP region.", + Immutable: true, + Parameter: true, + }, + "metrics": &dcl.Property{ + Type: "object", + GoName: "Metrics", + GoType: "ClusterMetrics", + ReadOnly: true, + Description: "Output only. Contains cluster daemon metrics such as HDFS and YARN stats. **Beta Feature**: This report is available for testing purposes only. 
It may be changed before final release.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "hdfsMetrics": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "HdfsMetrics", + Description: "The HDFS metrics.", + Immutable: true, + }, + "yarnMetrics": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "YarnMetrics", + Description: "The YARN metrics.", + Immutable: true, + }, + }, + }, + "name": &dcl.Property{ + Type: "string", + GoName: "Name", + Description: "Required. The cluster name. Cluster names within a project must be unique. Names of deleted clusters can be reused.", + Immutable: true, + }, + "project": &dcl.Property{ + Type: "string", + GoName: "Project", + Description: "Required. The Google Cloud Platform project ID that the cluster belongs to.", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Cloudresourcemanager/Project", + Field: "name", + Parent: true, + }, + }, + Parameter: true, + }, + "status": &dcl.Property{ + Type: "object", + GoName: "Status", + GoType: "ClusterStatus", + ReadOnly: true, + Description: "Output only. Cluster status.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "detail": &dcl.Property{ + Type: "string", + GoName: "Detail", + ReadOnly: true, + Description: "Optional. Output only. Details of cluster's state.", + Immutable: true, + }, + "state": &dcl.Property{ + Type: "string", + GoName: "State", + GoType: "ClusterStatusStateEnum", + ReadOnly: true, + Description: "Output only. The cluster's state. 
Possible values: UNKNOWN, CREATING, RUNNING, ERROR, DELETING, UPDATING, STOPPING, STOPPED, STARTING", + Immutable: true, + Enum: []string{ + "UNKNOWN", + "CREATING", + "RUNNING", + "ERROR", + "DELETING", + "UPDATING", + "STOPPING", + "STOPPED", + "STARTING", + }, + }, + "stateStartTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "StateStartTime", + ReadOnly: true, + Description: "Output only. Time when this state was entered (see JSON representation of [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).", + Immutable: true, + }, + "substate": &dcl.Property{ + Type: "string", + GoName: "Substate", + GoType: "ClusterStatusSubstateEnum", + ReadOnly: true, + Description: "Output only. Additional state information that includes status reported by the agent. Possible values: UNSPECIFIED, UNHEALTHY, STALE_STATUS", + Immutable: true, + Enum: []string{ + "UNSPECIFIED", + "UNHEALTHY", + "STALE_STATUS", + }, + }, + }, + }, + "statusHistory": &dcl.Property{ + Type: "array", + GoName: "StatusHistory", + ReadOnly: true, + Description: "Output only. The previous cluster status.", + Immutable: true, + ListType: "list", + Items: &dcl.Property{ + Type: "object", + GoType: "ClusterStatusHistory", + Properties: map[string]*dcl.Property{ + "detail": &dcl.Property{ + Type: "string", + GoName: "Detail", + ReadOnly: true, + Description: "Optional. Output only. Details of cluster's state.", + Immutable: true, + }, + "state": &dcl.Property{ + Type: "string", + GoName: "State", + GoType: "ClusterStatusHistoryStateEnum", + ReadOnly: true, + Description: "Output only. The cluster's state. 
Possible values: UNKNOWN, CREATING, RUNNING, ERROR, DELETING, UPDATING, STOPPING, STOPPED, STARTING", + Immutable: true, + Enum: []string{ + "UNKNOWN", + "CREATING", + "RUNNING", + "ERROR", + "DELETING", + "UPDATING", + "STOPPING", + "STOPPED", + "STARTING", + }, + }, + "stateStartTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "StateStartTime", + ReadOnly: true, + Description: "Output only. Time when this state was entered (see JSON representation of [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).", + Immutable: true, + }, + "substate": &dcl.Property{ + Type: "string", + GoName: "Substate", + GoType: "ClusterStatusHistorySubstateEnum", + ReadOnly: true, + Description: "Output only. Additional state information that includes status reported by the agent. Possible values: UNSPECIFIED, UNHEALTHY, STALE_STATUS", + Immutable: true, + Enum: []string{ + "UNSPECIFIED", + "UNHEALTHY", + "STALE_STATUS", + }, + }, + }, + }, + }, + "virtualClusterConfig": &dcl.Property{ + Type: "object", + GoName: "VirtualClusterConfig", + GoType: "ClusterVirtualClusterConfig", + Description: "Optional. The virtual cluster config is used when creating a Dataproc cluster that does not directly control the underlying compute resources, for example, when creating a [Dataproc-on-GKE cluster](https://cloud.google.com/dataproc/docs/guides/dpgke/dataproc-gke). Dataproc may set default values, and values may change when clusters are updated. Exactly one of config or virtual_cluster_config must be specified.", + Immutable: true, + Required: []string{ + "kubernetesClusterConfig", + }, + Properties: map[string]*dcl.Property{ + "auxiliaryServicesConfig": &dcl.Property{ + Type: "object", + GoName: "AuxiliaryServicesConfig", + GoType: "ClusterVirtualClusterConfigAuxiliaryServicesConfig", + Description: "Optional. 
Configuration of auxiliary services used by this cluster.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "metastoreConfig": &dcl.Property{ + Type: "object", + GoName: "MetastoreConfig", + GoType: "ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig", + Description: "Optional. The Hive Metastore configuration for this workload.", + Immutable: true, + Required: []string{ + "dataprocMetastoreService", + }, + Properties: map[string]*dcl.Property{ + "dataprocMetastoreService": &dcl.Property{ + Type: "string", + GoName: "DataprocMetastoreService", + Description: "Required. Resource name of an existing Dataproc Metastore service. Example: * `projects/[project_id]/locations/[dataproc_region]/services/[service-name]`", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Metastore/Service", + Field: "selfLink", + }, + }, + }, + }, + }, + "sparkHistoryServerConfig": &dcl.Property{ + Type: "object", + GoName: "SparkHistoryServerConfig", + GoType: "ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig", + Description: "Optional. The Spark History Server configuration for the workload.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "dataprocCluster": &dcl.Property{ + Type: "string", + GoName: "DataprocCluster", + Description: "Optional. Resource name of an existing Dataproc Cluster to act as a Spark History Server for the workload. Example: * `projects/[project_id]/regions/[region]/clusters/[cluster_name]`", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Dataproc/Cluster", + Field: "selfLink", + }, + }, + }, + }, + }, + }, + }, + "kubernetesClusterConfig": &dcl.Property{ + Type: "object", + GoName: "KubernetesClusterConfig", + GoType: "ClusterVirtualClusterConfigKubernetesClusterConfig", + Description: "Required. 
The configuration for running the Dataproc cluster on Kubernetes.", + Immutable: true, + Required: []string{ + "gkeClusterConfig", + }, + Properties: map[string]*dcl.Property{ + "gkeClusterConfig": &dcl.Property{ + Type: "object", + GoName: "GkeClusterConfig", + GoType: "ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig", + Description: "Required. The configuration for running the Dataproc cluster on GKE.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "gkeClusterTarget": &dcl.Property{ + Type: "string", + GoName: "GkeClusterTarget", + Description: "Optional. A target GKE cluster to deploy to. It must be in the same project and region as the Dataproc cluster (the GKE cluster can be zonal or regional). Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Container/Cluster", + Field: "selfLink", + }, + }, + }, + "nodePoolTarget": &dcl.Property{ + Type: "array", + GoName: "NodePoolTarget", + Description: "Optional. GKE node pools where workloads will be scheduled. At least one node pool must be assigned the `DEFAULT` GkeNodePoolTarget.Role. If a `GkeNodePoolTarget` is not specified, Dataproc constructs a `DEFAULT` `GkeNodePoolTarget`. Each role can be given to only one `GkeNodePoolTarget`. All node pools must have the same location settings.", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "object", + GoType: "ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget", + Required: []string{ + "nodePool", + "roles", + }, + Properties: map[string]*dcl.Property{ + "nodePool": &dcl.Property{ + Type: "string", + GoName: "NodePool", + Description: "Required. The target GKE node pool. 
Format: 'projects/{project}/locations/{location}/clusters/{cluster}/nodePools/{node_pool}'", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Container/NodePool", + Field: "selfLink", + }, + }, + }, + "nodePoolConfig": &dcl.Property{ + Type: "object", + GoName: "NodePoolConfig", + GoType: "ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig", + Description: "Input only. The configuration for the GKE node pool. If specified, Dataproc attempts to create a node pool with the specified shape. If one with the same name already exists, it is verified against all specified fields. If a field differs, the virtual cluster creation will fail. If omitted, any node pool with the specified name is used. If a node pool with the specified name does not exist, Dataproc create a node pool with default values. This is an input only field. It will not be returned by the API.", + Immutable: true, + Unreadable: true, + Properties: map[string]*dcl.Property{ + "autoscaling": &dcl.Property{ + Type: "object", + GoName: "Autoscaling", + GoType: "ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling", + Description: "Optional. The autoscaler configuration for this node pool. The autoscaler is enabled only when a valid configuration is present.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "maxNodeCount": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "MaxNodeCount", + Description: "The maximum number of nodes in the node pool. Must be >= min_node_count, and must be > 0. **Note:** Quota must be sufficient to scale up the cluster.", + Immutable: true, + }, + "minNodeCount": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "MinNodeCount", + Description: "The minimum number of nodes in the node pool. 
Must be >= 0 and <= max_node_count.", + Immutable: true, + }, + }, + }, + "config": &dcl.Property{ + Type: "object", + GoName: "Config", + GoType: "ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig", + Description: "Optional. The node pool configuration.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "accelerators": &dcl.Property{ + Type: "array", + GoName: "Accelerators", + Description: "Optional. A list of [hardware accelerators](https://cloud.google.com/compute/docs/gpus) to attach to each node.", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "object", + GoType: "ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators", + Properties: map[string]*dcl.Property{ + "acceleratorCount": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "AcceleratorCount", + Description: "The number of accelerator cards exposed to an instance.", + Immutable: true, + }, + "acceleratorType": &dcl.Property{ + Type: "string", + GoName: "AcceleratorType", + Description: "The accelerator type resource namename (see GPUs on Compute Engine).", + Immutable: true, + }, + "gpuPartitionSize": &dcl.Property{ + Type: "string", + GoName: "GpuPartitionSize", + Description: "Size of partitions to create on the GPU. Valid values are described in the NVIDIA [mig user guide](https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning).", + Immutable: true, + }, + }, + }, + }, + "bootDiskKmsKey": &dcl.Property{ + Type: "string", + GoName: "BootDiskKmsKey", + Description: "Optional. The [Customer Managed Encryption Key (CMEK)] (https://cloud.google.com/kubernetes-engine/docs/how-to/using-cmek) used to encrypt the boot disk attached to each node in the node pool. 
Specify the key using the following format: `projects/KEY_PROJECT_ID/locations/LOCATION/keyRings/RING_NAME/cryptoKeys/KEY_NAME`.", + Immutable: true, + }, + "ephemeralStorageConfig": &dcl.Property{ + Type: "object", + GoName: "EphemeralStorageConfig", + GoType: "ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig", + Description: "Optional. Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "localSsdCount": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "LocalSsdCount", + Description: "Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD is 375 GB in size. If zero, it means to disable using local SSDs as ephemeral storage.", + Immutable: true, + }, + }, + }, + "localSsdCount": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "LocalSsdCount", + Description: "Optional. The number of local SSD disks to attach to the node, which is limited by the maximum number of disks allowable per zone (see [Adding Local SSDs](https://cloud.google.com/compute/docs/disks/local-ssd)).", + Immutable: true, + }, + "machineType": &dcl.Property{ + Type: "string", + GoName: "MachineType", + Description: "Optional. The name of a Compute Engine [machine type](https://cloud.google.com/compute/docs/machine-types).", + Immutable: true, + }, + "minCpuPlatform": &dcl.Property{ + Type: "string", + GoName: "MinCpuPlatform", + Description: "Optional. [Minimum CPU platform](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform) to be used by this instance. The instance may be scheduled on the specified or a newer CPU platform. 
Specify the friendly names of CPU platforms, such as \"Intel Haswell\"` or Intel Sandy Bridge\".", + Immutable: true, + }, + "preemptible": &dcl.Property{ + Type: "boolean", + GoName: "Preemptible", + Description: "Optional. Whether the nodes are created as legacy [preemptible VM instances] (https://cloud.google.com/compute/docs/instances/preemptible). Also see Spot VMs, preemptible VM instances without a maximum lifetime. Legacy and Spot preemptible nodes cannot be used in a node pool with the `CONTROLLER` [role] (/dataproc/docs/reference/rest/v1/projects.regions.clusters#role) or in the DEFAULT node pool if the CONTROLLER role is not assigned (the DEFAULT node pool will assume the CONTROLLER role).", + Immutable: true, + }, + "spot": &dcl.Property{ + Type: "boolean", + GoName: "Spot", + Description: "Optional. Whether the nodes are created as [Spot VM instances] (https://cloud.google.com/compute/docs/instances/spot). Spot VMs are the latest update to legacy preemptible VMs. Spot VMs do not have a maximum lifetime. Legacy and Spot preemptible nodes cannot be used in a node pool with the `CONTROLLER` [role](/dataproc/docs/reference/rest/v1/projects.regions.clusters#role) or in the DEFAULT node pool if the CONTROLLER role is not assigned (the DEFAULT node pool will assume the CONTROLLER role).", + Immutable: true, + }, + }, + }, + "locations": &dcl.Property{ + Type: "array", + GoName: "Locations", + Description: "Optional. The list of Compute Engine [zones](https://cloud.google.com/compute/docs/zones#available) where node pool nodes associated with a Dataproc on GKE virtual cluster will be located. **Note:** All node pools associated with a virtual cluster must be located in the same region as the virtual cluster, and they must be located in the same zone within that region. 
If a location is not specified during node pool creation, Dataproc on GKE will choose the zone.", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + }, + }, + "roles": &dcl.Property{ + Type: "array", + GoName: "Roles", + Description: "Required. The roles associated with the GKE node pool.", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum", + Enum: []string{ + "ROLE_UNSPECIFIED", + "DEFAULT", + "CONTROLLER", + "SPARK_DRIVER", + "SPARK_EXECUTOR", + }, + }, + }, + }, + }, + }, + }, + }, + "kubernetesNamespace": &dcl.Property{ + Type: "string", + GoName: "KubernetesNamespace", + Description: "Optional. A namespace within the Kubernetes cluster to deploy into. If this namespace does not exist, it is created. If it exists, Dataproc verifies that another Dataproc VirtualCluster is not installed into it. If not specified, the name of the Dataproc Cluster is used.", + Immutable: true, + }, + "kubernetesSoftwareConfig": &dcl.Property{ + Type: "object", + GoName: "KubernetesSoftwareConfig", + GoType: "ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig", + Description: "Optional. The software configuration for this Dataproc cluster running on Kubernetes.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "componentVersion": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "ComponentVersion", + Description: "The components that should be installed in this Dataproc cluster. The key must be a string from the KubernetesComponent enumeration. The value is the version of the software to be installed. 
At least one entry must be specified.", + Immutable: true, + }, + "properties": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "Properties", + Description: "The properties to set on daemon config files. Property keys are specified in `prefix:property` format, for example `spark:spark.kubernetes.container.image`. The following are supported prefixes and their mappings: * spark: `spark-defaults.conf` For more information, see [Cluster properties](https://cloud.google.com/dataproc/docs/concepts/cluster-properties).", + Immutable: true, + }, + }, + }, + }, + }, + "stagingBucket": &dcl.Property{ + Type: "string", + GoName: "StagingBucket", + Description: "Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see [Dataproc staging and temp buckets](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). 
**This field requires a Cloud Storage bucket name, not a `gs://...` URI to a Cloud Storage bucket.**", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Storage/Bucket", + Field: "name", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } +} diff --git a/mmv1/third_party/terraform/services/dataproc/dataproc_utils.go.tmpl b/mmv1/third_party/terraform/services/dataproc/dataproc_utils.go.tmpl new file mode 100644 index 000000000000..ab646deddc6f --- /dev/null +++ b/mmv1/third_party/terraform/services/dataproc/dataproc_utils.go.tmpl @@ -0,0 +1,32 @@ +package dataproc + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +func encodeJobCreateRequest(m map[string]any) map[string]any { + req := make(map[string]any, 1) + dcl.PutMapEntry(req, []string{"job"}, m) + return req +} + +{{- if eq $.TargetVersionName "ga" }} +/* +{{- end }} +func expandClusterProject(_ *Client, project *string, _ *Cluster) (*string, error) { + return dcl.SelfLinkToName(project), nil +} + +// CompareClusterConfigMasterConfigNewStyle exposes the compareClusterConfigMasterConfigNewStyle function for testing. +func CompareClusterConfigMasterConfigNewStyle(d, a any, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + return compareClusterConfigMasterConfigNewStyle(d, a, fn) +} +{{- if eq $.TargetVersionName "ga" }} +*/ +{{- end }} + +func canonicalizeSoftwareConfigProperties(o, n any) bool { + // This field is a map that contains both client provided and server provided values. It + // is also immutable, so always return "no diff". 
+ return true +} diff --git a/mmv1/third_party/terraform/services/dataproc/provider_dcl_client_creation.go b/mmv1/third_party/terraform/services/dataproc/provider_dcl_client_creation.go new file mode 100644 index 000000000000..f75e4dc8e066 --- /dev/null +++ b/mmv1/third_party/terraform/services/dataproc/provider_dcl_client_creation.go @@ -0,0 +1,30 @@ +package dataproc + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "time" +) + +func NewDCLDataprocClient(config *transport_tpg.Config, userAgent, billingProject string, timeout time.Duration) *Client { + configOptions := []dcl.ConfigOption{ + dcl.WithHTTPClient(config.Client), + dcl.WithUserAgent(userAgent), + dcl.WithLogger(dcl.DCLLogger{}), + dcl.WithBasePath(config.DataprocBasePath), + } + + if timeout != 0 { + configOptions = append(configOptions, dcl.WithTimeout(timeout)) + } + + if config.UserProjectOverride { + configOptions = append(configOptions, dcl.WithUserProjectOverride()) + if billingProject != "" { + configOptions = append(configOptions, dcl.WithBillingProject(billingProject)) + } + } + + dclConfig := dcl.NewConfig(configOptions...) 
+ return NewClient(dclConfig) +} diff --git a/mmv1/third_party/terraform/services/dataproc/resource_dataproc_workflow_template.go.tmpl b/mmv1/third_party/terraform/services/dataproc/resource_dataproc_workflow_template.go.tmpl new file mode 100644 index 000000000000..f27b46655afe --- /dev/null +++ b/mmv1/third_party/terraform/services/dataproc/resource_dataproc_workflow_template.go.tmpl @@ -0,0 +1,4377 @@ +package dataproc + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceDataprocWorkflowTemplate() *schema.Resource { + return &schema.Resource{ + Create: resourceDataprocWorkflowTemplateCreate, + Read: resourceDataprocWorkflowTemplateRead, + Update: resourceDataprocWorkflowTemplateUpdate, + Delete: resourceDataprocWorkflowTemplateDelete, + + Importer: &schema.ResourceImporter{ + State: resourceDataprocWorkflowTemplateImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + tpgresource.SetLabelsDiff, + ), + SchemaVersion: 1, + StateUpgraders: []schema.StateUpgrader{ + { + Type: resourceDataprocWorkflowTemplateResourceV0().CoreConfigSchema().ImpliedType(), + Upgrade: ResourceDataprocWorkflowTemplateUpgradeV0, + Version: 0, + }, + }, + + Schema: map[string]*schema.Schema{ + "jobs": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. 
The Directed Acyclic Graph of Jobs to submit.", + Elem: DataprocWorkflowTemplateJobsSchema(), + }, + + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The location for the resource", + }, + + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. * For `projects.regions.workflowTemplates`, the resource name of the template has the following format: `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}` * For `projects.locations.workflowTemplates`, the resource name of the template has the following format: `projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`", + }, + + "placement": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. WorkflowTemplate scheduling information.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementSchema(), + }, + + "dag_timeout": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. Timeout duration for the DAG of jobs, expressed in seconds (see [JSON representation of duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). The timeout duration must be from 10 minutes (\"600s\") to 24 hours (\"86400s\"). The timer begins when the first job is submitted. 
If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a [managed cluster](/dataproc/docs/concepts/workflows/using-workflows#configuring_or_selecting_a_cluster), the cluster is deleted.", + }, + + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + ForceNew: true, + Description: "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + }, + + "encryption_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The encryption configuration for the workflow template.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateEncryptionConfigSchema(), + }, + + "parameters": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated.", + Elem: DataprocWorkflowTemplateParametersSchema(), + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The project for the resource", + }, + + "version": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Output only. The current version of this workflow template.", + Deprecated: "version is not useful as a configurable field, and will be removed in the future.", + }, + + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time template was created.", + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: "Optional. The labels to associate with this template. These labels will be propagated to all jobs and clusters created by the workflow instance. 
Label **keys** must contain 1 to 63 characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). Label **values** may be empty, but, if present, must contain 1 to 63 characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a template.\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field `effective_labels` for all of the labels present on the resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "terraform_labels": { + Type: schema.TypeMap, + Computed: true, + Description: "The combination of labels configured directly on the resource and default labels configured on the provider.", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time template was last updated.", + }, + }, + } +} + +func DataprocWorkflowTemplateJobsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "step_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. The step id. The id must be unique among all jobs within the template. The step id is used as prefix for job id, as job `goog-dataproc-workflow-step-id` label, and in prerequisiteStepIds field from other steps. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters.", + }, + + "hadoop_job": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Job is a Hadoop job.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsHadoopJobSchema(), + }, + + "hive_job": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. 
Job is a Hive job.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsHiveJobSchema(), + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. The labels to associate with this job. Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: p{Ll}p{Lo}{0,62} Label values must be between 1 and 63 characters long, and must conform to the following regular expression: [p{Ll}p{Lo}p{N}_-]{0,63} No more than 32 labels can be associated with a given job.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "pig_job": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Job is a Pig job.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsPigJobSchema(), + }, + + "prerequisite_step_ids": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The optional list of prerequisite job step_ids. If not specified, the job will start at the beginning of workflow.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "presto_job": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Job is a Presto job.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsPrestoJobSchema(), + }, + + "pyspark_job": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Job is a PySpark job.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsPysparkJobSchema(), + }, + + "scheduling": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Job scheduling configuration.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsSchedulingSchema(), + }, + + "spark_job": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. 
Job is a Spark job.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsSparkJobSchema(), + }, + + "spark_r_job": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Job is a SparkR job.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsSparkRJobSchema(), + }, + + "spark_sql_job": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Job is a SparkSql job.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsSparkSqlJobSchema(), + }, + }, + } +} + +func DataprocWorkflowTemplateJobsHadoopJobSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "archive_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "args": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The arguments to pass to the driver. Do not include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "jar_file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. 
Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "logging_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The runtime log config for job execution.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsHadoopJobLoggingConfigSchema(), + }, + + "main_class": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in `jar_file_uris`.", + }, + + "main_jar_file_uri": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'", + }, + + "properties": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsHadoopJobLoggingConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "driver_log_levels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. 
Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsHiveJobSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "continue_on_failure": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: "Optional. Whether to continue executing queries if a query fails. The default value is `false`. Setting to `true` can be useful when executing independent parallel queries.", + }, + + "jar_file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "properties": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. A mapping of property names and values, used to configure Hive. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes in user code.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "query_file_uri": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The HCFS URI of the script that contains Hive queries.", + }, + + "query_list": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "A list of queries.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsHiveJobQueryListSchema(), + }, + + "script_variables": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. 
Mapping of query variable names to values (equivalent to the Hive command: `SET name=\"value\";`).", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsHiveJobQueryListSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "queries": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsPigJobSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "continue_on_failure": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: "Optional. Whether to continue executing queries if a query fails. The default value is `false`. Setting to `true` can be useful when executing independent parallel queries.", + }, + + "jar_file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "logging_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The runtime log config for job execution.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsPigJobLoggingConfigSchema(), + }, + + "properties": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. A mapping of property names to values, used to configure Pig. 
Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "query_file_uri": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The HCFS URI of the script that contains the Pig queries.", + }, + + "query_list": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "A list of queries.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsPigJobQueryListSchema(), + }, + + "script_variables": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. Mapping of query variable names to values (equivalent to the Pig command: `name=[value]`).", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsPigJobLoggingConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "driver_log_levels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsPigJobQueryListSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "queries": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. 
Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsPrestoJobSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "client_tags": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Presto client tags to attach to this query", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "continue_on_failure": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: "Optional. Whether to continue executing queries if a query fails. The default value is `false`. Setting to `true` can be useful when executing independent parallel queries.", + }, + + "logging_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The runtime log config for job execution.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsPrestoJobLoggingConfigSchema(), + }, + + "output_format": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The format in which query output will be displayed. See the Presto documentation for supported output formats", + }, + + "properties": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. A mapping of property names to values. 
Used to set Presto [session properties](https://prestodb.io/docs/current/sql/set-session.html) Equivalent to using the --session flag in the Presto CLI", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "query_file_uri": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The HCFS URI of the script that contains SQL queries.", + }, + + "query_list": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "A list of queries.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsPrestoJobQueryListSchema(), + }, + }, + } +} + +func DataprocWorkflowTemplateJobsPrestoJobLoggingConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "driver_log_levels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsPrestoJobQueryListSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "queries": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. 
Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsPysparkJobSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "main_python_file_uri": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. The HCFS URI of the main Python file to use as the driver. Must be a .py file.", + }, + + "archive_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "args": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "jar_file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "logging_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. 
The runtime log config for job execution.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsPysparkJobLoggingConfigSchema(), + }, + + "properties": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "python_file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsPysparkJobLoggingConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "driver_log_levels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsSchedulingSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_failures_per_hour": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "Optional. Maximum number of times per hour a driver may be restarted as a result of driver exiting with non-zero code before job is reported failed. A job may be reported as thrashing if driver exits with non-zero code 4 times within 10 minute window. Maximum value is 10.", + }, + + "max_failures_total": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "Optional. 
Maximum number of times in total a driver may be restarted as a result of driver exiting with non-zero code before job is reported failed. Maximum value is 240.", + }, + }, + } +} + +func DataprocWorkflowTemplateJobsSparkJobSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "archive_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "args": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "jar_file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "logging_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The runtime log config for job execution.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsSparkJobLoggingConfigSchema(), + }, + + "main_class": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The name of the driver's main class. 
The jar file that contains the class must be in the default CLASSPATH or specified in `jar_file_uris`.", + }, + + "main_jar_file_uri": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The HCFS URI of the jar file that contains the main class.", + }, + + "properties": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsSparkJobLoggingConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "driver_log_levels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsSparkRJobSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "main_r_file_uri": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. The HCFS URI of the main R file to use as the driver. Must be a .R file.", + }, + + "archive_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "args": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The arguments to pass to the driver. 
Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "logging_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The runtime log config for job execution.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsSparkRJobLoggingConfigSchema(), + }, + + "properties": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. A mapping of property names to values, used to configure SparkR. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsSparkRJobLoggingConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "driver_log_levels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsSparkSqlJobSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "jar_file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. 
HCFS URIs of jar files to be added to the Spark CLASSPATH.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "logging_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The runtime log config for job execution.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsSparkSqlJobLoggingConfigSchema(), + }, + + "properties": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "query_file_uri": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The HCFS URI of the script that contains SQL queries.", + }, + + "query_list": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "A list of queries.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsSparkSqlJobQueryListSchema(), + }, + + "script_variables": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET `name=\"value\";`).", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsSparkSqlJobLoggingConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "driver_log_levels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. 
Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsSparkSqlJobQueryListSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "queries": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cluster_selector": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. A selector that chooses target cluster for jobs based on metadata. The selector is evaluated at the time each job is submitted.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementClusterSelectorSchema(), + }, + + "managed_cluster": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "A cluster that is managed by the workflow.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterSchema(), + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementClusterSelectorSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cluster_labels": { + Type: schema.TypeMap, + Required: true, + ForceNew: true, + Description: "Required. The cluster labels. 
Cluster must have all labels to match.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "zone": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The zone where workflow process executes. This parameter does not affect the selection of the cluster. If unspecified, the zone of the first cluster matching the selector is used.", + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cluster_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. The cluster name prefix. A unique cluster name will be formed by appending a random suffix. The name must contain only lower-case letters (a-z), numbers (0-9), and hyphens (-). Must begin with a letter. Cannot begin or end with hyphen. Must consist of between 2 and 35 characters.", + }, + + "config": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. The cluster configuration.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigSchema(), + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. The labels to associate with this cluster. Label keys must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: p{Ll}p{Lo}{0,62} Label values must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: [p{Ll}p{Lo}p{N}_-]{0,63} No more than 32 labels can be associated with a given cluster.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "autoscaling_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. 
Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigSchema(), + }, + + "encryption_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Encryption settings for the cluster.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigSchema(), + }, + + "endpoint_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Port/endpoint configuration for this cluster", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigEndpointConfigSchema(), + }, + + "gce_cluster_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The shared Compute Engine config settings for all instances in a cluster.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigSchema(), + }, + +{{- if ne $.TargetVersionName "ga" }} + "gke_cluster_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. BETA. The Kubernetes Engine config for Dataproc clusters deployed to Kubernetes. Setting this is considered mutually exclusive with Compute Engine-based options such as `gce_cluster_config`, `master_config`, `worker_config`, `secondary_worker_config`, and `autoscaling_config`.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigSchema(), + }, + +{{- end }} + "initialization_actions": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. 
You can test a node's `role` metadata to run an executable on a master or worker node, as shown below using `curl` (you can also use `wget`): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if [[ \"${ROLE}\" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... fi", + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigInitializationActionsSchema(), + }, + + "lifecycle_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Lifecycle setting for the cluster.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigSchema(), + }, + + "master_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The Compute Engine config settings for the master instance in a cluster.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfigSchema(), +{{- if ne $.TargetVersionName "ga" }} + }, + + "metastore_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Metastore configuration.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigSchema(), +{{- end }} + }, + + "secondary_worker_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The Compute Engine config settings for additional worker instances in a cluster.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigSchema(), + }, + + "security_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. 
Security settings for the cluster.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigSecurityConfigSchema(), + }, + + "software_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The config settings for software inside the cluster.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigSchema(), + }, + + "staging_bucket": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see [Dataproc staging bucket](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). **This field requires a Cloud Storage bucket name, not a URI to a Cloud Storage bucket.**", + }, + + "temp_bucket": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket. 
**This field requires a Cloud Storage bucket name, not a URI to a Cloud Storage bucket.**", + }, + + "worker_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The Compute Engine config settings for worker instances in a cluster.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfigSchema(), + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "policy": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Optional. The autoscaling policy used by the cluster. Only resource names including projectid and location (region) are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]` * `projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]` Note that the policy must be in the same project and Dataproc region.", + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "gce_pd_kms_key_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Optional. The Cloud KMS key name to use for PD disk encryption for all instances in the cluster.", + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigEndpointConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable_http_port_access": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: "Optional. If true, enable http access to specific ports on the cluster from external sources. 
Defaults to false.", + }, + + "http_ports": { + Type: schema.TypeMap, + Computed: true, + Description: "Output only. The map of port descriptions to URLs. Will only be populated if enable_http_port_access is true.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "internal_ip_only": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. If true, all instances in the cluster will only have internal IP addresses. By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. This `internal_ip_only` restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses.", + }, + + "metadata": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "The Compute Engine metadata entries to add to all instances (see [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "network": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Optional. The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither `network_uri` nor `subnetwork_uri` is specified, the \"default\" network of the project is used, if it exists. Cannot be a \"Custom Subnet Network\" (see [Using Subnetworks](https://cloud.google.com/compute/docs/subnetworks) for more information). A full URL, partial URI, or short name are valid. 
Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default` * `projects/[project_id]/regions/global/default` * `default`", + }, + + "node_group_affinity": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Node Group Affinity for sole-tenant clusters.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinitySchema(), + }, + + "private_ipv6_google_access": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The type of IPv6 access for a cluster. Possible values: PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED, INHERIT_FROM_SUBNETWORK, OUTBOUND, BIDIRECTIONAL", + }, + + "reservation_affinity": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Reservation Affinity for consuming Zonal reservation.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinitySchema(), + }, + + "service_account": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Optional. The [Dataproc service account](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_dataproc) (also see [VM Data Plane identity](https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity)) used by Dataproc cluster VM instances to access Google Cloud Platform services. If not specified, the [Compute Engine default service account](https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) is used.", + }, + + "service_account_scopes": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The URIs of service account scopes to be included in Compute Engine instances. 
The following base set of scopes is always included: * https://www.googleapis.com/auth/cloud.useraccounts.readonly * https://www.googleapis.com/auth/devstorage.read_write * https://www.googleapis.com/auth/logging.write If no scopes are specified, the following defaults are also provided: * https://www.googleapis.com/auth/bigquery * https://www.googleapis.com/auth/bigtable.admin.table * https://www.googleapis.com/auth/bigtable.data * https://www.googleapis.com/auth/devstorage.full_control", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "shielded_instance_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Shielded Instance Config for clusters using Compute Engine Shielded VMs.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfigSchema(), + }, + + "subnetwork": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Optional. The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/subnetworks/sub0` * `projects/[project_id]/regions/us-east1/subnetworks/sub0` * `sub0`", + }, + + "tags": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Description: "The Compute Engine tags to add to all instances (see [Tagging instances](https://cloud.google.com/compute/docs/label-or-tag-resources#tags)).", + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "zone": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The zone where the Compute Engine cluster will be located. On a create request, it is required in the \"global\" region. 
If omitted in a non-global Dataproc region, the service will pick a zone in the corresponding Compute Engine region. On a get request, zone will always be present. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]` * `projects/[project_id]/zones/[zone]` * `us-central1-f`", + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinitySchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "node_group": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Required. The URI of a sole-tenant [node group resource](https://cloud.google.com/compute/docs/reference/rest/v1/nodeGroups) that the cluster will be created on. A full URL, partial URI, or node group name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1` * `projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1` * `node-group-1`", + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinitySchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "consume_reservation_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. Type of reservation to consume Possible values: TYPE_UNSPECIFIED, NO_RESERVATION, ANY_RESERVATION, SPECIFIC_RESERVATION", + }, + + "key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. Corresponds to the label key of reservation resource.", + }, + + "values": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. 
Corresponds to the label values of reservation resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable_integrity_monitoring": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: "Optional. Defines whether instances have integrity monitoring enabled. Integrity monitoring compares the most recent boot measurements to the integrity policy baseline and returns a pair of pass/fail results depending on whether they match or not.", + }, + + "enable_secure_boot": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: "Optional. Defines whether the instances have Secure Boot enabled. Secure Boot helps ensure that the system only runs authentic software by verifying the digital signature of all boot components, and halting the boot process if signature verification fails.", + }, + + "enable_vtpm": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: "Optional. Defines whether the instance have the vTPM enabled. Virtual Trusted Platform Module protects objects like keys, certificates and enables Measured Boot by performing the measurements needed to create a known good boot baseline, called the integrity policy baseline.", + }, + }, + } +} + +{{- if ne $.TargetVersionName "ga" }} +func DataprocWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "namespaced_gke_deployment_target": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. 
A target for the deployment.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetSchema(), + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cluster_namespace": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. A namespace within the GKE cluster to deploy into.", + }, + + "target_gke_cluster": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Optional. The target GKE cluster to deploy to. Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'", + }, + }, + } +} + +{{- end }} +func DataprocWorkflowTemplatePlacementManagedClusterConfigInitializationActionsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "executable_file": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Required. Cloud Storage URI of executable file.", + }, + + "execution_timeout": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. Amount of time executable has to complete. Default is 10 minutes (see JSON representation of [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period.", + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "auto_delete_time": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. 
The time when cluster will be auto-deleted (see JSON representation of [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).", + }, + + "auto_delete_ttl": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).", + }, + + "idle_delete_ttl": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).", + }, + + "idle_start_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).", + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "accelerators": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The Compute Engine accelerator configuration for these instances.", + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorsSchema(), + }, + + "disk_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. 
Disk option config settings.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigSchema(), + }, + + "image": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id]` * `projects/[project_id]/global/images/[image-id]` * `image-id` Image family examples. Dataproc will use the most recent image from the family: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name]` * `projects/[project_id]/global/images/family/[custom-image-family-name]` If the URI is unspecified, it will be inferred from `SoftwareConfig.image_version` or the system default.", + }, + + "machine_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `n1-standard-2` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, `n1-standard-2`.", + }, + + "min_cpu_platform": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. Specifies the minimum cpu platform for the Instance Group. 
See [Dataproc -> Minimum CPU Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).", + }, + + "num_instances": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "Optional. The number of VM instances in the instance group. For [HA cluster](/dataproc/docs/concepts/configuring-clusters/high-availability) [master_config](#FIELDS.master_config) groups, **must be set to 3**. For standard cluster [master_config](#FIELDS.master_config) groups, **must be set to 1**.", + }, + + "preemptibility": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is `NON_PREEMPTIBLE`. This default cannot be changed. The default value for secondary instances is `PREEMPTIBLE`. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE", + }, + + "instance_names": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. The list of instance names. Dataproc derives the names from `cluster_name`, `num_instances`, and the instance group.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "is_preemptible": { + Type: schema.TypeBool, + Computed: true, + Description: "Output only. Specifies that this instance group contains preemptible instances.", + }, + + "managed_group_config": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. The config for Compute Engine Instance Group Manager that manages this group. 
This is only used for preemptible instance groups.", + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigSchema(), + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "accelerator_count": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "The number of the accelerator cards of this type exposed to this instance.", + }, + + "accelerator_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See [Compute Engine AcceleratorTypes](https://cloud.google.com/compute/docs/reference/beta/acceleratorTypes). Examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * `nvidia-tesla-k80` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, `nvidia-tesla-k80`.", + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "boot_disk_size_gb": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "Optional. Size in GB of the boot disk (default is 500GB).", + }, + + "boot_disk_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. Type of the boot disk (default is \"pd-standard\"). 
Valid values: \"pd-balanced\" (Persistent Disk Balanced Solid State Drive), \"pd-ssd\" (Persistent Disk Solid State Drive), or \"pd-standard\" (Persistent Disk Hard Disk Drive). See [Disk types](https://cloud.google.com/compute/docs/disks#disk-types).", + }, + + "num_local_ssds": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.", + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "instance_group_manager_name": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The name of the Instance Group Manager for this group.", + }, + + "instance_template_name": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The name of the Instance Template used for the Managed Instance Group.", +{{- if ne $.TargetVersionName "ga" }} + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "dataproc_metastore_service": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Required. Resource name of an existing Dataproc Metastore service. 
Example: * `projects/[project_id]/locations/[dataproc_region]/services/[service-name]`", +{{- end }} + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "accelerators": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The Compute Engine accelerator configuration for these instances.", + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorsSchema(), + }, + + "disk_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. Disk option config settings.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigSchema(), + }, + + "image": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id]` * `projects/[project_id]/global/images/[image-id]` * `image-id` Image family examples. Dataproc will use the most recent image from the family: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name]` * `projects/[project_id]/global/images/family/[custom-image-family-name]` If the URI is unspecified, it will be inferred from `SoftwareConfig.image_version` or the system default.", + }, + + "machine_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. 
Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `n1-standard-2` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, `n1-standard-2`.", + }, + + "min_cpu_platform": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. Specifies the minimum cpu platform for the Instance Group. See [Dataproc -> Minimum CPU Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).", + }, + + "num_instances": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "Optional. The number of VM instances in the instance group. For [HA cluster](/dataproc/docs/concepts/configuring-clusters/high-availability) [master_config](#FIELDS.master_config) groups, **must be set to 3**. For standard cluster [master_config](#FIELDS.master_config) groups, **must be set to 1**.", + }, + + "preemptibility": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is `NON_PREEMPTIBLE`. This default cannot be changed. The default value for secondary instances is `PREEMPTIBLE`. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE", + }, + + "instance_names": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. The list of instance names. Dataproc derives the names from `cluster_name`, `num_instances`, and the instance group.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "is_preemptible": { + Type: schema.TypeBool, + Computed: true, + Description: "Output only. 
Specifies that this instance group contains preemptible instances.", + }, + + "managed_group_config": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.", + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigSchema(), + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "accelerator_count": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "The number of the accelerator cards of this type exposed to this instance.", + }, + + "accelerator_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See [Compute Engine AcceleratorTypes](https://cloud.google.com/compute/docs/reference/beta/acceleratorTypes). Examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * `nvidia-tesla-k80` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, `nvidia-tesla-k80`.", + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "boot_disk_size_gb": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "Optional. 
Size in GB of the boot disk (default is 500GB).", + }, + + "boot_disk_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. Type of the boot disk (default is \"pd-standard\"). Valid values: \"pd-balanced\" (Persistent Disk Balanced Solid State Drive), \"pd-ssd\" (Persistent Disk Solid State Drive), or \"pd-standard\" (Persistent Disk Hard Disk Drive). See [Disk types](https://cloud.google.com/compute/docs/disks#disk-types).", + }, + + "num_local_ssds": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.", + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "instance_group_manager_name": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The name of the Instance Group Manager for this group.", + }, + + "instance_template_name": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The name of the Instance Template used for the Managed Instance Group.", + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigSecurityConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kerberos_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. 
Kerberos related configuration.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigSchema(), + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cross_realm_trust_admin_server": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship.", + }, + + "cross_realm_trust_kdc": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship.", + }, + + "cross_realm_trust_realm": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust.", + }, + + "cross_realm_trust_shared_password": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship.", + }, + + "enable_kerberos": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: "Optional. Flag to indicate whether to Kerberize the cluster (default: false). Set this field to true to enable Kerberos on a cluster.", + }, + + "kdc_db_key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database.", + }, + + "key_password": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. 
The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc.", + }, + + "keystore": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.", + }, + + "keystore_password": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, this password is generated by Dataproc.", + }, + + "kms_key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Optional. The uri of the KMS key used to encrypt various sensitive files.", + }, + + "realm": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm.", + }, + + "root_principal_password": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The Cloud Storage URI of a KMS encrypted file containing the root principal password.", + }, + + "tgt_lifetime_hours": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "Optional. The lifetime of the ticket granting ticket, in hours. If not specified, or user specifies 0, then default value 10 will be used.", + }, + + "truststore": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The Cloud Storage URI of the truststore file used for SSL encryption. 
If not provided, Dataproc will provide a self-signed certificate.", + }, + + "truststore_password": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc.", + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "image_version": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The version of software inside the cluster. It must be one of the supported [Dataproc Versions](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_dataproc_versions), such as \"1.2\" (including a subminor version, such as \"1.2.29\"), or the [\"preview\" version](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). If unspecified, it defaults to the latest Debian version.", + }, + + "optional_components": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The set of components to activate on the cluster.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "properties": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. The properties to set on daemon config files. Property keys are specified in `prefix:property` format, for example `core:hadoop.tmp.dir`. 
The following are supported prefixes and their mappings: * capacity-scheduler: `capacity-scheduler.xml` * core: `core-site.xml` * distcp: `distcp-default.xml` * hdfs: `hdfs-site.xml` * hive: `hive-site.xml` * mapred: `mapred-site.xml` * pig: `pig.properties` * spark: `spark-defaults.conf` * yarn: `yarn-site.xml` For more information, see [Cluster properties](https://cloud.google.com/dataproc/docs/concepts/cluster-properties).", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "accelerators": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The Compute Engine accelerator configuration for these instances.", + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorsSchema(), + }, + + "disk_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. Disk option config settings.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigSchema(), + }, + + "image": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id]` * `projects/[project_id]/global/images/[image-id]` * `image-id` Image family examples. 
Dataproc will use the most recent image from the family: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name]` * `projects/[project_id]/global/images/family/[custom-image-family-name]` If the URI is unspecified, it will be inferred from `SoftwareConfig.image_version` or the system default.", + }, + + "machine_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `n1-standard-2` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, `n1-standard-2`.", + }, + + "min_cpu_platform": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. Specifies the minimum cpu platform for the Instance Group. See [Dataproc -> Minimum CPU Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).", + }, + + "num_instances": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "Optional. The number of VM instances in the instance group. For [HA cluster](/dataproc/docs/concepts/configuring-clusters/high-availability) [master_config](#FIELDS.master_config) groups, **must be set to 3**. For standard cluster [master_config](#FIELDS.master_config) groups, **must be set to 1**.", + }, + + "preemptibility": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. Specifies the preemptibility of the instance group. 
The default value for master and worker groups is `NON_PREEMPTIBLE`. This default cannot be changed. The default value for secondary instances is `PREEMPTIBLE`. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE", + }, + + "instance_names": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. The list of instance names. Dataproc derives the names from `cluster_name`, `num_instances`, and the instance group.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "is_preemptible": { + Type: schema.TypeBool, + Computed: true, + Description: "Output only. Specifies that this instance group contains preemptible instances.", + }, + + "managed_group_config": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.", + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigSchema(), + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "accelerator_count": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "The number of the accelerator cards of this type exposed to this instance.", + }, + + "accelerator_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See [Compute Engine AcceleratorTypes](https://cloud.google.com/compute/docs/reference/beta/acceleratorTypes). 
Examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * `nvidia-tesla-k80` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, `nvidia-tesla-k80`.", + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "boot_disk_size_gb": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "Optional. Size in GB of the boot disk (default is 500GB).", + }, + + "boot_disk_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. Type of the boot disk (default is \"pd-standard\"). Valid values: \"pd-balanced\" (Persistent Disk Balanced Solid State Drive), \"pd-ssd\" (Persistent Disk Solid State Drive), or \"pd-standard\" (Persistent Disk Hard Disk Drive). See [Disk types](https://cloud.google.com/compute/docs/disks#disk-types).", + }, + + "num_local_ssds": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. 
If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.", + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "instance_group_manager_name": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The name of the Instance Group Manager for this group.", + }, + + "instance_template_name": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The name of the Instance Template used for the Managed Instance Group.", + }, + }, + } +} + +func DataprocWorkflowTemplateEncryptionConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Optional. The Cloud KMS key name to use for encryption.", + }, + }, + } +} + +func DataprocWorkflowTemplateParametersSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "fields": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. Paths to all fields that the parameter replaces. A field is allowed to appear in at most one parameter's list of field paths. A field path is similar in syntax to a google.protobuf.FieldMask. For example, a field path that references the zone field of a workflow template's cluster selector would be specified as `placement.clusterSelector.zone`. 
Also, field paths can reference fields using the following syntax: * Values in maps can be referenced by key: * labels['key'] * placement.clusterSelector.clusterLabels['key'] * placement.managedCluster.labels['key'] * placement.clusterSelector.clusterLabels['key'] * jobs['step-id'].labels['key'] * Jobs in the jobs list can be referenced by step-id: * jobs['step-id'].hadoopJob.mainJarFileUri * jobs['step-id'].hiveJob.queryFileUri * jobs['step-id'].pySparkJob.mainPythonFileUri * jobs['step-id'].hadoopJob.jarFileUris[0] * jobs['step-id'].hadoopJob.archiveUris[0] * jobs['step-id'].hadoopJob.fileUris[0] * jobs['step-id'].pySparkJob.pythonFileUris[0] * Items in repeated fields can be referenced by a zero-based index: * jobs['step-id'].sparkJob.args[0] * Other examples: * jobs['step-id'].hadoopJob.properties['key'] * jobs['step-id'].hadoopJob.args[0] * jobs['step-id'].hiveJob.scriptVariables['key'] * jobs['step-id'].hadoopJob.mainJarFileUri * placement.clusterSelector.zone It may not be possible to parameterize maps and repeated fields in their entirety since only individual map values and individual items in repeated fields can be referenced. For example, the following field paths are invalid: - placement.clusterSelector.clusterLabels - jobs['step-id'].sparkJob.args", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. Parameter name. The parameter name is used as the key, and paired with the parameter value, which are passed to the template when the template is instantiated. The name must contain only capital letters (A-Z), numbers (0-9), and underscores (_), and must not start with a number. The maximum length is 40 characters.", + }, + + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. Brief description of the parameter. 
Must not exceed 1024 characters.", + }, + + "validation": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Validation rules to be applied to this parameter's value.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateParametersValidationSchema(), + }, + }, + } +} + +func DataprocWorkflowTemplateParametersValidationSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "regex": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Validation based on regular expressions.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateParametersValidationRegexSchema(), + }, + + "values": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Validation based on a list of allowed values.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateParametersValidationValuesSchema(), + }, + }, + } +} + +func DataprocWorkflowTemplateParametersValidationRegexSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "regexes": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. RE2 regular expressions used to validate the parameter's value. The value must match the regex in its entirety (substring matches are not sufficient).", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateParametersValidationValuesSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "values": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. 
List of allowed values for the parameter.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func resourceDataprocWorkflowTemplateCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &WorkflowTemplate{ + Jobs: expandDataprocWorkflowTemplateJobsArray(d.Get("jobs")), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Placement: expandDataprocWorkflowTemplatePlacement(d.Get("placement")), + DagTimeout: dcl.String(d.Get("dag_timeout").(string)), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + EncryptionConfig: expandDataprocWorkflowTemplateEncryptionConfig(d.Get("encryption_config")), + Parameters: expandDataprocWorkflowTemplateParametersArray(d.Get("parameters")), + Project: dcl.String(project), + Version: dcl.Int64OrNil(int64(d.Get("version").(int))), + } + + id, err := obj.ID() + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + directive := dcl.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := transport_tpg.NewDCLDataprocClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyWorkflowTemplate(context.Background(), obj, directive...) 
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating WorkflowTemplate: %s", err) + } + + log.Printf("[DEBUG] Finished creating WorkflowTemplate %q: %#v", d.Id(), res) + + return resourceDataprocWorkflowTemplateRead(d, meta) +} + +func resourceDataprocWorkflowTemplateRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &WorkflowTemplate{ + Jobs: expandDataprocWorkflowTemplateJobsArray(d.Get("jobs")), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Placement: expandDataprocWorkflowTemplatePlacement(d.Get("placement")), + DagTimeout: dcl.String(d.Get("dag_timeout").(string)), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + EncryptionConfig: expandDataprocWorkflowTemplateEncryptionConfig(d.Get("encryption_config")), + Parameters: expandDataprocWorkflowTemplateParametersArray(d.Get("parameters")), + Project: dcl.String(project), + Version: dcl.Int64OrNil(int64(d.Get("version").(int))), + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := transport_tpg.NewDCLDataprocClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := 
client.GetWorkflowTemplate(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("DataprocWorkflowTemplate %q", d.Id()) + return dcl.HandleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("jobs", flattenDataprocWorkflowTemplateJobsArray(res.Jobs)); err != nil { + return fmt.Errorf("error setting jobs in state: %s", err) + } + if err = d.Set("location", res.Location); err != nil { + return fmt.Errorf("error setting location in state: %s", err) + } + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + if err = d.Set("placement", flattenDataprocWorkflowTemplatePlacement(res.Placement)); err != nil { + return fmt.Errorf("error setting placement in state: %s", err) + } + if err = d.Set("dag_timeout", res.DagTimeout); err != nil { + return fmt.Errorf("error setting dag_timeout in state: %s", err) + } + if err = d.Set("effective_labels", res.Labels); err != nil { + return fmt.Errorf("error setting effective_labels in state: %s", err) + } + if err = d.Set("encryption_config", flattenDataprocWorkflowTemplateEncryptionConfig(res.EncryptionConfig)); err != nil { + return fmt.Errorf("error setting encryption_config in state: %s", err) + } + if err = d.Set("parameters", flattenDataprocWorkflowTemplateParametersArray(res.Parameters)); err != nil { + return fmt.Errorf("error setting parameters in state: %s", err) + } + if err = d.Set("project", res.Project); err != nil { + return fmt.Errorf("error setting project in state: %s", err) + } + if err = d.Set("version", res.Version); err != nil { + return fmt.Errorf("error setting version in state: %s", err) + } + if err = d.Set("create_time", res.CreateTime); err != nil { + return fmt.Errorf("error setting create_time in state: %s", err) + } + if err = d.Set("labels", flattenDataprocWorkflowTemplateLabels(res.Labels, d)); err != nil { + return fmt.Errorf("error setting labels in state: %s", err) + } + if err = d.Set("terraform_labels", 
flattenDataprocWorkflowTemplateTerraformLabels(res.Labels, d)); err != nil { + return fmt.Errorf("error setting terraform_labels in state: %s", err) + } + if err = d.Set("update_time", res.UpdateTime); err != nil { + return fmt.Errorf("error setting update_time in state: %s", err) + } + + return nil +} +func resourceDataprocWorkflowTemplateUpdate(d *schema.ResourceData, meta interface{}) error { + // Only the root field "labels" and "terraform_labels" are mutable + + return resourceDataprocWorkflowTemplateRead(d, meta) +} + +func resourceDataprocWorkflowTemplateDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &WorkflowTemplate{ + Jobs: expandDataprocWorkflowTemplateJobsArray(d.Get("jobs")), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Placement: expandDataprocWorkflowTemplatePlacement(d.Get("placement")), + DagTimeout: dcl.String(d.Get("dag_timeout").(string)), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + EncryptionConfig: expandDataprocWorkflowTemplateEncryptionConfig(d.Get("encryption_config")), + Parameters: expandDataprocWorkflowTemplateParametersArray(d.Get("parameters")), + Project: dcl.String(project), + Version: dcl.Int64OrNil(int64(d.Get("version").(int))), + } + + log.Printf("[DEBUG] Deleting WorkflowTemplate %q", d.Id()) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := transport_tpg.NewDCLDataprocClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + 
return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + if err := client.DeleteWorkflowTemplate(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting WorkflowTemplate: %s", err) + } + + log.Printf("[DEBUG] Finished deleting WorkflowTemplate %q", d.Id()) + return nil +} + +func resourceDataprocWorkflowTemplateImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/workflowTemplates/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/workflowTemplates/{{ "{{" }}name{{ "}}" }}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func expandDataprocWorkflowTemplateJobsArray(o interface{}) []WorkflowTemplateJobs { + if o == nil { + return make([]WorkflowTemplateJobs, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return make([]WorkflowTemplateJobs, 0) + } + + items := make([]WorkflowTemplateJobs, 0, len(objs)) + for _, item := range objs { + i := expandDataprocWorkflowTemplateJobs(item) + items = append(items, *i) + } + + return items +} + +func expandDataprocWorkflowTemplateJobs(o interface{}) *WorkflowTemplateJobs { + if o == nil { + return EmptyWorkflowTemplateJobs + } + + obj := o.(map[string]interface{}) + return &WorkflowTemplateJobs{ + StepId: dcl.String(obj["step_id"].(string)), + HadoopJob: expandDataprocWorkflowTemplateJobsHadoopJob(obj["hadoop_job"]), + HiveJob: expandDataprocWorkflowTemplateJobsHiveJob(obj["hive_job"]), + Labels: 
tpgresource.CheckStringMap(obj["labels"]), + PigJob: expandDataprocWorkflowTemplateJobsPigJob(obj["pig_job"]), + PrerequisiteStepIds: dcl.ExpandStringArray(obj["prerequisite_step_ids"]), + PrestoJob: expandDataprocWorkflowTemplateJobsPrestoJob(obj["presto_job"]), + PysparkJob: expandDataprocWorkflowTemplateJobsPysparkJob(obj["pyspark_job"]), + Scheduling: expandDataprocWorkflowTemplateJobsScheduling(obj["scheduling"]), + SparkJob: expandDataprocWorkflowTemplateJobsSparkJob(obj["spark_job"]), + SparkRJob: expandDataprocWorkflowTemplateJobsSparkRJob(obj["spark_r_job"]), + SparkSqlJob: expandDataprocWorkflowTemplateJobsSparkSqlJob(obj["spark_sql_job"]), + } +} + +func flattenDataprocWorkflowTemplateJobsArray(objs []WorkflowTemplateJobs) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenDataprocWorkflowTemplateJobs(&item) + items = append(items, i) + } + + return items +} + +func flattenDataprocWorkflowTemplateJobs(obj *WorkflowTemplateJobs) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "step_id": obj.StepId, + "hadoop_job": flattenDataprocWorkflowTemplateJobsHadoopJob(obj.HadoopJob), + "hive_job": flattenDataprocWorkflowTemplateJobsHiveJob(obj.HiveJob), + "labels": obj.Labels, + "pig_job": flattenDataprocWorkflowTemplateJobsPigJob(obj.PigJob), + "prerequisite_step_ids": obj.PrerequisiteStepIds, + "presto_job": flattenDataprocWorkflowTemplateJobsPrestoJob(obj.PrestoJob), + "pyspark_job": flattenDataprocWorkflowTemplateJobsPysparkJob(obj.PysparkJob), + "scheduling": flattenDataprocWorkflowTemplateJobsScheduling(obj.Scheduling), + "spark_job": flattenDataprocWorkflowTemplateJobsSparkJob(obj.SparkJob), + "spark_r_job": flattenDataprocWorkflowTemplateJobsSparkRJob(obj.SparkRJob), + "spark_sql_job": flattenDataprocWorkflowTemplateJobsSparkSqlJob(obj.SparkSqlJob), + } + + return transformed + +} + +func 
expandDataprocWorkflowTemplateJobsHadoopJob(o interface{}) *WorkflowTemplateJobsHadoopJob { + if o == nil { + return EmptyWorkflowTemplateJobsHadoopJob + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplateJobsHadoopJob + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplateJobsHadoopJob{ + ArchiveUris: dcl.ExpandStringArray(obj["archive_uris"]), + Args: dcl.ExpandStringArray(obj["args"]), + FileUris: dcl.ExpandStringArray(obj["file_uris"]), + JarFileUris: dcl.ExpandStringArray(obj["jar_file_uris"]), + LoggingConfig: expandDataprocWorkflowTemplateJobsHadoopJobLoggingConfig(obj["logging_config"]), + MainClass: dcl.String(obj["main_class"].(string)), + MainJarFileUri: dcl.String(obj["main_jar_file_uri"].(string)), + Properties: tpgresource.CheckStringMap(obj["properties"]), + } +} + +func flattenDataprocWorkflowTemplateJobsHadoopJob(obj *WorkflowTemplateJobsHadoopJob) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "archive_uris": obj.ArchiveUris, + "args": obj.Args, + "file_uris": obj.FileUris, + "jar_file_uris": obj.JarFileUris, + "logging_config": flattenDataprocWorkflowTemplateJobsHadoopJobLoggingConfig(obj.LoggingConfig), + "main_class": obj.MainClass, + "main_jar_file_uri": obj.MainJarFileUri, + "properties": obj.Properties, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsHadoopJobLoggingConfig(o interface{}) *WorkflowTemplateJobsHadoopJobLoggingConfig { + if o == nil { + return EmptyWorkflowTemplateJobsHadoopJobLoggingConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplateJobsHadoopJobLoggingConfig + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplateJobsHadoopJobLoggingConfig{ + DriverLogLevels: tpgresource.CheckStringMap(obj["driver_log_levels"]), + } +} + +func 
flattenDataprocWorkflowTemplateJobsHadoopJobLoggingConfig(obj *WorkflowTemplateJobsHadoopJobLoggingConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "driver_log_levels": obj.DriverLogLevels, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsHiveJob(o interface{}) *WorkflowTemplateJobsHiveJob { + if o == nil { + return EmptyWorkflowTemplateJobsHiveJob + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplateJobsHiveJob + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplateJobsHiveJob{ + ContinueOnFailure: dcl.Bool(obj["continue_on_failure"].(bool)), + JarFileUris: dcl.ExpandStringArray(obj["jar_file_uris"]), + Properties: tpgresource.CheckStringMap(obj["properties"]), + QueryFileUri: dcl.String(obj["query_file_uri"].(string)), + QueryList: expandDataprocWorkflowTemplateJobsHiveJobQueryList(obj["query_list"]), + ScriptVariables: tpgresource.CheckStringMap(obj["script_variables"]), + } +} + +func flattenDataprocWorkflowTemplateJobsHiveJob(obj *WorkflowTemplateJobsHiveJob) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "continue_on_failure": obj.ContinueOnFailure, + "jar_file_uris": obj.JarFileUris, + "properties": obj.Properties, + "query_file_uri": obj.QueryFileUri, + "query_list": flattenDataprocWorkflowTemplateJobsHiveJobQueryList(obj.QueryList), + "script_variables": obj.ScriptVariables, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsHiveJobQueryList(o interface{}) *WorkflowTemplateJobsHiveJobQueryList { + if o == nil { + return EmptyWorkflowTemplateJobsHiveJobQueryList + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplateJobsHiveJobQueryList + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplateJobsHiveJobQueryList{ + 
Queries: dcl.ExpandStringArray(obj["queries"]), + } +} + +func flattenDataprocWorkflowTemplateJobsHiveJobQueryList(obj *WorkflowTemplateJobsHiveJobQueryList) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "queries": obj.Queries, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsPigJob(o interface{}) *WorkflowTemplateJobsPigJob { + if o == nil { + return EmptyWorkflowTemplateJobsPigJob + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplateJobsPigJob + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplateJobsPigJob{ + ContinueOnFailure: dcl.Bool(obj["continue_on_failure"].(bool)), + JarFileUris: dcl.ExpandStringArray(obj["jar_file_uris"]), + LoggingConfig: expandDataprocWorkflowTemplateJobsPigJobLoggingConfig(obj["logging_config"]), + Properties: tpgresource.CheckStringMap(obj["properties"]), + QueryFileUri: dcl.String(obj["query_file_uri"].(string)), + QueryList: expandDataprocWorkflowTemplateJobsPigJobQueryList(obj["query_list"]), + ScriptVariables: tpgresource.CheckStringMap(obj["script_variables"]), + } +} + +func flattenDataprocWorkflowTemplateJobsPigJob(obj *WorkflowTemplateJobsPigJob) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "continue_on_failure": obj.ContinueOnFailure, + "jar_file_uris": obj.JarFileUris, + "logging_config": flattenDataprocWorkflowTemplateJobsPigJobLoggingConfig(obj.LoggingConfig), + "properties": obj.Properties, + "query_file_uri": obj.QueryFileUri, + "query_list": flattenDataprocWorkflowTemplateJobsPigJobQueryList(obj.QueryList), + "script_variables": obj.ScriptVariables, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsPigJobLoggingConfig(o interface{}) *WorkflowTemplateJobsPigJobLoggingConfig { + if o == nil { + return EmptyWorkflowTemplateJobsPigJobLoggingConfig + } 
+ objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplateJobsPigJobLoggingConfig + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplateJobsPigJobLoggingConfig{ + DriverLogLevels: tpgresource.CheckStringMap(obj["driver_log_levels"]), + } +} + +func flattenDataprocWorkflowTemplateJobsPigJobLoggingConfig(obj *WorkflowTemplateJobsPigJobLoggingConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "driver_log_levels": obj.DriverLogLevels, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsPigJobQueryList(o interface{}) *WorkflowTemplateJobsPigJobQueryList { + if o == nil { + return EmptyWorkflowTemplateJobsPigJobQueryList + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplateJobsPigJobQueryList + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplateJobsPigJobQueryList{ + Queries: dcl.ExpandStringArray(obj["queries"]), + } +} + +func flattenDataprocWorkflowTemplateJobsPigJobQueryList(obj *WorkflowTemplateJobsPigJobQueryList) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "queries": obj.Queries, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsPrestoJob(o interface{}) *WorkflowTemplateJobsPrestoJob { + if o == nil { + return EmptyWorkflowTemplateJobsPrestoJob + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplateJobsPrestoJob + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplateJobsPrestoJob{ + ClientTags: dcl.ExpandStringArray(obj["client_tags"]), + ContinueOnFailure: dcl.Bool(obj["continue_on_failure"].(bool)), + LoggingConfig: expandDataprocWorkflowTemplateJobsPrestoJobLoggingConfig(obj["logging_config"]), + OutputFormat: dcl.String(obj["output_format"].(string)), 
+ Properties: tpgresource.CheckStringMap(obj["properties"]), + QueryFileUri: dcl.String(obj["query_file_uri"].(string)), + QueryList: expandDataprocWorkflowTemplateJobsPrestoJobQueryList(obj["query_list"]), + } +} + +func flattenDataprocWorkflowTemplateJobsPrestoJob(obj *WorkflowTemplateJobsPrestoJob) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "client_tags": obj.ClientTags, + "continue_on_failure": obj.ContinueOnFailure, + "logging_config": flattenDataprocWorkflowTemplateJobsPrestoJobLoggingConfig(obj.LoggingConfig), + "output_format": obj.OutputFormat, + "properties": obj.Properties, + "query_file_uri": obj.QueryFileUri, + "query_list": flattenDataprocWorkflowTemplateJobsPrestoJobQueryList(obj.QueryList), + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsPrestoJobLoggingConfig(o interface{}) *WorkflowTemplateJobsPrestoJobLoggingConfig { + if o == nil { + return EmptyWorkflowTemplateJobsPrestoJobLoggingConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplateJobsPrestoJobLoggingConfig + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplateJobsPrestoJobLoggingConfig{ + DriverLogLevels: tpgresource.CheckStringMap(obj["driver_log_levels"]), + } +} + +func flattenDataprocWorkflowTemplateJobsPrestoJobLoggingConfig(obj *WorkflowTemplateJobsPrestoJobLoggingConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "driver_log_levels": obj.DriverLogLevels, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsPrestoJobQueryList(o interface{}) *WorkflowTemplateJobsPrestoJobQueryList { + if o == nil { + return EmptyWorkflowTemplateJobsPrestoJobQueryList + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplateJobsPrestoJobQueryList + } + obj := 
objArr[0].(map[string]interface{}) + return &WorkflowTemplateJobsPrestoJobQueryList{ + Queries: dcl.ExpandStringArray(obj["queries"]), + } +} + +func flattenDataprocWorkflowTemplateJobsPrestoJobQueryList(obj *WorkflowTemplateJobsPrestoJobQueryList) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "queries": obj.Queries, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsPysparkJob(o interface{}) *WorkflowTemplateJobsPysparkJob { + if o == nil { + return EmptyWorkflowTemplateJobsPysparkJob + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplateJobsPysparkJob + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplateJobsPysparkJob{ + MainPythonFileUri: dcl.String(obj["main_python_file_uri"].(string)), + ArchiveUris: dcl.ExpandStringArray(obj["archive_uris"]), + Args: dcl.ExpandStringArray(obj["args"]), + FileUris: dcl.ExpandStringArray(obj["file_uris"]), + JarFileUris: dcl.ExpandStringArray(obj["jar_file_uris"]), + LoggingConfig: expandDataprocWorkflowTemplateJobsPysparkJobLoggingConfig(obj["logging_config"]), + Properties: tpgresource.CheckStringMap(obj["properties"]), + PythonFileUris: dcl.ExpandStringArray(obj["python_file_uris"]), + } +} + +func flattenDataprocWorkflowTemplateJobsPysparkJob(obj *WorkflowTemplateJobsPysparkJob) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "main_python_file_uri": obj.MainPythonFileUri, + "archive_uris": obj.ArchiveUris, + "args": obj.Args, + "file_uris": obj.FileUris, + "jar_file_uris": obj.JarFileUris, + "logging_config": flattenDataprocWorkflowTemplateJobsPysparkJobLoggingConfig(obj.LoggingConfig), + "properties": obj.Properties, + "python_file_uris": obj.PythonFileUris, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsPysparkJobLoggingConfig(o interface{}) 
*WorkflowTemplateJobsPysparkJobLoggingConfig { + if o == nil { + return EmptyWorkflowTemplateJobsPysparkJobLoggingConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplateJobsPysparkJobLoggingConfig + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplateJobsPysparkJobLoggingConfig{ + DriverLogLevels: tpgresource.CheckStringMap(obj["driver_log_levels"]), + } +} + +func flattenDataprocWorkflowTemplateJobsPysparkJobLoggingConfig(obj *WorkflowTemplateJobsPysparkJobLoggingConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "driver_log_levels": obj.DriverLogLevels, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsScheduling(o interface{}) *WorkflowTemplateJobsScheduling { + if o == nil { + return EmptyWorkflowTemplateJobsScheduling + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplateJobsScheduling + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplateJobsScheduling{ + MaxFailuresPerHour: dcl.Int64(int64(obj["max_failures_per_hour"].(int))), + MaxFailuresTotal: dcl.Int64(int64(obj["max_failures_total"].(int))), + } +} + +func flattenDataprocWorkflowTemplateJobsScheduling(obj *WorkflowTemplateJobsScheduling) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "max_failures_per_hour": obj.MaxFailuresPerHour, + "max_failures_total": obj.MaxFailuresTotal, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsSparkJob(o interface{}) *WorkflowTemplateJobsSparkJob { + if o == nil { + return EmptyWorkflowTemplateJobsSparkJob + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplateJobsSparkJob + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplateJobsSparkJob{ + 
ArchiveUris: dcl.ExpandStringArray(obj["archive_uris"]), + Args: dcl.ExpandStringArray(obj["args"]), + FileUris: dcl.ExpandStringArray(obj["file_uris"]), + JarFileUris: dcl.ExpandStringArray(obj["jar_file_uris"]), + LoggingConfig: expandDataprocWorkflowTemplateJobsSparkJobLoggingConfig(obj["logging_config"]), + MainClass: dcl.String(obj["main_class"].(string)), + MainJarFileUri: dcl.String(obj["main_jar_file_uri"].(string)), + Properties: tpgresource.CheckStringMap(obj["properties"]), + } +} + +func flattenDataprocWorkflowTemplateJobsSparkJob(obj *WorkflowTemplateJobsSparkJob) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "archive_uris": obj.ArchiveUris, + "args": obj.Args, + "file_uris": obj.FileUris, + "jar_file_uris": obj.JarFileUris, + "logging_config": flattenDataprocWorkflowTemplateJobsSparkJobLoggingConfig(obj.LoggingConfig), + "main_class": obj.MainClass, + "main_jar_file_uri": obj.MainJarFileUri, + "properties": obj.Properties, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsSparkJobLoggingConfig(o interface{}) *WorkflowTemplateJobsSparkJobLoggingConfig { + if o == nil { + return EmptyWorkflowTemplateJobsSparkJobLoggingConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplateJobsSparkJobLoggingConfig + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplateJobsSparkJobLoggingConfig{ + DriverLogLevels: tpgresource.CheckStringMap(obj["driver_log_levels"]), + } +} + +func flattenDataprocWorkflowTemplateJobsSparkJobLoggingConfig(obj *WorkflowTemplateJobsSparkJobLoggingConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "driver_log_levels": obj.DriverLogLevels, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsSparkRJob(o interface{}) *WorkflowTemplateJobsSparkRJob { + if o == nil { + 
return EmptyWorkflowTemplateJobsSparkRJob + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplateJobsSparkRJob + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplateJobsSparkRJob{ + MainRFileUri: dcl.String(obj["main_r_file_uri"].(string)), + ArchiveUris: dcl.ExpandStringArray(obj["archive_uris"]), + Args: dcl.ExpandStringArray(obj["args"]), + FileUris: dcl.ExpandStringArray(obj["file_uris"]), + LoggingConfig: expandDataprocWorkflowTemplateJobsSparkRJobLoggingConfig(obj["logging_config"]), + Properties: tpgresource.CheckStringMap(obj["properties"]), + } +} + +func flattenDataprocWorkflowTemplateJobsSparkRJob(obj *WorkflowTemplateJobsSparkRJob) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "main_r_file_uri": obj.MainRFileUri, + "archive_uris": obj.ArchiveUris, + "args": obj.Args, + "file_uris": obj.FileUris, + "logging_config": flattenDataprocWorkflowTemplateJobsSparkRJobLoggingConfig(obj.LoggingConfig), + "properties": obj.Properties, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsSparkRJobLoggingConfig(o interface{}) *WorkflowTemplateJobsSparkRJobLoggingConfig { + if o == nil { + return EmptyWorkflowTemplateJobsSparkRJobLoggingConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplateJobsSparkRJobLoggingConfig + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplateJobsSparkRJobLoggingConfig{ + DriverLogLevels: tpgresource.CheckStringMap(obj["driver_log_levels"]), + } +} + +func flattenDataprocWorkflowTemplateJobsSparkRJobLoggingConfig(obj *WorkflowTemplateJobsSparkRJobLoggingConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "driver_log_levels": obj.DriverLogLevels, + } + + return []interface{}{transformed} + +} + +func 
expandDataprocWorkflowTemplateJobsSparkSqlJob(o interface{}) *WorkflowTemplateJobsSparkSqlJob { + if o == nil { + return EmptyWorkflowTemplateJobsSparkSqlJob + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplateJobsSparkSqlJob + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplateJobsSparkSqlJob{ + JarFileUris: dcl.ExpandStringArray(obj["jar_file_uris"]), + LoggingConfig: expandDataprocWorkflowTemplateJobsSparkSqlJobLoggingConfig(obj["logging_config"]), + Properties: tpgresource.CheckStringMap(obj["properties"]), + QueryFileUri: dcl.String(obj["query_file_uri"].(string)), + QueryList: expandDataprocWorkflowTemplateJobsSparkSqlJobQueryList(obj["query_list"]), + ScriptVariables: tpgresource.CheckStringMap(obj["script_variables"]), + } +} + +func flattenDataprocWorkflowTemplateJobsSparkSqlJob(obj *WorkflowTemplateJobsSparkSqlJob) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "jar_file_uris": obj.JarFileUris, + "logging_config": flattenDataprocWorkflowTemplateJobsSparkSqlJobLoggingConfig(obj.LoggingConfig), + "properties": obj.Properties, + "query_file_uri": obj.QueryFileUri, + "query_list": flattenDataprocWorkflowTemplateJobsSparkSqlJobQueryList(obj.QueryList), + "script_variables": obj.ScriptVariables, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsSparkSqlJobLoggingConfig(o interface{}) *WorkflowTemplateJobsSparkSqlJobLoggingConfig { + if o == nil { + return EmptyWorkflowTemplateJobsSparkSqlJobLoggingConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplateJobsSparkSqlJobLoggingConfig + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplateJobsSparkSqlJobLoggingConfig{ + DriverLogLevels: tpgresource.CheckStringMap(obj["driver_log_levels"]), + } +} + +func 
flattenDataprocWorkflowTemplateJobsSparkSqlJobLoggingConfig(obj *WorkflowTemplateJobsSparkSqlJobLoggingConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "driver_log_levels": obj.DriverLogLevels, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsSparkSqlJobQueryList(o interface{}) *WorkflowTemplateJobsSparkSqlJobQueryList { + if o == nil { + return EmptyWorkflowTemplateJobsSparkSqlJobQueryList + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplateJobsSparkSqlJobQueryList + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplateJobsSparkSqlJobQueryList{ + Queries: dcl.ExpandStringArray(obj["queries"]), + } +} + +func flattenDataprocWorkflowTemplateJobsSparkSqlJobQueryList(obj *WorkflowTemplateJobsSparkSqlJobQueryList) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "queries": obj.Queries, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplatePlacement(o interface{}) *WorkflowTemplatePlacement { + if o == nil { + return EmptyWorkflowTemplatePlacement + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplatePlacement + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplatePlacement{ + ClusterSelector: expandDataprocWorkflowTemplatePlacementClusterSelector(obj["cluster_selector"]), + ManagedCluster: expandDataprocWorkflowTemplatePlacementManagedCluster(obj["managed_cluster"]), + } +} + +func flattenDataprocWorkflowTemplatePlacement(obj *WorkflowTemplatePlacement) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "cluster_selector": flattenDataprocWorkflowTemplatePlacementClusterSelector(obj.ClusterSelector), + "managed_cluster": 
// NOTE(review): DCL-generated conversion glue for the Dataproc WorkflowTemplate
// resource. expand* functions turn the Terraform schema representation (a
// singleton []interface{} wrapping a map[string]interface{}) into typed DCL
// structs; flatten* functions do the reverse for state storage. `{{- if ne
// $.TargetVersionName "ga" }}` template directives gate beta-only fields.
// The fragment below closes a flatten function that begins outside this hunk.
flattenDataprocWorkflowTemplatePlacementManagedCluster(obj.ManagedCluster),
	}

	return []interface{}{transformed}

}

// expandDataprocWorkflowTemplatePlacementClusterSelector converts the singleton
// `cluster_selector` schema list into its DCL struct. An absent/empty block maps
// to the Empty* sentinel so the DCL can tell "unset" apart from the zero value.
func expandDataprocWorkflowTemplatePlacementClusterSelector(o interface{}) *WorkflowTemplatePlacementClusterSelector {
	if o == nil {
		return EmptyWorkflowTemplatePlacementClusterSelector
	}
	objArr := o.([]interface{})
	if len(objArr) == 0 || objArr[0] == nil {
		return EmptyWorkflowTemplatePlacementClusterSelector
	}
	obj := objArr[0].(map[string]interface{})
	return &WorkflowTemplatePlacementClusterSelector{
		ClusterLabels: tpgresource.CheckStringMap(obj["cluster_labels"]),
		// StringOrNil: empty string becomes nil so a server default can apply.
		Zone: dcl.StringOrNil(obj["zone"].(string)),
	}
}

// flattenDataprocWorkflowTemplatePlacementClusterSelector renders the DCL struct
// back as a singleton schema list; nil/empty structs flatten to nil.
func flattenDataprocWorkflowTemplatePlacementClusterSelector(obj *WorkflowTemplatePlacementClusterSelector) interface{} {
	if obj == nil || obj.Empty() {
		return nil
	}
	transformed := map[string]interface{}{
		"cluster_labels": obj.ClusterLabels,
		"zone":           obj.Zone,
	}

	return []interface{}{transformed}

}

// expandDataprocWorkflowTemplatePlacementManagedCluster converts the singleton
// `managed_cluster` block into its DCL struct.
func expandDataprocWorkflowTemplatePlacementManagedCluster(o interface{}) *WorkflowTemplatePlacementManagedCluster {
	if o == nil {
		return EmptyWorkflowTemplatePlacementManagedCluster
	}
	objArr := o.([]interface{})
	if len(objArr) == 0 || objArr[0] == nil {
		return EmptyWorkflowTemplatePlacementManagedCluster
	}
	obj := objArr[0].(map[string]interface{})
	return &WorkflowTemplatePlacementManagedCluster{
		ClusterName: dcl.String(obj["cluster_name"].(string)),
		Config:      expandDataprocWorkflowTemplatePlacementManagedClusterConfig(obj["config"]),
		Labels:      tpgresource.CheckStringMap(obj["labels"]),
	}
}

// flattenDataprocWorkflowTemplatePlacementManagedCluster is the inverse of the
// expand function above.
func flattenDataprocWorkflowTemplatePlacementManagedCluster(obj *WorkflowTemplatePlacementManagedCluster) interface{} {
	if obj == nil || obj.Empty() {
		return nil
	}
	transformed := map[string]interface{}{
		"cluster_name": obj.ClusterName,
		"config":       flattenDataprocWorkflowTemplatePlacementManagedClusterConfig(obj.Config),
		"labels":       obj.Labels,
	}

	return []interface{}{transformed}

}

// expandDataprocWorkflowTemplatePlacementManagedClusterConfig converts the
// cluster `config` block; gke_cluster_config and metastore_config are emitted
// only for non-GA provider versions.
func expandDataprocWorkflowTemplatePlacementManagedClusterConfig(o interface{}) *WorkflowTemplatePlacementManagedClusterConfig {
	if o == nil {
		return EmptyWorkflowTemplatePlacementManagedClusterConfig
	}
	objArr := o.([]interface{})
	if len(objArr) == 0 || objArr[0] == nil {
		return EmptyWorkflowTemplatePlacementManagedClusterConfig
	}
	obj := objArr[0].(map[string]interface{})
	return &WorkflowTemplatePlacementManagedClusterConfig{
		AutoscalingConfig: expandDataprocWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig(obj["autoscaling_config"]),
		EncryptionConfig:  expandDataprocWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig(obj["encryption_config"]),
		EndpointConfig:    expandDataprocWorkflowTemplatePlacementManagedClusterConfigEndpointConfig(obj["endpoint_config"]),
		GceClusterConfig:  expandDataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig(obj["gce_cluster_config"]),
{{- if ne $.TargetVersionName "ga" }}
		GkeClusterConfig: expandDataprocWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig(obj["gke_cluster_config"]),
{{- end }}
		InitializationActions: expandDataprocWorkflowTemplatePlacementManagedClusterConfigInitializationActionsArray(obj["initialization_actions"]),
		LifecycleConfig:       expandDataprocWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig(obj["lifecycle_config"]),
		MasterConfig:          expandDataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfig(obj["master_config"]),
{{- if ne $.TargetVersionName "ga" }}
		MetastoreConfig: expandDataprocWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig(obj["metastore_config"]),
{{- end }}
		SecondaryWorkerConfig: expandDataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig(obj["secondary_worker_config"]),
		SecurityConfig:        expandDataprocWorkflowTemplatePlacementManagedClusterConfigSecurityConfig(obj["security_config"]),
		SoftwareConfig:        expandDataprocWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig(obj["software_config"]),
		StagingBucket:         dcl.String(obj["staging_bucket"].(string)),
		TempBucket:            dcl.String(obj["temp_bucket"].(string)),
		WorkerConfig:          expandDataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfig(obj["worker_config"]),
	}
}

// flattenDataprocWorkflowTemplatePlacementManagedClusterConfig is the inverse of
// the expand function above, with the same beta-only field gating.
func flattenDataprocWorkflowTemplatePlacementManagedClusterConfig(obj *WorkflowTemplatePlacementManagedClusterConfig) interface{} {
	if obj == nil || obj.Empty() {
		return nil
	}
	transformed := map[string]interface{}{
		"autoscaling_config": flattenDataprocWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig(obj.AutoscalingConfig),
		"encryption_config":  flattenDataprocWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig(obj.EncryptionConfig),
		"endpoint_config":    flattenDataprocWorkflowTemplatePlacementManagedClusterConfigEndpointConfig(obj.EndpointConfig),
		"gce_cluster_config": flattenDataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig(obj.GceClusterConfig),
{{- if ne $.TargetVersionName "ga" }}
		"gke_cluster_config": flattenDataprocWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig(obj.GkeClusterConfig),
{{- end }}
		"initialization_actions": flattenDataprocWorkflowTemplatePlacementManagedClusterConfigInitializationActionsArray(obj.InitializationActions),
		"lifecycle_config":       flattenDataprocWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig(obj.LifecycleConfig),
		"master_config":          flattenDataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfig(obj.MasterConfig),
{{- if ne $.TargetVersionName "ga" }}
		"metastore_config": flattenDataprocWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig(obj.MetastoreConfig),
{{- end }}
		"secondary_worker_config": flattenDataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig(obj.SecondaryWorkerConfig),
		"security_config":         flattenDataprocWorkflowTemplatePlacementManagedClusterConfigSecurityConfig(obj.SecurityConfig),
		"software_config":         flattenDataprocWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig(obj.SoftwareConfig),
		"staging_bucket":          obj.StagingBucket,
		"temp_bucket":             obj.TempBucket,
		"worker_config":           flattenDataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfig(obj.WorkerConfig),
	}

	return []interface{}{transformed}

}

// autoscaling_config expand/flatten pair.
func expandDataprocWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig(o interface{}) *WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig {
	if o == nil {
		return EmptyWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig
	}
	objArr := o.([]interface{})
	if len(objArr) == 0 || objArr[0] == nil {
		return EmptyWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig
	}
	obj := objArr[0].(map[string]interface{})
	return &WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig{
		Policy: dcl.String(obj["policy"].(string)),
	}
}

func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig(obj *WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig) interface{} {
	if obj == nil || obj.Empty() {
		return nil
	}
	transformed := map[string]interface{}{
		"policy": obj.Policy,
	}

	return []interface{}{transformed}

}

// encryption_config expand/flatten pair.
func expandDataprocWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig(o interface{}) *WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig {
	if o == nil {
		return EmptyWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig
	}
	objArr := o.([]interface{})
	if len(objArr) == 0 || objArr[0] == nil {
		return EmptyWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig
	}
	obj := objArr[0].(map[string]interface{})
	return &WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig{
		GcePdKmsKeyName: dcl.String(obj["gce_pd_kms_key_name"].(string)),
	}
}

func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig(obj *WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig) interface{} {
	if obj == nil || obj.Empty() {
		return nil
	}
	transformed := map[string]interface{}{
		"gce_pd_kms_key_name": obj.GcePdKmsKeyName,
	}

	return []interface{}{transformed}

}

// endpoint_config expand/flatten pair. `http_ports` is output-only: it is
// flattened into state but never expanded from configuration.
func expandDataprocWorkflowTemplatePlacementManagedClusterConfigEndpointConfig(o interface{}) *WorkflowTemplatePlacementManagedClusterConfigEndpointConfig {
	if o == nil {
		return EmptyWorkflowTemplatePlacementManagedClusterConfigEndpointConfig
	}
	objArr := o.([]interface{})
	if len(objArr) == 0 || objArr[0] == nil {
		return EmptyWorkflowTemplatePlacementManagedClusterConfigEndpointConfig
	}
	obj := objArr[0].(map[string]interface{})
	return &WorkflowTemplatePlacementManagedClusterConfigEndpointConfig{
		EnableHttpPortAccess: dcl.Bool(obj["enable_http_port_access"].(bool)),
	}
}

func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigEndpointConfig(obj *WorkflowTemplatePlacementManagedClusterConfigEndpointConfig) interface{} {
	if obj == nil || obj.Empty() {
		return nil
	}
	transformed := map[string]interface{}{
		"enable_http_port_access": obj.EnableHttpPortAccess,
		"http_ports":              obj.HttpPorts,
	}

	return []interface{}{transformed}

}

// gce_cluster_config expand/flatten pair.
func expandDataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig(o interface{}) *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig {
	if o == nil {
		return EmptyWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig
	}
	objArr := o.([]interface{})
	if len(objArr) == 0 || objArr[0] == nil {
		return EmptyWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig
	}
	obj := objArr[0].(map[string]interface{})
	return &WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig{
		InternalIPOnly:          dcl.Bool(obj["internal_ip_only"].(bool)),
		Metadata:                tpgresource.CheckStringMap(obj["metadata"]),
		Network:                 dcl.String(obj["network"].(string)),
		NodeGroupAffinity:       expandDataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity(obj["node_group_affinity"]),
		PrivateIPv6GoogleAccess: WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnumRef(obj["private_ipv6_google_access"].(string)),
		ReservationAffinity:     expandDataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity(obj["reservation_affinity"]),
		ServiceAccount:          dcl.String(obj["service_account"].(string)),
		ServiceAccountScopes:    dcl.ExpandStringArray(obj["service_account_scopes"]),
		ShieldedInstanceConfig:  expandDataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig(obj["shielded_instance_config"]),
		Subnetwork:              dcl.String(obj["subnetwork"].(string)),
		Tags:                    dcl.ExpandStringArray(obj["tags"]),
		Zone:                    dcl.StringOrNil(obj["zone"].(string)),
	}
}

func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig(obj *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig) interface{} {
	if obj == nil || obj.Empty() {
		return nil
	}
	transformed := map[string]interface{}{
		"internal_ip_only":           obj.InternalIPOnly,
		"metadata":                   obj.Metadata,
		"network":                    obj.Network,
		"node_group_affinity":        flattenDataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity(obj.NodeGroupAffinity),
		"private_ipv6_google_access": obj.PrivateIPv6GoogleAccess,
		"reservation_affinity":       flattenDataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity(obj.ReservationAffinity),
		"service_account":            obj.ServiceAccount,
		"service_account_scopes":     obj.ServiceAccountScopes,
		"shielded_instance_config":   flattenDataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig(obj.ShieldedInstanceConfig),
		"subnetwork":                 obj.Subnetwork,
		"tags":                       obj.Tags,
		"zone":                       obj.Zone,
	}

	return []interface{}{transformed}

}

// node_group_affinity expand/flatten pair.
func expandDataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity(o interface{}) *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity {
	if o == nil {
		return EmptyWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity
	}
	objArr := o.([]interface{})
	if len(objArr) == 0 || objArr[0] == nil {
		return EmptyWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity
	}
	obj := objArr[0].(map[string]interface{})
	return &WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity{
		NodeGroup: dcl.String(obj["node_group"].(string)),
	}
}

func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity(obj *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity) interface{} {
	if obj == nil || obj.Empty() {
		return nil
	}
	transformed := map[string]interface{}{
		"node_group": obj.NodeGroup,
	}

	return []interface{}{transformed}

}

// reservation_affinity expand/flatten pair.
func expandDataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity(o interface{}) *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity {
	if o == nil {
		return EmptyWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity
	}
	objArr := o.([]interface{})
	if len(objArr) == 0 || objArr[0] == nil {
		return EmptyWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity
	}
	obj := objArr[0].(map[string]interface{})
	return &WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity{
		ConsumeReservationType: WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnumRef(obj["consume_reservation_type"].(string)),
		Key:                    dcl.String(obj["key"].(string)),
		Values:                 dcl.ExpandStringArray(obj["values"]),
	}
}

func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity(obj *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity) interface{} {
	if obj == nil || obj.Empty() {
		return nil
	}
	transformed := map[string]interface{}{
		"consume_reservation_type": obj.ConsumeReservationType,
		"key":                      obj.Key,
		"values":                   obj.Values,
	}

	return []interface{}{transformed}

}

// shielded_instance_config expand/flatten pair.
func expandDataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig(o interface{}) *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig {
	if o == nil {
		return EmptyWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig
	}
	objArr := o.([]interface{})
	if len(objArr) == 0 || objArr[0] == nil {
		return EmptyWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig
	}
	obj := objArr[0].(map[string]interface{})
	return &WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig{
		EnableIntegrityMonitoring: dcl.Bool(obj["enable_integrity_monitoring"].(bool)),
		EnableSecureBoot:          dcl.Bool(obj["enable_secure_boot"].(bool)),
		EnableVtpm:                dcl.Bool(obj["enable_vtpm"].(bool)),
	}
}

func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig(obj *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig) interface{} {
	if obj == nil || obj.Empty() {
		return nil
	}
	transformed := map[string]interface{}{
		"enable_integrity_monitoring": obj.EnableIntegrityMonitoring,
		"enable_secure_boot":          obj.EnableSecureBoot,
		"enable_vtpm":                 obj.EnableVtpm,
	}

	return []interface{}{transformed}

}
{{- if ne $.TargetVersionName "ga" }}

// gke_cluster_config (beta-only) expand/flatten pair.
func expandDataprocWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig(o interface{}) *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig {
	if o == nil {
		return EmptyWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig
	}
	objArr := o.([]interface{})
	if len(objArr) == 0 || objArr[0] == nil {
		return EmptyWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig
	}
	obj := objArr[0].(map[string]interface{})
	return &WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig{
		NamespacedGkeDeploymentTarget: expandDataprocWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(obj["namespaced_gke_deployment_target"]),
	}
}

func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig(obj *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig) interface{} {
	if obj == nil || obj.Empty() {
		return nil
	}
	transformed := map[string]interface{}{
		"namespaced_gke_deployment_target": flattenDataprocWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(obj.NamespacedGkeDeploymentTarget),
	}

	return []interface{}{transformed}

}

// namespaced_gke_deployment_target (beta-only) expand/flatten pair.
func expandDataprocWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(o interface{}) *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget {
	if o == nil {
		return EmptyWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget
	}
	objArr := o.([]interface{})
	if len(objArr) == 0 || objArr[0] == nil {
		return EmptyWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget
	}
	obj := objArr[0].(map[string]interface{})
	return &WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget{
		ClusterNamespace: dcl.String(obj["cluster_namespace"].(string)),
		TargetGkeCluster: dcl.String(obj["target_gke_cluster"].(string)),
	}
}

func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(obj *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) interface{} {
	if obj == nil || obj.Empty() {
		return nil
	}
	transformed := map[string]interface{}{
		"cluster_namespace":  obj.ClusterNamespace,
		"target_gke_cluster": obj.TargetGkeCluster,
	}

	return []interface{}{transformed}

}
{{- end }}
// initialization_actions is a list (not a singleton), so the array helper
// expands every element; unlike the MasterConfig accelerator helpers below it
// returns an empty slice, not nil, when the input is absent.
func expandDataprocWorkflowTemplatePlacementManagedClusterConfigInitializationActionsArray(o interface{}) []WorkflowTemplatePlacementManagedClusterConfigInitializationActions {
	if o == nil {
		return make([]WorkflowTemplatePlacementManagedClusterConfigInitializationActions, 0)
	}

	objs := o.([]interface{})
	if len(objs) == 0 || objs[0] == nil {
		return make([]WorkflowTemplatePlacementManagedClusterConfigInitializationActions, 0)
	}

	items := make([]WorkflowTemplatePlacementManagedClusterConfigInitializationActions, 0, len(objs))
	for _, item := range objs {
		i := expandDataprocWorkflowTemplatePlacementManagedClusterConfigInitializationActions(item)
		items = append(items, *i)
	}

	return items
}

func expandDataprocWorkflowTemplatePlacementManagedClusterConfigInitializationActions(o interface{}) *WorkflowTemplatePlacementManagedClusterConfigInitializationActions {
	if o == nil {
		return EmptyWorkflowTemplatePlacementManagedClusterConfigInitializationActions
	}

	obj := o.(map[string]interface{})
	return &WorkflowTemplatePlacementManagedClusterConfigInitializationActions{
		ExecutableFile:   dcl.String(obj["executable_file"].(string)),
		ExecutionTimeout: dcl.String(obj["execution_timeout"].(string)),
	}
}

func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigInitializationActionsArray(objs []WorkflowTemplatePlacementManagedClusterConfigInitializationActions) []interface{} {
	if objs == nil {
		return nil
	}

	items := []interface{}{}
	for _, item := range objs {
		i := flattenDataprocWorkflowTemplatePlacementManagedClusterConfigInitializationActions(&item)
		items = append(items, i)
	}

	return items
}

// List-element flatteners return the bare map (the enclosing *Array helper
// assembles the list), unlike singleton flatteners which wrap in a one-element
// slice.
func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigInitializationActions(obj *WorkflowTemplatePlacementManagedClusterConfigInitializationActions) interface{} {
	if obj == nil || obj.Empty() {
		return nil
	}
	transformed := map[string]interface{}{
		"executable_file":   obj.ExecutableFile,
		"execution_timeout": obj.ExecutionTimeout,
	}

	return transformed

}

// lifecycle_config expand/flatten pair. `idle_start_time` is output-only and
// appears only in the flattener.
func expandDataprocWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig(o interface{}) *WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig {
	if o == nil {
		return EmptyWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig
	}
	objArr := o.([]interface{})
	if len(objArr) == 0 || objArr[0] == nil {
		return EmptyWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig
	}
	obj := objArr[0].(map[string]interface{})
	return &WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig{
		AutoDeleteTime: dcl.String(obj["auto_delete_time"].(string)),
		AutoDeleteTtl:  dcl.String(obj["auto_delete_ttl"].(string)),
		IdleDeleteTtl:  dcl.String(obj["idle_delete_ttl"].(string)),
	}
}

func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig(obj *WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig) interface{} {
	if obj == nil || obj.Empty() {
		return nil
	}
	transformed := map[string]interface{}{
		"auto_delete_time": obj.AutoDeleteTime,
		"auto_delete_ttl":  obj.AutoDeleteTtl,
		"idle_delete_ttl":  obj.IdleDeleteTtl,
		"idle_start_time":  obj.IdleStartTime,
	}

	return []interface{}{transformed}

}

// master_config expand/flatten pair. NOTE(review): unlike most siblings this
// expander returns nil (not an Empty* sentinel) for absent input — presumably
// intentional in the generator; confirm against the DCL diff semantics.
func expandDataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfig(o interface{}) *WorkflowTemplatePlacementManagedClusterConfigMasterConfig {
	if o == nil {
		return nil
	}
	objArr := o.([]interface{})
	if len(objArr) == 0 || objArr[0] == nil {
		return nil
	}
	obj := objArr[0].(map[string]interface{})
	return &WorkflowTemplatePlacementManagedClusterConfigMasterConfig{
		Accelerators:   expandDataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorsArray(obj["accelerators"]),
		DiskConfig:     expandDataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig(obj["disk_config"]),
		Image:          dcl.String(obj["image"].(string)),
		MachineType:    dcl.String(obj["machine_type"].(string)),
		MinCpuPlatform: dcl.StringOrNil(obj["min_cpu_platform"].(string)),
		NumInstances:   dcl.Int64(int64(obj["num_instances"].(int))),
		Preemptibility: WorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnumRef(obj["preemptibility"].(string)),
	}
}

// instance_names, is_preemptible and managed_group_config are output-only.
func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfig(obj *WorkflowTemplatePlacementManagedClusterConfigMasterConfig) interface{} {
	if obj == nil || obj.Empty() {
		return nil
	}
	transformed := map[string]interface{}{
		"accelerators":         flattenDataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorsArray(obj.Accelerators),
		"disk_config":          flattenDataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig(obj.DiskConfig),
		"image":                obj.Image,
		"machine_type":         obj.MachineType,
		"min_cpu_platform":     obj.MinCpuPlatform,
		"num_instances":        obj.NumInstances,
		"preemptibility":       obj.Preemptibility,
		"instance_names":       obj.InstanceNames,
		"is_preemptible":       obj.IsPreemptible,
		"managed_group_config": flattenDataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig(obj.ManagedGroupConfig),
	}

	return []interface{}{transformed}

}
func expandDataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorsArray(o interface{}) []WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators {
	if o == nil {
		return nil
	}

	objs := o.([]interface{})
	if len(objs) == 0 || objs[0] == nil {
		return nil
	}

	items := make([]WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators, 0, len(objs))
	for _, item := range objs {
		i := expandDataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators(item)
		items = append(items, *i)
	}

	return items
}

func expandDataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators(o interface{}) *WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators {
	if o == nil {
		return nil
	}

	obj := o.(map[string]interface{})
	return &WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators{
		AcceleratorCount: dcl.Int64(int64(obj["accelerator_count"].(int))),
		AcceleratorType:  dcl.String(obj["accelerator_type"].(string)),
	}
}

func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorsArray(objs []WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators) []interface{} {
	if objs == nil {
		return nil
	}

	items := []interface{}{}
	for _, item := range objs {
		i := flattenDataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators(&item)
		items = append(items, i)
	}

	return items
}

func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators(obj *WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators) interface{} {
	if obj == nil || obj.Empty() {
		return nil
	}
	transformed := map[string]interface{}{
		"accelerator_count": obj.AcceleratorCount,
		"accelerator_type":  obj.AcceleratorType,
	}

	return transformed

}

func expandDataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig(o interface{}) *WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig {
	if o == nil {
		return nil
	}
	objArr := o.([]interface{})
	if len(objArr) == 0 || objArr[0] == nil {
		return nil
	}
	obj := objArr[0].(map[string]interface{})
	return &WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig{
		BootDiskSizeGb: dcl.Int64(int64(obj["boot_disk_size_gb"].(int))),
		BootDiskType:   dcl.String(obj["boot_disk_type"].(string)),
		NumLocalSsds:   dcl.Int64OrNil(int64(obj["num_local_ssds"].(int))),
	}
}

func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig(obj *WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig) interface{} {
	if obj == nil || obj.Empty() {
		return nil
	}
	transformed := map[string]interface{}{
		"boot_disk_size_gb": obj.BootDiskSizeGb,
		"boot_disk_type":    obj.BootDiskType,
		"num_local_ssds":    obj.NumLocalSsds,
	}

	return []interface{}{transformed}

}

// NOTE(review): The template directives below are deliberately interleaved
// across two functions. In non-GA builds this renders the full ManagedGroupConfig
// flattener plus the Metastore expand/flatten pair; in GA builds everything
// between the if/end is dropped, so the tail of the Metastore flattener closes
// the ManagedGroupConfig flattener instead. Both render branches produce valid
// Go — do not "fix" the brace placement.
func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig(obj *WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig) interface{} {
	if obj == nil || obj.Empty() {
		return nil
	}
	transformed := map[string]interface{}{
		"instance_group_manager_name": obj.InstanceGroupManagerName,
		"instance_template_name":      obj.InstanceTemplateName,
{{- if ne $.TargetVersionName "ga" }}
	}

	return []interface{}{transformed}

}

func expandDataprocWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig(o interface{}) *WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig {
	if o == nil {
		return EmptyWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig
	}
	objArr := o.([]interface{})
	if len(objArr) == 0 || objArr[0] == nil {
		return EmptyWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig
	}
	obj := objArr[0].(map[string]interface{})
	return &WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig{
		DataprocMetastoreService: dcl.String(obj["dataproc_metastore_service"].(string)),
	}
}

func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig(obj *WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig) interface{} {
	if obj == nil || obj.Empty() {
		return nil
	}
	transformed := map[string]interface{}{
		"dataproc_metastore_service": obj.DataprocMetastoreService,
{{- end }}
	}

	return []interface{}{transformed}

}

// secondary_worker_config expand/flatten pair (same shape as master_config).
func expandDataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig(o interface{}) *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig {
	if o == nil {
		return nil
	}
	objArr := o.([]interface{})
	if len(objArr) == 0 || objArr[0] == nil {
		return nil
	}
	obj := objArr[0].(map[string]interface{})
	return &WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig{
		Accelerators:   expandDataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorsArray(obj["accelerators"]),
		DiskConfig:     expandDataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig(obj["disk_config"]),
		Image:          dcl.String(obj["image"].(string)),
		MachineType:    dcl.String(obj["machine_type"].(string)),
		MinCpuPlatform: dcl.StringOrNil(obj["min_cpu_platform"].(string)),
		NumInstances:   dcl.Int64(int64(obj["num_instances"].(int))),
		Preemptibility: WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnumRef(obj["preemptibility"].(string)),
	}
}

func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig(obj *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig) interface{} {
	if obj == nil || obj.Empty() {
		return nil
	}
	transformed := map[string]interface{}{
		"accelerators":         flattenDataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorsArray(obj.Accelerators),
		"disk_config":          flattenDataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig(obj.DiskConfig),
		"image":                obj.Image,
		"machine_type":         obj.MachineType,
		"min_cpu_platform":     obj.MinCpuPlatform,
		"num_instances":        obj.NumInstances,
		"preemptibility":       obj.Preemptibility,
		"instance_names":       obj.InstanceNames,
		"is_preemptible":       obj.IsPreemptible,
		"managed_group_config": flattenDataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig(obj.ManagedGroupConfig),
	}

	return []interface{}{transformed}

}
func expandDataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorsArray(o interface{}) []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators {
	if o == nil {
		return nil
	}

	objs := o.([]interface{})
	if len(objs) == 0 || objs[0] == nil {
		return nil
	}

	items := make([]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators, 0, len(objs))
	for _, item := range objs {
		i := expandDataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators(item)
		items = append(items, *i)
	}

	return items
}

func expandDataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators(o interface{}) *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators {
	if o == nil {
		return nil
	}

	obj := o.(map[string]interface{})
	return &WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators{
		AcceleratorCount: dcl.Int64(int64(obj["accelerator_count"].(int))),
		AcceleratorType:  dcl.String(obj["accelerator_type"].(string)),
	}
}

func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorsArray(objs []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators) []interface{} {
	if objs == nil {
		return nil
	}

	items := []interface{}{}
	for _, item := range objs {
		i := flattenDataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators(&item)
		items = append(items, i)
	}

	return items
}

func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators(obj *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators) interface{} {
	if obj == nil || obj.Empty() {
		return nil
	}
	transformed := map[string]interface{}{
		"accelerator_count": obj.AcceleratorCount,
		"accelerator_type":  obj.AcceleratorType,
	}

	return transformed

}

func expandDataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig(o interface{}) *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig {
	if o == nil {
		return nil
	}
	objArr := o.([]interface{})
	if len(objArr) == 0 || objArr[0] == nil {
		return nil
	}
	obj := objArr[0].(map[string]interface{})
	return &WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig{
		BootDiskSizeGb: dcl.Int64(int64(obj["boot_disk_size_gb"].(int))),
		BootDiskType:   dcl.String(obj["boot_disk_type"].(string)),
		NumLocalSsds:   dcl.Int64OrNil(int64(obj["num_local_ssds"].(int))),
	}
}

func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig(obj *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig) interface{} {
	if obj == nil || obj.Empty() {
		return nil
	}
	transformed := map[string]interface{}{
		"boot_disk_size_gb": obj.BootDiskSizeGb,
		"boot_disk_type":    obj.BootDiskType,
		"num_local_ssds":    obj.NumLocalSsds,
	}

	return []interface{}{transformed}

}

// managed_group_config is output-only, so only a flattener exists.
func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig(obj *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig) interface{} {
	if obj == nil || obj.Empty() {
		return nil
	}
	transformed := map[string]interface{}{
		"instance_group_manager_name": obj.InstanceGroupManagerName,
		"instance_template_name":      obj.InstanceTemplateName,
	}

	return []interface{}{transformed}

}

// security_config expand/flatten pair.
func expandDataprocWorkflowTemplatePlacementManagedClusterConfigSecurityConfig(o interface{}) *WorkflowTemplatePlacementManagedClusterConfigSecurityConfig {
	if o == nil {
		return EmptyWorkflowTemplatePlacementManagedClusterConfigSecurityConfig
	}
	objArr := o.([]interface{})
	if len(objArr) == 0 || objArr[0] == nil {
		return EmptyWorkflowTemplatePlacementManagedClusterConfigSecurityConfig
	}
	obj := objArr[0].(map[string]interface{})
	return &WorkflowTemplatePlacementManagedClusterConfigSecurityConfig{
		KerberosConfig: expandDataprocWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig(obj["kerberos_config"]),
	}
}

func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigSecurityConfig(obj *WorkflowTemplatePlacementManagedClusterConfigSecurityConfig) interface{} {
	if obj == nil || obj.Empty() {
		return nil
	}
	transformed := map[string]interface{}{
		"kerberos_config": flattenDataprocWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig(obj.KerberosConfig),
	}

	return []interface{}{transformed}

}

// kerberos_config expand/flatten pair.
func expandDataprocWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig(o interface{}) *WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig {
	if o == nil {
		return EmptyWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig
	}
	objArr := o.([]interface{})
	if len(objArr) == 0 || objArr[0] == nil {
		return EmptyWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig
	}
	obj := objArr[0].(map[string]interface{})
	return &WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig{
		CrossRealmTrustAdminServer:    dcl.String(obj["cross_realm_trust_admin_server"].(string)),
		CrossRealmTrustKdc:            dcl.String(obj["cross_realm_trust_kdc"].(string)),
		CrossRealmTrustRealm:          dcl.String(obj["cross_realm_trust_realm"].(string)),
		CrossRealmTrustSharedPassword: dcl.String(obj["cross_realm_trust_shared_password"].(string)),
		EnableKerberos:                dcl.Bool(obj["enable_kerberos"].(bool)),
		KdcDbKey:                      dcl.String(obj["kdc_db_key"].(string)),
		KeyPassword:                   dcl.String(obj["key_password"].(string)),
		Keystore:                      dcl.String(obj["keystore"].(string)),
		KeystorePassword:              dcl.String(obj["keystore_password"].(string)),
		KmsKey:                        dcl.String(obj["kms_key"].(string)),
		Realm:                         dcl.String(obj["realm"].(string)),
		RootPrincipalPassword:         dcl.String(obj["root_principal_password"].(string)),
		TgtLifetimeHours:              dcl.Int64(int64(obj["tgt_lifetime_hours"].(int))),
		Truststore:                    dcl.String(obj["truststore"].(string)),
		TruststorePassword:            dcl.String(obj["truststore_password"].(string)),
	}
}

func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig(obj *WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig) interface{} {
	if obj == nil || obj.Empty() {
		return nil
	}
	transformed := map[string]interface{}{
		"cross_realm_trust_admin_server":    obj.CrossRealmTrustAdminServer,
		"cross_realm_trust_kdc":             obj.CrossRealmTrustKdc,
		"cross_realm_trust_realm":           obj.CrossRealmTrustRealm,
		"cross_realm_trust_shared_password": obj.CrossRealmTrustSharedPassword,
		"enable_kerberos":                   obj.EnableKerberos,
		"kdc_db_key":                        obj.KdcDbKey,
		"key_password":                      obj.KeyPassword,
		"keystore":                          obj.Keystore,
		"keystore_password":                 obj.KeystorePassword,
		"kms_key":                           obj.KmsKey,
		"realm":                             obj.Realm,
		"root_principal_password":           obj.RootPrincipalPassword,
		"tgt_lifetime_hours":                obj.TgtLifetimeHours,
		"truststore":                        obj.Truststore,
		"truststore_password":               obj.TruststorePassword,
	}

	return []interface{}{transformed}

}

// software_config expand/flatten pair.
func expandDataprocWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig(o interface{}) *WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig {
	if o == nil {
		return EmptyWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig
	}
	objArr := o.([]interface{})
	if len(objArr) == 0 || objArr[0] == nil {
		return EmptyWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig
	}
	obj := objArr[0].(map[string]interface{})
	return &WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig{
		ImageVersion:       dcl.String(obj["image_version"].(string)),
		OptionalComponents: expandDataprocWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsArray(obj["optional_components"]),
		Properties:         tpgresource.CheckStringMap(obj["properties"]),
	}
}

func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig(obj *WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig) interface{} {
	if obj == nil || obj.Empty() {
		return nil
	}
	transformed := map[string]interface{}{
		"image_version":       obj.ImageVersion,
		"optional_components": flattenDataprocWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsArray(obj.OptionalComponents),
		"properties":          obj.Properties,
	}

	return []interface{}{transformed}

}

// worker_config expand/flatten pair (same shape as master_config).
func expandDataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfig(o interface{}) *WorkflowTemplatePlacementManagedClusterConfigWorkerConfig {
	if o == nil {
		return nil
	}
	objArr := o.([]interface{})
	if len(objArr) == 0 || objArr[0] == nil {
		return nil
	}
	obj := objArr[0].(map[string]interface{})
	return &WorkflowTemplatePlacementManagedClusterConfigWorkerConfig{
		Accelerators:   expandDataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorsArray(obj["accelerators"]),
		DiskConfig:     expandDataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig(obj["disk_config"]),
		Image:          dcl.String(obj["image"].(string)),
		MachineType:    dcl.String(obj["machine_type"].(string)),
		MinCpuPlatform: dcl.StringOrNil(obj["min_cpu_platform"].(string)),
		NumInstances:   dcl.Int64(int64(obj["num_instances"].(int))),
		Preemptibility: WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnumRef(obj["preemptibility"].(string)),
	}
}

func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfig(obj *WorkflowTemplatePlacementManagedClusterConfigWorkerConfig) interface{} {
	if obj == nil || obj.Empty() {
		return nil
	}
	transformed := map[string]interface{}{
		"accelerators":         flattenDataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorsArray(obj.Accelerators),
		"disk_config":          flattenDataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig(obj.DiskConfig),
		"image":                obj.Image,
		"machine_type":         obj.MachineType,
		"min_cpu_platform":     obj.MinCpuPlatform,
		"num_instances":        obj.NumInstances,
		"preemptibility":       obj.Preemptibility,
		"instance_names":       obj.InstanceNames,
		"is_preemptible":       obj.IsPreemptible,
		"managed_group_config": flattenDataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig(obj.ManagedGroupConfig),
	}

	return []interface{}{transformed}

}
func expandDataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorsArray(o interface{}) []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators {
	if o == nil {
		return nil
	}

	objs := o.([]interface{})
	if len(objs) == 0 || objs[0] == nil {
		return nil
	}

	items := make([]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators, 0, len(objs))
	for _, item := range objs {
		i := expandDataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators(item)
		items = append(items, *i)
	}

	return items
}

func expandDataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators(o interface{}) *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators {
	if o == nil {
		return nil
	}

	obj := o.(map[string]interface{})
	return &WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators{
		AcceleratorCount: dcl.Int64(int64(obj["accelerator_count"].(int))),
		AcceleratorType:  dcl.String(obj["accelerator_type"].(string)),
	}
}

// NOTE(review): truncated here — the next function continues past this hunk.
func
flattenDataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorsArray(objs []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenDataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators(&item) + items = append(items, i) + } + + return items +} + +func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators(obj *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "accelerator_count": obj.AcceleratorCount, + "accelerator_type": obj.AcceleratorType, + } + + return transformed + +} + +func expandDataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig(o interface{}) *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig{ + BootDiskSizeGb: dcl.Int64(int64(obj["boot_disk_size_gb"].(int))), + BootDiskType: dcl.String(obj["boot_disk_type"].(string)), + NumLocalSsds: dcl.Int64OrNil(int64(obj["num_local_ssds"].(int))), + } +} + +func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig(obj *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "boot_disk_size_gb": obj.BootDiskSizeGb, + "boot_disk_type": obj.BootDiskType, + "num_local_ssds": obj.NumLocalSsds, + } + + return []interface{}{transformed} + +} + +func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig(obj 
*WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "instance_group_manager_name": obj.InstanceGroupManagerName, + "instance_template_name": obj.InstanceTemplateName, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateEncryptionConfig(o interface{}) *WorkflowTemplateEncryptionConfig { + if o == nil { + return EmptyWorkflowTemplateEncryptionConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplateEncryptionConfig + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplateEncryptionConfig{ + KmsKey: dcl.String(obj["kms_key"].(string)), + } +} + +func flattenDataprocWorkflowTemplateEncryptionConfig(obj *WorkflowTemplateEncryptionConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "kms_key": obj.KmsKey, + } + + return []interface{}{transformed} + +} +func expandDataprocWorkflowTemplateParametersArray(o interface{}) []WorkflowTemplateParameters { + if o == nil { + return make([]WorkflowTemplateParameters, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return make([]WorkflowTemplateParameters, 0) + } + + items := make([]WorkflowTemplateParameters, 0, len(objs)) + for _, item := range objs { + i := expandDataprocWorkflowTemplateParameters(item) + items = append(items, *i) + } + + return items +} + +func expandDataprocWorkflowTemplateParameters(o interface{}) *WorkflowTemplateParameters { + if o == nil { + return EmptyWorkflowTemplateParameters + } + + obj := o.(map[string]interface{}) + return &WorkflowTemplateParameters{ + Fields: dcl.ExpandStringArray(obj["fields"]), + Name: dcl.String(obj["name"].(string)), + Description: dcl.String(obj["description"].(string)), + Validation: 
expandDataprocWorkflowTemplateParametersValidation(obj["validation"]), + } +} + +func flattenDataprocWorkflowTemplateParametersArray(objs []WorkflowTemplateParameters) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenDataprocWorkflowTemplateParameters(&item) + items = append(items, i) + } + + return items +} + +func flattenDataprocWorkflowTemplateParameters(obj *WorkflowTemplateParameters) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "fields": obj.Fields, + "name": obj.Name, + "description": obj.Description, + "validation": flattenDataprocWorkflowTemplateParametersValidation(obj.Validation), + } + + return transformed + +} + +func expandDataprocWorkflowTemplateParametersValidation(o interface{}) *WorkflowTemplateParametersValidation { + if o == nil { + return EmptyWorkflowTemplateParametersValidation + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplateParametersValidation + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplateParametersValidation{ + Regex: expandDataprocWorkflowTemplateParametersValidationRegex(obj["regex"]), + Values: expandDataprocWorkflowTemplateParametersValidationValues(obj["values"]), + } +} + +func flattenDataprocWorkflowTemplateParametersValidation(obj *WorkflowTemplateParametersValidation) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "regex": flattenDataprocWorkflowTemplateParametersValidationRegex(obj.Regex), + "values": flattenDataprocWorkflowTemplateParametersValidationValues(obj.Values), + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateParametersValidationRegex(o interface{}) *WorkflowTemplateParametersValidationRegex { + if o == nil { + return EmptyWorkflowTemplateParametersValidationRegex + } + objArr := o.([]interface{}) + if 
len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplateParametersValidationRegex + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplateParametersValidationRegex{ + Regexes: dcl.ExpandStringArray(obj["regexes"]), + } +} + +func flattenDataprocWorkflowTemplateParametersValidationRegex(obj *WorkflowTemplateParametersValidationRegex) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "regexes": obj.Regexes, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateParametersValidationValues(o interface{}) *WorkflowTemplateParametersValidationValues { + if o == nil { + return EmptyWorkflowTemplateParametersValidationValues + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyWorkflowTemplateParametersValidationValues + } + obj := objArr[0].(map[string]interface{}) + return &WorkflowTemplateParametersValidationValues{ + Values: dcl.ExpandStringArray(obj["values"]), + } +} + +func flattenDataprocWorkflowTemplateParametersValidationValues(obj *WorkflowTemplateParametersValidationValues) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "values": obj.Values, + } + + return []interface{}{transformed} + +} + +func flattenDataprocWorkflowTemplateLabels(v map[string]string, d *schema.ResourceData) interface{} { + if v == nil { + return nil + } + + transformed := make(map[string]interface{}) + if l, ok := d.Get("labels").(map[string]interface{}); ok { + for k := range l { + transformed[k] = v[k] + } + } + + return transformed +} + +func flattenDataprocWorkflowTemplateTerraformLabels(v map[string]string, d *schema.ResourceData) interface{} { + if v == nil { + return nil + } + + transformed := make(map[string]interface{}) + if l, ok := d.Get("terraform_labels").(map[string]interface{}); ok { + for k := range l { + transformed[k] = v[k] + } + } + + return transformed +} + 
+func flattenDataprocWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsArray(obj []WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum) interface{} { + if obj == nil { + return nil + } + items := []string{} + for _, item := range obj { + items = append(items, string(item)) + } + return items +} +func expandDataprocWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsArray(o interface{}) []WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum { + objs := o.([]interface{}) + items := make([]WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum, 0, len(objs)) + for _, item := range objs { + i := WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnumRef(item.(string)) + items = append(items, *i) + } + return items +} diff --git a/mmv1/third_party/terraform/services/dataproc/resource_dataproc_workflow_template_sweeper.go b/mmv1/third_party/terraform/services/dataproc/resource_dataproc_workflow_template_sweeper.go new file mode 100644 index 000000000000..ee81f29ac0a9 --- /dev/null +++ b/mmv1/third_party/terraform/services/dataproc/resource_dataproc_workflow_template_sweeper.go @@ -0,0 +1,53 @@ +package dataproc + +import ( + "context" + "log" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" +) + +func init() { + sweeper.AddTestSweepersLegacy("DataprocWorkflowTemplate", testSweepDataprocWorkflowTemplate) +} + +func testSweepDataprocWorkflowTemplate(region string) error { + log.Print("[INFO][SWEEPER_LOG] Starting sweeper for DataprocWorkflowTemplate") + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + 
log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to be used for Delete arguments. + d := map[string]string{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + } + + client := NewDCLDataprocClient(config, config.UserAgent, "", 0) + err = client.DeleteAllWorkflowTemplate(context.Background(), d["project"], d["location"], isDeletableDataprocWorkflowTemplate) + if err != nil { + return err + } + return nil +} + +func isDeletableDataprocWorkflowTemplate(r *WorkflowTemplate) bool { + return sweeper.IsSweepableTestResource(*r.Name) +} diff --git a/mmv1/third_party/terraform/services/dataproc/workflow_template.go.tmpl b/mmv1/third_party/terraform/services/dataproc/workflow_template.go.tmpl new file mode 100644 index 000000000000..61f89747e24d --- /dev/null +++ b/mmv1/third_party/terraform/services/dataproc/workflow_template.go.tmpl @@ -0,0 +1,3645 @@ +package dataproc + +import ( + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "time" + +{{- if ne $.TargetVersionName "ga" }} + "google.golang.org/api/googleapi" +{{- end }} + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +{{- if eq $.TargetVersionName "ga" }} + "google.golang.org/api/googleapi" +{{- end }} +) + +type WorkflowTemplate struct { + Name *string `json:"name"` + Version *int64 `json:"version"` + CreateTime *string `json:"createTime"` + UpdateTime *string `json:"updateTime"` + Labels map[string]string `json:"labels"` + EncryptionConfig *WorkflowTemplateEncryptionConfig `json:"encryptionConfig"` + Placement *WorkflowTemplatePlacement `json:"placement"` + Jobs []WorkflowTemplateJobs `json:"jobs"` + Parameters []WorkflowTemplateParameters `json:"parameters"` + DagTimeout *string `json:"dagTimeout"` + Project *string `json:"project"` + Location *string `json:"location"` +} + +func (r 
*WorkflowTemplate) String() string { + return dcl.SprintResource(r) +} + +// The enum WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum. +type WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum string + +// WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnumRef returns a *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum with the value of string s +// If the empty string is provided, nil is returned. +func WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnumRef(s string) *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum { + v := WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum(s) + return &v +} + +func (v WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED", "INHERIT_FROM_SUBNETWORK", "OUTBOUND", "BIDIRECTIONAL"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum. +type WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum string + +// WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnumRef returns a *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum with the value of string s +// If the empty string is provided, nil is returned. 
+func WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnumRef(s string) *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum { + v := WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum(s) + return &v +} + +func (v WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"TYPE_UNSPECIFIED", "NO_RESERVATION", "ANY_RESERVATION", "SPECIFIC_RESERVATION"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum WorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum. +type WorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum string + +// WorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnumRef returns a *WorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum with the value of string s +// If the empty string is provided, nil is returned. +func WorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnumRef(s string) *WorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum { + v := WorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum(s) + return &v +} + +func (v WorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. 
+ return nil + } + for _, s := range []string{"PREEMPTIBILITY_UNSPECIFIED", "NON_PREEMPTIBLE", "PREEMPTIBLE"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "WorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum. +type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum string + +// WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnumRef returns a *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum with the value of string s +// If the empty string is provided, nil is returned. +func WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnumRef(s string) *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum { + v := WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum(s) + return &v +} + +func (v WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"PREEMPTIBILITY_UNSPECIFIED", "NON_PREEMPTIBLE", "PREEMPTIBLE"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum. +type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum string + +// WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnumRef returns a *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum with the value of string s +// If the empty string is provided, nil is returned. 
+func WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnumRef(s string) *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum { + v := WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum(s) + return &v +} + +func (v WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"PREEMPTIBILITY_UNSPECIFIED", "NON_PREEMPTIBLE", "PREEMPTIBLE"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum. +type WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum string + +// WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnumRef returns a *WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum with the value of string s +// If the empty string is provided, nil is returned. +func WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnumRef(s string) *WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum { + v := WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum(s) + return &v +} + +func (v WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. 
+ return nil + } + for _, s := range []string{"COMPONENT_UNSPECIFIED", "ANACONDA", "DOCKER", "DRUID", "FLINK", "HBASE", "HIVE_WEBHCAT", "JUPYTER", "KERBEROS", "PRESTO", "RANGER", "SOLR", "ZEPPELIN", "ZOOKEEPER"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum", + Value: string(v), + Valid: []string{}, + } +} + +type WorkflowTemplateEncryptionConfig struct { + empty bool `json:"-"` + KmsKey *string `json:"kmsKey"` +} + +type jsonWorkflowTemplateEncryptionConfig WorkflowTemplateEncryptionConfig + +func (r *WorkflowTemplateEncryptionConfig) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplateEncryptionConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplateEncryptionConfig + } else { + + r.KmsKey = res.KmsKey + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplateEncryptionConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplateEncryptionConfig *WorkflowTemplateEncryptionConfig = &WorkflowTemplateEncryptionConfig{empty: true} + +func (r *WorkflowTemplateEncryptionConfig) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplateEncryptionConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplateEncryptionConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplatePlacement struct { + empty bool `json:"-"` + ManagedCluster *WorkflowTemplatePlacementManagedCluster `json:"managedCluster"` + ClusterSelector *WorkflowTemplatePlacementClusterSelector `json:"clusterSelector"` +} + +type jsonWorkflowTemplatePlacement WorkflowTemplatePlacement + +func (r *WorkflowTemplatePlacement) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplatePlacement + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplatePlacement + } else { + + r.ManagedCluster = res.ManagedCluster + + r.ClusterSelector = res.ClusterSelector + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplatePlacement is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplatePlacement *WorkflowTemplatePlacement = &WorkflowTemplatePlacement{empty: true} + +func (r *WorkflowTemplatePlacement) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplatePlacement) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplatePlacement) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplatePlacementManagedCluster struct { + empty bool `json:"-"` + ClusterName *string `json:"clusterName"` + Config *WorkflowTemplatePlacementManagedClusterConfig `json:"config"` + Labels map[string]string `json:"labels"` +} + +type jsonWorkflowTemplatePlacementManagedCluster WorkflowTemplatePlacementManagedCluster + +func (r *WorkflowTemplatePlacementManagedCluster) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplatePlacementManagedCluster + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplatePlacementManagedCluster + } else { + + r.ClusterName = res.ClusterName + + r.Config = res.Config + + r.Labels = res.Labels + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplatePlacementManagedCluster is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplatePlacementManagedCluster *WorkflowTemplatePlacementManagedCluster = &WorkflowTemplatePlacementManagedCluster{empty: true} + +func (r *WorkflowTemplatePlacementManagedCluster) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplatePlacementManagedCluster) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplatePlacementManagedCluster) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplatePlacementManagedClusterConfig struct { + empty bool `json:"-"` + StagingBucket *string `json:"stagingBucket"` + TempBucket *string `json:"tempBucket"` + GceClusterConfig *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig `json:"gceClusterConfig"` + MasterConfig *WorkflowTemplatePlacementManagedClusterConfigMasterConfig `json:"masterConfig"` + WorkerConfig *WorkflowTemplatePlacementManagedClusterConfigWorkerConfig `json:"workerConfig"` + SecondaryWorkerConfig *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig `json:"secondaryWorkerConfig"` + SoftwareConfig *WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig `json:"softwareConfig"` + InitializationActions []WorkflowTemplatePlacementManagedClusterConfigInitializationActions `json:"initializationActions"` + EncryptionConfig *WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig `json:"encryptionConfig"` + AutoscalingConfig *WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig `json:"autoscalingConfig"` + SecurityConfig *WorkflowTemplatePlacementManagedClusterConfigSecurityConfig `json:"securityConfig"` + LifecycleConfig *WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig `json:"lifecycleConfig"` + EndpointConfig *WorkflowTemplatePlacementManagedClusterConfigEndpointConfig `json:"endpointConfig"` +{{- if ne 
$.TargetVersionName "ga" }} + GkeClusterConfig *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig `json:"gkeClusterConfig"` + MetastoreConfig *WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig `json:"metastoreConfig"` +{{- end }} +} + +type jsonWorkflowTemplatePlacementManagedClusterConfig WorkflowTemplatePlacementManagedClusterConfig + +func (r *WorkflowTemplatePlacementManagedClusterConfig) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplatePlacementManagedClusterConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplatePlacementManagedClusterConfig + } else { + + r.StagingBucket = res.StagingBucket + + r.TempBucket = res.TempBucket + + r.GceClusterConfig = res.GceClusterConfig + + r.MasterConfig = res.MasterConfig + + r.WorkerConfig = res.WorkerConfig + + r.SecondaryWorkerConfig = res.SecondaryWorkerConfig + + r.SoftwareConfig = res.SoftwareConfig + + r.InitializationActions = res.InitializationActions + + r.EncryptionConfig = res.EncryptionConfig + + r.AutoscalingConfig = res.AutoscalingConfig + + r.SecurityConfig = res.SecurityConfig + + r.LifecycleConfig = res.LifecycleConfig + + r.EndpointConfig = res.EndpointConfig +{{- if ne $.TargetVersionName "ga" }} + + r.GkeClusterConfig = res.GkeClusterConfig + + r.MetastoreConfig = res.MetastoreConfig +{{- end }} + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplatePlacementManagedClusterConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplatePlacementManagedClusterConfig *WorkflowTemplatePlacementManagedClusterConfig = &WorkflowTemplatePlacementManagedClusterConfig{empty: true} + +func (r *WorkflowTemplatePlacementManagedClusterConfig) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplatePlacementManagedClusterConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplatePlacementManagedClusterConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig struct { + empty bool `json:"-"` + Zone *string `json:"zone"` + Network *string `json:"network"` + Subnetwork *string `json:"subnetwork"` + InternalIPOnly *bool `json:"internalIPOnly"` + PrivateIPv6GoogleAccess *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum `json:"privateIPv6GoogleAccess"` + ServiceAccount *string `json:"serviceAccount"` + ServiceAccountScopes []string `json:"serviceAccountScopes"` + Tags []string `json:"tags"` + Metadata map[string]string `json:"metadata"` + ReservationAffinity *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity `json:"reservationAffinity"` + NodeGroupAffinity *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity `json:"nodeGroupAffinity"` + ShieldedInstanceConfig *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig `json:"shieldedInstanceConfig"` +} + +type jsonWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig + +func (r *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig + if err := 
json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig + } else { + + r.Zone = res.Zone + + r.Network = res.Network + + r.Subnetwork = res.Subnetwork + + r.InternalIPOnly = res.InternalIPOnly + + r.PrivateIPv6GoogleAccess = res.PrivateIPv6GoogleAccess + + r.ServiceAccount = res.ServiceAccount + + r.ServiceAccountScopes = res.ServiceAccountScopes + + r.Tags = res.Tags + + r.Metadata = res.Metadata + + r.ReservationAffinity = res.ReservationAffinity + + r.NodeGroupAffinity = res.NodeGroupAffinity + + r.ShieldedInstanceConfig = res.ShieldedInstanceConfig + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig = &WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig{empty: true} + +func (r *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity struct { + empty bool `json:"-"` + ConsumeReservationType *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum `json:"consumeReservationType"` + Key *string `json:"key"` + Values []string `json:"values"` +} + +type jsonWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity + +func (r *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity + } else { + + r.ConsumeReservationType = res.ConsumeReservationType + + r.Key = res.Key + + r.Values = res.Values + + } + return nil +} + +// This object is used to assert a desired state where this 
WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity = &WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity{empty: true} + +func (r *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity struct { + empty bool `json:"-"` + NodeGroup *string `json:"nodeGroup"` +} + +type jsonWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity + +func (r *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity + } else { + + r.NodeGroup = res.NodeGroup + + } + return nil +} + +// This object is used to assert a desired 
state where this WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity = &WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity{empty: true} + +func (r *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig struct { + empty bool `json:"-"` + EnableSecureBoot *bool `json:"enableSecureBoot"` + EnableVtpm *bool `json:"enableVtpm"` + EnableIntegrityMonitoring *bool `json:"enableIntegrityMonitoring"` +} + +type jsonWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig + +func (r *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = 
*EmptyWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig + } else { + + r.EnableSecureBoot = res.EnableSecureBoot + + r.EnableVtpm = res.EnableVtpm + + r.EnableIntegrityMonitoring = res.EnableIntegrityMonitoring + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig = &WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig{empty: true} + +func (r *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplatePlacementManagedClusterConfigMasterConfig struct { + empty bool `json:"-"` + NumInstances *int64 `json:"numInstances"` + InstanceNames []string `json:"instanceNames"` + Image *string `json:"image"` + MachineType *string `json:"machineType"` + DiskConfig *WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig `json:"diskConfig"` + IsPreemptible *bool `json:"isPreemptible"` + Preemptibility *WorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum `json:"preemptibility"` + ManagedGroupConfig 
*WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig `json:"managedGroupConfig"` + Accelerators []WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators `json:"accelerators"` + MinCpuPlatform *string `json:"minCpuPlatform"` +} + +type jsonWorkflowTemplatePlacementManagedClusterConfigMasterConfig WorkflowTemplatePlacementManagedClusterConfigMasterConfig + +func (r *WorkflowTemplatePlacementManagedClusterConfigMasterConfig) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplatePlacementManagedClusterConfigMasterConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplatePlacementManagedClusterConfigMasterConfig + } else { + + r.NumInstances = res.NumInstances + + r.InstanceNames = res.InstanceNames + + r.Image = res.Image + + r.MachineType = res.MachineType + + r.DiskConfig = res.DiskConfig + + r.IsPreemptible = res.IsPreemptible + + r.Preemptibility = res.Preemptibility + + r.ManagedGroupConfig = res.ManagedGroupConfig + + r.Accelerators = res.Accelerators + + r.MinCpuPlatform = res.MinCpuPlatform + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplatePlacementManagedClusterConfigMasterConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplatePlacementManagedClusterConfigMasterConfig *WorkflowTemplatePlacementManagedClusterConfigMasterConfig = &WorkflowTemplatePlacementManagedClusterConfigMasterConfig{empty: true} + +func (r *WorkflowTemplatePlacementManagedClusterConfigMasterConfig) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigMasterConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigMasterConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig struct { + empty bool `json:"-"` + BootDiskType *string `json:"bootDiskType"` + BootDiskSizeGb *int64 `json:"bootDiskSizeGb"` + NumLocalSsds *int64 `json:"numLocalSsds"` +} + +type jsonWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig + +func (r *WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig + } else { + + r.BootDiskType = res.BootDiskType + + r.BootDiskSizeGb = res.BootDiskSizeGb + + r.NumLocalSsds = res.NumLocalSsds + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig *WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig = &WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig{empty: true} + +func (r *WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig struct { + empty bool `json:"-"` + InstanceTemplateName *string `json:"instanceTemplateName"` + InstanceGroupManagerName *string `json:"instanceGroupManagerName"` +} + +type jsonWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig + +func (r *WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig + } else { + + r.InstanceTemplateName = res.InstanceTemplateName + + r.InstanceGroupManagerName = res.InstanceGroupManagerName + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig is +// empty. 
Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig *WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig = &WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig{empty: true} + +func (r *WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators struct { + empty bool `json:"-"` + AcceleratorType *string `json:"acceleratorType"` + AcceleratorCount *int64 `json:"acceleratorCount"` +} + +type jsonWorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators + +func (r *WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators + } else { + + r.AcceleratorType = res.AcceleratorType + + r.AcceleratorCount = res.AcceleratorCount + + } + return nil +} + +// This object is used to assert a desired state where this 
WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyWorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators *WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators = &WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators{empty: true} + +func (r *WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplatePlacementManagedClusterConfigWorkerConfig struct { + empty bool `json:"-"` + NumInstances *int64 `json:"numInstances"` + InstanceNames []string `json:"instanceNames"` + Image *string `json:"image"` + MachineType *string `json:"machineType"` + DiskConfig *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig `json:"diskConfig"` + IsPreemptible *bool `json:"isPreemptible"` + Preemptibility *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum `json:"preemptibility"` + ManagedGroupConfig *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig `json:"managedGroupConfig"` + Accelerators []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators `json:"accelerators"` + MinCpuPlatform *string `json:"minCpuPlatform"` +} + +type jsonWorkflowTemplatePlacementManagedClusterConfigWorkerConfig WorkflowTemplatePlacementManagedClusterConfigWorkerConfig + +func (r 
*WorkflowTemplatePlacementManagedClusterConfigWorkerConfig) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplatePlacementManagedClusterConfigWorkerConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplatePlacementManagedClusterConfigWorkerConfig + } else { + + r.NumInstances = res.NumInstances + + r.InstanceNames = res.InstanceNames + + r.Image = res.Image + + r.MachineType = res.MachineType + + r.DiskConfig = res.DiskConfig + + r.IsPreemptible = res.IsPreemptible + + r.Preemptibility = res.Preemptibility + + r.ManagedGroupConfig = res.ManagedGroupConfig + + r.Accelerators = res.Accelerators + + r.MinCpuPlatform = res.MinCpuPlatform + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplatePlacementManagedClusterConfigWorkerConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplatePlacementManagedClusterConfigWorkerConfig *WorkflowTemplatePlacementManagedClusterConfigWorkerConfig = &WorkflowTemplatePlacementManagedClusterConfigWorkerConfig{empty: true} + +func (r *WorkflowTemplatePlacementManagedClusterConfigWorkerConfig) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigWorkerConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigWorkerConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig struct { + empty bool `json:"-"` + BootDiskType *string `json:"bootDiskType"` + BootDiskSizeGb *int64 `json:"bootDiskSizeGb"` + NumLocalSsds *int64 `json:"numLocalSsds"` +} + +type jsonWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig + +func (r *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig + } else { + + r.BootDiskType = res.BootDiskType + + r.BootDiskSizeGb = res.BootDiskSizeGb + + r.NumLocalSsds = res.NumLocalSsds + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig = &WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig{empty: true} + +func (r *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig struct { + empty bool `json:"-"` + InstanceTemplateName *string `json:"instanceTemplateName"` + InstanceGroupManagerName *string `json:"instanceGroupManagerName"` +} + +type jsonWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig + +func (r *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig + } else { + + r.InstanceTemplateName = res.InstanceTemplateName + + r.InstanceGroupManagerName = res.InstanceGroupManagerName + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig is +// empty. 
Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig = &WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig{empty: true} + +func (r *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators struct { + empty bool `json:"-"` + AcceleratorType *string `json:"acceleratorType"` + AcceleratorCount *int64 `json:"acceleratorCount"` +} + +type jsonWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators + +func (r *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators + } else { + + r.AcceleratorType = res.AcceleratorType + + r.AcceleratorCount = res.AcceleratorCount + + } + return nil +} + +// This object is used to assert a desired state where this 
WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators = &WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators{empty: true} + +func (r *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig struct { + empty bool `json:"-"` + NumInstances *int64 `json:"numInstances"` + InstanceNames []string `json:"instanceNames"` + Image *string `json:"image"` + MachineType *string `json:"machineType"` + DiskConfig *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig `json:"diskConfig"` + IsPreemptible *bool `json:"isPreemptible"` + Preemptibility *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum `json:"preemptibility"` + ManagedGroupConfig *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig `json:"managedGroupConfig"` + Accelerators []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators `json:"accelerators"` + MinCpuPlatform *string `json:"minCpuPlatform"` +} + +type jsonWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig 
WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig + +func (r *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig + } else { + + r.NumInstances = res.NumInstances + + r.InstanceNames = res.InstanceNames + + r.Image = res.Image + + r.MachineType = res.MachineType + + r.DiskConfig = res.DiskConfig + + r.IsPreemptible = res.IsPreemptible + + r.Preemptibility = res.Preemptibility + + r.ManagedGroupConfig = res.ManagedGroupConfig + + r.Accelerators = res.Accelerators + + r.MinCpuPlatform = res.MinCpuPlatform + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
var EmptyWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig = &WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig{empty: true}

// Empty reports whether this object is the shared "empty" sentinel.
func (r *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig) Empty() bool {
	return r.empty
}

func (r *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig) String() string {
	return dcl.SprintResource(r)
}

func (r *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig) HashCode() string {
	// Placeholder for a more complex hash method that handles ordering, etc
	// Hash resource body for easy comparison later
	hash := sha256.Sum256([]byte(r.String()))
	return fmt.Sprintf("%x", hash)
}

// WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig is the
// `diskConfig` block of the managed cluster's secondary worker config.
type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig struct {
	empty bool `json:"-"`
	BootDiskType *string `json:"bootDiskType"`
	BootDiskSizeGb *int64 `json:"bootDiskSizeGb"`
	NumLocalSsds *int64 `json:"numLocalSsds"`
}

type jsonWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig

// UnmarshalJSON decodes data field-by-field; an empty JSON object ({}) is
// collapsed onto the package-level Empty sentinel for this type. The same
// pattern repeats for every nested type below.
func (r *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig) UnmarshalJSON(data []byte) error {
	var res jsonWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig
	if err := json.Unmarshal(data, &res); err != nil {
		return err
	}

	var m map[string]interface{}
	// Error intentionally ignored: the Unmarshal above already validated data.
	json.Unmarshal(data, &m)

	if len(m) == 0 {
		*r = *EmptyWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig
	} else {

		r.BootDiskType = res.BootDiskType

		r.BootDiskSizeGb = res.BootDiskSizeGb

		r.NumLocalSsds = res.NumLocalSsds

	}
	return nil
}

// This object is used to assert a desired state where this WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig is
// empty. Go lacks global const objects, but this object should be treated
// as one. Modifying this object will have undesirable results.
var EmptyWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig = &WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig{empty: true}

func (r *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig) Empty() bool {
	return r.empty
}

func (r *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig) String() string {
	return dcl.SprintResource(r)
}

func (r *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig) HashCode() string {
	// Placeholder for a more complex hash method that handles ordering, etc
	// Hash resource body for easy comparison later
	hash := sha256.Sum256([]byte(r.String()))
	return fmt.Sprintf("%x", hash)
}

// WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig is the
// (output) `managedGroupConfig` block of the secondary worker config.
type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig struct {
	empty bool `json:"-"`
	InstanceTemplateName *string `json:"instanceTemplateName"`
	InstanceGroupManagerName *string `json:"instanceGroupManagerName"`
}

type jsonWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig

func (r *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig) UnmarshalJSON(data []byte) error {
	var res jsonWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig
	if err := json.Unmarshal(data, &res); err != nil {
		return err
	}

	var m map[string]interface{}
	json.Unmarshal(data, &m)

	if len(m) == 0 {
		*r = *EmptyWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig
	} else {

		r.InstanceTemplateName = res.InstanceTemplateName

		r.InstanceGroupManagerName = res.InstanceGroupManagerName

	}
	return nil
}

// This object is used to assert a desired state where this WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig is
// empty. Go lacks global const objects, but this object should be treated
// as one. Modifying this object will have undesirable results.
var EmptyWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig = &WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig{empty: true}

func (r *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig) Empty() bool {
	return r.empty
}

func (r *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig) String() string {
	return dcl.SprintResource(r)
}

func (r *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig) HashCode() string {
	// Placeholder for a more complex hash method that handles ordering, etc
	// Hash resource body for easy comparison later
	hash := sha256.Sum256([]byte(r.String()))
	return fmt.Sprintf("%x", hash)
}

// WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators is one entry of
// the `accelerators` list of the secondary worker config.
type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators struct {
	empty bool `json:"-"`
	AcceleratorType *string `json:"acceleratorType"`
	AcceleratorCount *int64 `json:"acceleratorCount"`
}

type jsonWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators

func (r *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators) UnmarshalJSON(data []byte) error {
	var res jsonWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators
	if err := json.Unmarshal(data, &res); err != nil {
		return err
	}

	var m map[string]interface{}
	json.Unmarshal(data, &m)

	if len(m) == 0 {
		*r = *EmptyWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators
	} else {

		r.AcceleratorType = res.AcceleratorType

		r.AcceleratorCount = res.AcceleratorCount

	}
	return nil
}

// This object is used to assert a desired state where this WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators is
// empty. Go lacks global const objects, but this object should be treated
// as one. Modifying this object will have undesirable results.
var EmptyWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators = &WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators{empty: true}

func (r *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators) Empty() bool {
	return r.empty
}

func (r *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators) String() string {
	return dcl.SprintResource(r)
}

func (r *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators) HashCode() string {
	// Placeholder for a more complex hash method that handles ordering, etc
	// Hash resource body for easy comparison later
	hash := sha256.Sum256([]byte(r.String()))
	return fmt.Sprintf("%x", hash)
}

// WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig is the `softwareConfig` block of the
// managed cluster config.
type WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig struct {
	empty bool `json:"-"`
	ImageVersion *string `json:"imageVersion"`
	Properties map[string]string `json:"properties"`
	OptionalComponents []WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum `json:"optionalComponents"`
}

type jsonWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig

func (r *WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig) UnmarshalJSON(data []byte) error {
	var res jsonWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig
	if err := json.Unmarshal(data, &res); err != nil {
		return err
	}

	var m map[string]interface{}
	json.Unmarshal(data, &m)

	if len(m) == 0 {
		*r = *EmptyWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig
	} else {

		r.ImageVersion = res.ImageVersion

		r.Properties = res.Properties

		r.OptionalComponents = res.OptionalComponents

	}
	return nil
}

// This object is used to assert a desired state where this WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig is
// empty. Go lacks global const objects, but this object should be treated
// as one. Modifying this object will have undesirable results.
var EmptyWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig *WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig = &WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig{empty: true}

func (r *WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig) Empty() bool {
	return r.empty
}

func (r *WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig) String() string {
	return dcl.SprintResource(r)
}

func (r *WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig) HashCode() string {
	// Placeholder for a more complex hash method that handles ordering, etc
	// Hash resource body for easy comparison later
	hash := sha256.Sum256([]byte(r.String()))
	return fmt.Sprintf("%x", hash)
}

// WorkflowTemplatePlacementManagedClusterConfigInitializationActions is one entry of the
// `initializationActions` list of the managed cluster config.
type WorkflowTemplatePlacementManagedClusterConfigInitializationActions struct {
	empty bool `json:"-"`
	ExecutableFile *string `json:"executableFile"`
	ExecutionTimeout *string `json:"executionTimeout"`
}

type jsonWorkflowTemplatePlacementManagedClusterConfigInitializationActions WorkflowTemplatePlacementManagedClusterConfigInitializationActions

func (r *WorkflowTemplatePlacementManagedClusterConfigInitializationActions) UnmarshalJSON(data []byte) error {
	var res jsonWorkflowTemplatePlacementManagedClusterConfigInitializationActions
	if err := json.Unmarshal(data, &res); err != nil {
		return err
	}

	var m map[string]interface{}
	json.Unmarshal(data, &m)

	if len(m) == 0 {
		*r = *EmptyWorkflowTemplatePlacementManagedClusterConfigInitializationActions
	} else {

		r.ExecutableFile = res.ExecutableFile

		r.ExecutionTimeout = res.ExecutionTimeout

	}
	return nil
}

// This object is used to assert a desired state where this WorkflowTemplatePlacementManagedClusterConfigInitializationActions is
// empty. Go lacks global const objects, but this object should be treated
// as one. Modifying this object will have undesirable results.
var EmptyWorkflowTemplatePlacementManagedClusterConfigInitializationActions *WorkflowTemplatePlacementManagedClusterConfigInitializationActions = &WorkflowTemplatePlacementManagedClusterConfigInitializationActions{empty: true}

func (r *WorkflowTemplatePlacementManagedClusterConfigInitializationActions) Empty() bool {
	return r.empty
}

func (r *WorkflowTemplatePlacementManagedClusterConfigInitializationActions) String() string {
	return dcl.SprintResource(r)
}

func (r *WorkflowTemplatePlacementManagedClusterConfigInitializationActions) HashCode() string {
	// Placeholder for a more complex hash method that handles ordering, etc
	// Hash resource body for easy comparison later
	hash := sha256.Sum256([]byte(r.String()))
	return fmt.Sprintf("%x", hash)
}

// WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig is the `encryptionConfig` block of
// the managed cluster config.
type WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig struct {
	empty bool `json:"-"`
	GcePdKmsKeyName *string `json:"gcePdKmsKeyName"`
}

type jsonWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig

func (r *WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig) UnmarshalJSON(data []byte) error {
	var res jsonWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig
	if err := json.Unmarshal(data, &res); err != nil {
		return err
	}

	var m map[string]interface{}
	json.Unmarshal(data, &m)

	if len(m) == 0 {
		*r = *EmptyWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig
	} else {

		r.GcePdKmsKeyName = res.GcePdKmsKeyName

	}
	return nil
}

// This object is used to assert a desired state where this WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig is
// empty. Go lacks global const objects, but this object should be treated
// as one. Modifying this object will have undesirable results.
var EmptyWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig *WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig = &WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig{empty: true}

func (r *WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig) Empty() bool {
	return r.empty
}

func (r *WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig) String() string {
	return dcl.SprintResource(r)
}

func (r *WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig) HashCode() string {
	// Placeholder for a more complex hash method that handles ordering, etc
	// Hash resource body for easy comparison later
	hash := sha256.Sum256([]byte(r.String()))
	return fmt.Sprintf("%x", hash)
}

// WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig is the `autoscalingConfig` block
// of the managed cluster config.
type WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig struct {
	empty bool `json:"-"`
	Policy *string `json:"policy"`
}

type jsonWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig

func (r *WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig) UnmarshalJSON(data []byte) error {
	var res jsonWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig
	if err := json.Unmarshal(data, &res); err != nil {
		return err
	}

	var m map[string]interface{}
	json.Unmarshal(data, &m)

	if len(m) == 0 {
		*r = *EmptyWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig
	} else {

		r.Policy = res.Policy

	}
	return nil
}

// This object is used to assert a desired state where this WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig is
// empty. Go lacks global const objects, but this object should be treated
// as one. Modifying this object will have undesirable results.
var EmptyWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig *WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig = &WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig{empty: true}

func (r *WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig) Empty() bool {
	return r.empty
}

func (r *WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig) String() string {
	return dcl.SprintResource(r)
}

func (r *WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig) HashCode() string {
	// Placeholder for a more complex hash method that handles ordering, etc
	// Hash resource body for easy comparison later
	hash := sha256.Sum256([]byte(r.String()))
	return fmt.Sprintf("%x", hash)
}

// WorkflowTemplatePlacementManagedClusterConfigSecurityConfig is the `securityConfig` block of the
// managed cluster config; it wraps the Kerberos settings.
type WorkflowTemplatePlacementManagedClusterConfigSecurityConfig struct {
	empty bool `json:"-"`
	KerberosConfig *WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig `json:"kerberosConfig"`
}

type jsonWorkflowTemplatePlacementManagedClusterConfigSecurityConfig WorkflowTemplatePlacementManagedClusterConfigSecurityConfig

func (r *WorkflowTemplatePlacementManagedClusterConfigSecurityConfig) UnmarshalJSON(data []byte) error {
	var res jsonWorkflowTemplatePlacementManagedClusterConfigSecurityConfig
	if err := json.Unmarshal(data, &res); err != nil {
		return err
	}

	var m map[string]interface{}
	json.Unmarshal(data, &m)

	if len(m) == 0 {
		*r = *EmptyWorkflowTemplatePlacementManagedClusterConfigSecurityConfig
	} else {

		r.KerberosConfig = res.KerberosConfig

	}
	return nil
}

// This object is used to assert a desired state where this WorkflowTemplatePlacementManagedClusterConfigSecurityConfig is
// empty. Go lacks global const objects, but this object should be treated
// as one. Modifying this object will have undesirable results.
var EmptyWorkflowTemplatePlacementManagedClusterConfigSecurityConfig *WorkflowTemplatePlacementManagedClusterConfigSecurityConfig = &WorkflowTemplatePlacementManagedClusterConfigSecurityConfig{empty: true}

func (r *WorkflowTemplatePlacementManagedClusterConfigSecurityConfig) Empty() bool {
	return r.empty
}

func (r *WorkflowTemplatePlacementManagedClusterConfigSecurityConfig) String() string {
	return dcl.SprintResource(r)
}

func (r *WorkflowTemplatePlacementManagedClusterConfigSecurityConfig) HashCode() string {
	// Placeholder for a more complex hash method that handles ordering, etc
	// Hash resource body for easy comparison later
	hash := sha256.Sum256([]byte(r.String()))
	return fmt.Sprintf("%x", hash)
}

// WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig is the
// `kerberosConfig` block of the security config.
type WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig struct {
	empty bool `json:"-"`
	EnableKerberos *bool `json:"enableKerberos"`
	RootPrincipalPassword *string `json:"rootPrincipalPassword"`
	KmsKey *string `json:"kmsKey"`
	Keystore *string `json:"keystore"`
	Truststore *string `json:"truststore"`
	KeystorePassword *string `json:"keystorePassword"`
	KeyPassword *string `json:"keyPassword"`
	TruststorePassword *string `json:"truststorePassword"`
	CrossRealmTrustRealm *string `json:"crossRealmTrustRealm"`
	CrossRealmTrustKdc *string `json:"crossRealmTrustKdc"`
	CrossRealmTrustAdminServer *string `json:"crossRealmTrustAdminServer"`
	CrossRealmTrustSharedPassword *string `json:"crossRealmTrustSharedPassword"`
	KdcDbKey *string `json:"kdcDbKey"`
	TgtLifetimeHours *int64 `json:"tgtLifetimeHours"`
	Realm *string `json:"realm"`
}

type jsonWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig

func (r *WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig) UnmarshalJSON(data []byte) error {
	var res jsonWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig
	if err := json.Unmarshal(data, &res); err != nil {
		return err
	}

	var m map[string]interface{}
	json.Unmarshal(data, &m)

	if len(m) == 0 {
		*r = *EmptyWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig
	} else {

		r.EnableKerberos = res.EnableKerberos

		r.RootPrincipalPassword = res.RootPrincipalPassword

		r.KmsKey = res.KmsKey

		r.Keystore = res.Keystore

		r.Truststore = res.Truststore

		r.KeystorePassword = res.KeystorePassword

		r.KeyPassword = res.KeyPassword

		r.TruststorePassword = res.TruststorePassword

		r.CrossRealmTrustRealm = res.CrossRealmTrustRealm

		r.CrossRealmTrustKdc = res.CrossRealmTrustKdc

		r.CrossRealmTrustAdminServer = res.CrossRealmTrustAdminServer

		r.CrossRealmTrustSharedPassword = res.CrossRealmTrustSharedPassword

		r.KdcDbKey = res.KdcDbKey

		r.TgtLifetimeHours = res.TgtLifetimeHours

		r.Realm = res.Realm

	}
	return nil
}

// This object is used to assert a desired state where this WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig is
// empty. Go lacks global const objects, but this object should be treated
// as one. Modifying this object will have undesirable results.
var EmptyWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig *WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig = &WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig{empty: true}

// Empty reports whether this object is the shared "empty" sentinel.
func (r *WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig) Empty() bool {
	return r.empty
}

func (r *WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig) String() string {
	return dcl.SprintResource(r)
}

func (r *WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig) HashCode() string {
	// Placeholder for a more complex hash method that handles ordering, etc
	// Hash resource body for easy comparison later
	hash := sha256.Sum256([]byte(r.String()))
	return fmt.Sprintf("%x", hash)
}

// WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig is the `lifecycleConfig` block of
// the managed cluster config.
type WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig struct {
	empty bool `json:"-"`
	IdleDeleteTtl *string `json:"idleDeleteTtl"`
	AutoDeleteTime *string `json:"autoDeleteTime"`
	AutoDeleteTtl *string `json:"autoDeleteTtl"`
	IdleStartTime *string `json:"idleStartTime"`
}

type jsonWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig

// UnmarshalJSON decodes data field-by-field; an empty JSON object ({}) is
// collapsed onto the package-level Empty sentinel for this type. The error
// from the second Unmarshal is intentionally ignored: the first already
// validated data.
func (r *WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig) UnmarshalJSON(data []byte) error {
	var res jsonWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig
	if err := json.Unmarshal(data, &res); err != nil {
		return err
	}

	var m map[string]interface{}
	json.Unmarshal(data, &m)

	if len(m) == 0 {
		*r = *EmptyWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig
	} else {

		r.IdleDeleteTtl = res.IdleDeleteTtl

		r.AutoDeleteTime = res.AutoDeleteTime

		r.AutoDeleteTtl = res.AutoDeleteTtl

		r.IdleStartTime = res.IdleStartTime

	}
	return nil
}

// This object is used to assert a desired state where this WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig is
// empty. Go lacks global const objects, but this object should be treated
// as one. Modifying this object will have undesirable results.
var EmptyWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig *WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig = &WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig{empty: true}

func (r *WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig) Empty() bool {
	return r.empty
}

func (r *WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig) String() string {
	return dcl.SprintResource(r)
}

func (r *WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig) HashCode() string {
	// Placeholder for a more complex hash method that handles ordering, etc
	// Hash resource body for easy comparison later
	hash := sha256.Sum256([]byte(r.String()))
	return fmt.Sprintf("%x", hash)
}

// WorkflowTemplatePlacementManagedClusterConfigEndpointConfig is the `endpointConfig` block of the
// managed cluster config.
type WorkflowTemplatePlacementManagedClusterConfigEndpointConfig struct {
	empty bool `json:"-"`
	HttpPorts map[string]string `json:"httpPorts"`
	EnableHttpPortAccess *bool `json:"enableHttpPortAccess"`
}

type jsonWorkflowTemplatePlacementManagedClusterConfigEndpointConfig WorkflowTemplatePlacementManagedClusterConfigEndpointConfig

func (r *WorkflowTemplatePlacementManagedClusterConfigEndpointConfig) UnmarshalJSON(data []byte) error {
	var res jsonWorkflowTemplatePlacementManagedClusterConfigEndpointConfig
	if err := json.Unmarshal(data, &res); err != nil {
		return err
	}

	var m map[string]interface{}
	json.Unmarshal(data, &m)

	if len(m) == 0 {
		*r = *EmptyWorkflowTemplatePlacementManagedClusterConfigEndpointConfig
	} else {

		r.HttpPorts = res.HttpPorts

		r.EnableHttpPortAccess = res.EnableHttpPortAccess

	}
	return nil
}

// This object is used to assert a desired state where this WorkflowTemplatePlacementManagedClusterConfigEndpointConfig is
// empty. Go lacks global const objects, but this object should be treated
// as one. Modifying this object will have undesirable results.
var EmptyWorkflowTemplatePlacementManagedClusterConfigEndpointConfig *WorkflowTemplatePlacementManagedClusterConfigEndpointConfig = &WorkflowTemplatePlacementManagedClusterConfigEndpointConfig{empty: true}

func (r *WorkflowTemplatePlacementManagedClusterConfigEndpointConfig) Empty() bool {
	return r.empty
}

func (r *WorkflowTemplatePlacementManagedClusterConfigEndpointConfig) String() string {
	return dcl.SprintResource(r)
}

func (r *WorkflowTemplatePlacementManagedClusterConfigEndpointConfig) HashCode() string {
	// Placeholder for a more complex hash method that handles ordering, etc
	// Hash resource body for easy comparison later
	hash := sha256.Sum256([]byte(r.String()))
	return fmt.Sprintf("%x", hash)
}

{{- if ne $.TargetVersionName "ga" }}
// The GKE-cluster and metastore types below are only generated for non-GA
// provider versions (guarded by the template condition above).

// WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig is the `gkeClusterConfig` block of
// the managed cluster config.
type WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig struct {
	empty bool `json:"-"`
	NamespacedGkeDeploymentTarget *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget `json:"namespacedGkeDeploymentTarget"`
}

type jsonWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig

func (r *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig) UnmarshalJSON(data []byte) error {
	var res jsonWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig
	if err := json.Unmarshal(data, &res); err != nil {
		return err
	}

	var m map[string]interface{}
	json.Unmarshal(data, &m)

	if len(m) == 0 {
		*r = *EmptyWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig
	} else {

		r.NamespacedGkeDeploymentTarget = res.NamespacedGkeDeploymentTarget

	}
	return nil
}

// This object is used to assert a desired state where this WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig is
// empty. Go lacks global const objects, but this object should be treated
// as one. Modifying this object will have undesirable results.
var EmptyWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig = &WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig{empty: true}

func (r *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig) Empty() bool {
	return r.empty
}

func (r *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig) String() string {
	return dcl.SprintResource(r)
}

func (r *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig) HashCode() string {
	// Placeholder for a more complex hash method that handles ordering, etc
	// Hash resource body for easy comparison later
	hash := sha256.Sum256([]byte(r.String()))
	return fmt.Sprintf("%x", hash)
}

// WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget is
// the `namespacedGkeDeploymentTarget` block of the GKE cluster config.
type WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget struct {
	empty bool `json:"-"`
	TargetGkeCluster *string `json:"targetGkeCluster"`
	ClusterNamespace *string `json:"clusterNamespace"`
}

type jsonWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget

func (r *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) UnmarshalJSON(data []byte) error {
	var res jsonWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget
	if err := json.Unmarshal(data, &res); err != nil {
		return err
	}

	var m map[string]interface{}
	json.Unmarshal(data, &m)

	if len(m) == 0 {
		*r = *EmptyWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget
	} else {

		r.TargetGkeCluster = res.TargetGkeCluster

		r.ClusterNamespace = res.ClusterNamespace

	}
	return nil
}

// This object is used to assert a desired state where this WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget is
// empty. Go lacks global const objects, but this object should be treated
// as one. Modifying this object will have undesirable results.
var EmptyWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget = &WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget{empty: true}

func (r *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) Empty() bool {
	return r.empty
}

func (r *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) String() string {
	return dcl.SprintResource(r)
}

func (r *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) HashCode() string {
	// Placeholder for a more complex hash method that handles ordering, etc
	// Hash resource body for easy comparison later
	hash := sha256.Sum256([]byte(r.String()))
	return fmt.Sprintf("%x", hash)
}

// WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig is the `metastoreConfig` block of
// the managed cluster config.
type WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig struct {
	empty bool `json:"-"`
	DataprocMetastoreService *string `json:"dataprocMetastoreService"`
}

type jsonWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig

func (r *WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig) UnmarshalJSON(data []byte) error {
	var res jsonWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig
	if err := json.Unmarshal(data, &res); err != nil {
		return err
	}

	var m map[string]interface{}
	json.Unmarshal(data, &m)

	if len(m) == 0 {
		*r = *EmptyWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig
	} else {

		r.DataprocMetastoreService = res.DataprocMetastoreService

	}
	return nil
}

// This object is used to assert a desired state where this WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig is
// empty. Go lacks global const objects, but this object should be treated
// as one. Modifying this object will have undesirable results.
var EmptyWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig *WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig = &WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig{empty: true}

func (r *WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig) Empty() bool {
	return r.empty
}

func (r *WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig) String() string {
	return dcl.SprintResource(r)
}

func (r *WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig) HashCode() string {
	// Placeholder for a more complex hash method that handles ordering, etc
	// Hash resource body for easy comparison later
	hash := sha256.Sum256([]byte(r.String()))
	return fmt.Sprintf("%x", hash)
}

{{- end }}
// WorkflowTemplatePlacementClusterSelector is the `clusterSelector` block of the workflow
// template placement: it selects an existing cluster by zone and labels.
type WorkflowTemplatePlacementClusterSelector struct {
	empty bool `json:"-"`
	Zone *string `json:"zone"`
	ClusterLabels map[string]string `json:"clusterLabels"`
}

type jsonWorkflowTemplatePlacementClusterSelector WorkflowTemplatePlacementClusterSelector

func (r *WorkflowTemplatePlacementClusterSelector) UnmarshalJSON(data []byte) error {
	var res jsonWorkflowTemplatePlacementClusterSelector
	if err := json.Unmarshal(data, &res); err != nil {
		return err
	}

	var m map[string]interface{}
	json.Unmarshal(data, &m)

	if len(m) == 0 {
		*r = *EmptyWorkflowTemplatePlacementClusterSelector
	} else {

		r.Zone = res.Zone

		r.ClusterLabels = res.ClusterLabels

	}
	return nil
}

// This object is used to assert a desired state where this WorkflowTemplatePlacementClusterSelector is
// empty. Go lacks global const objects, but this object should be treated
// as one. Modifying this object will have undesirable results.
var EmptyWorkflowTemplatePlacementClusterSelector *WorkflowTemplatePlacementClusterSelector = &WorkflowTemplatePlacementClusterSelector{empty: true}

// Empty reports whether this object is the shared "empty" sentinel.
func (r *WorkflowTemplatePlacementClusterSelector) Empty() bool {
	return r.empty
}

func (r *WorkflowTemplatePlacementClusterSelector) String() string {
	return dcl.SprintResource(r)
}

func (r *WorkflowTemplatePlacementClusterSelector) HashCode() string {
	// Placeholder for a more complex hash method that handles ordering, etc
	// Hash resource body for easy comparison later
	hash := sha256.Sum256([]byte(r.String()))
	return fmt.Sprintf("%x", hash)
}

// WorkflowTemplateJobs is one entry of the workflow template's `jobs` list; exactly one of the
// per-engine job blocks (hadoopJob, sparkJob, ...) carries the job definition.
type WorkflowTemplateJobs struct {
	empty bool `json:"-"`
	StepId *string `json:"stepId"`
	HadoopJob *WorkflowTemplateJobsHadoopJob `json:"hadoopJob"`
	SparkJob *WorkflowTemplateJobsSparkJob `json:"sparkJob"`
	PysparkJob *WorkflowTemplateJobsPysparkJob `json:"pysparkJob"`
	HiveJob *WorkflowTemplateJobsHiveJob `json:"hiveJob"`
	PigJob *WorkflowTemplateJobsPigJob `json:"pigJob"`
	SparkRJob *WorkflowTemplateJobsSparkRJob `json:"sparkRJob"`
	SparkSqlJob *WorkflowTemplateJobsSparkSqlJob `json:"sparkSqlJob"`
	PrestoJob *WorkflowTemplateJobsPrestoJob `json:"prestoJob"`
	Labels map[string]string `json:"labels"`
	Scheduling *WorkflowTemplateJobsScheduling `json:"scheduling"`
	PrerequisiteStepIds []string `json:"prerequisiteStepIds"`
}

type jsonWorkflowTemplateJobs WorkflowTemplateJobs

// UnmarshalJSON decodes data field-by-field; an empty JSON object ({}) is
// collapsed onto the package-level Empty sentinel for this type. The error
// from the second Unmarshal is intentionally ignored: the first already
// validated data.
func (r *WorkflowTemplateJobs) UnmarshalJSON(data []byte) error {
	var res jsonWorkflowTemplateJobs
	if err := json.Unmarshal(data, &res); err != nil {
		return err
	}

	var m map[string]interface{}
	json.Unmarshal(data, &m)

	if len(m) == 0 {
		*r = *EmptyWorkflowTemplateJobs
	} else {

		r.StepId = res.StepId

		r.HadoopJob = res.HadoopJob

		r.SparkJob = res.SparkJob

		r.PysparkJob = res.PysparkJob

		r.HiveJob = res.HiveJob

		r.PigJob = res.PigJob

		r.SparkRJob = res.SparkRJob

		r.SparkSqlJob = res.SparkSqlJob

		r.PrestoJob = res.PrestoJob

		r.Labels = res.Labels

		r.Scheduling = res.Scheduling

		r.PrerequisiteStepIds = res.PrerequisiteStepIds

	}
	return nil
}

// This object is used to assert a desired state where this WorkflowTemplateJobs is
// empty. Go lacks global const objects, but this object should be treated
// as one. Modifying this object will have undesirable results.
var EmptyWorkflowTemplateJobs *WorkflowTemplateJobs = &WorkflowTemplateJobs{empty: true}

func (r *WorkflowTemplateJobs) Empty() bool {
	return r.empty
}

func (r *WorkflowTemplateJobs) String() string {
	return dcl.SprintResource(r)
}

func (r *WorkflowTemplateJobs) HashCode() string {
	// Placeholder for a more complex hash method that handles ordering, etc
	// Hash resource body for easy comparison later
	hash := sha256.Sum256([]byte(r.String()))
	return fmt.Sprintf("%x", hash)
}

// WorkflowTemplateJobsHadoopJob is the `hadoopJob` block of a workflow template job.
type WorkflowTemplateJobsHadoopJob struct {
	empty bool `json:"-"`
	MainJarFileUri *string `json:"mainJarFileUri"`
	MainClass *string `json:"mainClass"`
	Args []string `json:"args"`
	JarFileUris []string `json:"jarFileUris"`
	FileUris []string `json:"fileUris"`
	ArchiveUris []string `json:"archiveUris"`
	Properties map[string]string `json:"properties"`
	LoggingConfig *WorkflowTemplateJobsHadoopJobLoggingConfig `json:"loggingConfig"`
}

type jsonWorkflowTemplateJobsHadoopJob WorkflowTemplateJobsHadoopJob

func (r *WorkflowTemplateJobsHadoopJob) UnmarshalJSON(data []byte) error {
	var res jsonWorkflowTemplateJobsHadoopJob
	if err := json.Unmarshal(data, &res); err != nil {
		return err
	}

	var m map[string]interface{}
	json.Unmarshal(data, &m)

	if len(m) == 0 {
		*r = *EmptyWorkflowTemplateJobsHadoopJob
	} else {

		r.MainJarFileUri = res.MainJarFileUri

		r.MainClass = res.MainClass

		r.Args = res.Args

		r.JarFileUris = res.JarFileUris

		r.FileUris = res.FileUris

		r.ArchiveUris = res.ArchiveUris

		r.Properties = res.Properties

		r.LoggingConfig = res.LoggingConfig

	}
	return nil
}

// This object is used to assert a desired state where this WorkflowTemplateJobsHadoopJob is
// empty. Go lacks global const objects, but this object should be treated
// as one. Modifying this object will have undesirable results.
var EmptyWorkflowTemplateJobsHadoopJob *WorkflowTemplateJobsHadoopJob = &WorkflowTemplateJobsHadoopJob{empty: true}

func (r *WorkflowTemplateJobsHadoopJob) Empty() bool {
	return r.empty
}

func (r *WorkflowTemplateJobsHadoopJob) String() string {
	return dcl.SprintResource(r)
}

func (r *WorkflowTemplateJobsHadoopJob) HashCode() string {
	// Placeholder for a more complex hash method that handles ordering, etc
	// Hash resource body for easy comparison later
	hash := sha256.Sum256([]byte(r.String()))
	return fmt.Sprintf("%x", hash)
}

// WorkflowTemplateJobsHadoopJobLoggingConfig is the `loggingConfig` block of a Hadoop job.
type WorkflowTemplateJobsHadoopJobLoggingConfig struct {
	empty bool `json:"-"`
	DriverLogLevels map[string]string `json:"driverLogLevels"`
}

type jsonWorkflowTemplateJobsHadoopJobLoggingConfig WorkflowTemplateJobsHadoopJobLoggingConfig

func (r *WorkflowTemplateJobsHadoopJobLoggingConfig) UnmarshalJSON(data []byte) error {
	var res jsonWorkflowTemplateJobsHadoopJobLoggingConfig
	if err := json.Unmarshal(data, &res); err != nil {
		return err
	}

	var m map[string]interface{}
	json.Unmarshal(data, &m)

	if len(m) == 0 {
		*r = *EmptyWorkflowTemplateJobsHadoopJobLoggingConfig
	} else {

		r.DriverLogLevels = res.DriverLogLevels

	}
	return nil
}

// This object is used to assert a desired state where this WorkflowTemplateJobsHadoopJobLoggingConfig is
// empty. Go lacks global const objects, but this object should be treated
// as one. Modifying this object will have undesirable results.
+var EmptyWorkflowTemplateJobsHadoopJobLoggingConfig *WorkflowTemplateJobsHadoopJobLoggingConfig = &WorkflowTemplateJobsHadoopJobLoggingConfig{empty: true} + +func (r *WorkflowTemplateJobsHadoopJobLoggingConfig) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplateJobsHadoopJobLoggingConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplateJobsHadoopJobLoggingConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplateJobsSparkJob struct { + empty bool `json:"-"` + MainJarFileUri *string `json:"mainJarFileUri"` + MainClass *string `json:"mainClass"` + Args []string `json:"args"` + JarFileUris []string `json:"jarFileUris"` + FileUris []string `json:"fileUris"` + ArchiveUris []string `json:"archiveUris"` + Properties map[string]string `json:"properties"` + LoggingConfig *WorkflowTemplateJobsSparkJobLoggingConfig `json:"loggingConfig"` +} + +type jsonWorkflowTemplateJobsSparkJob WorkflowTemplateJobsSparkJob + +func (r *WorkflowTemplateJobsSparkJob) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplateJobsSparkJob + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplateJobsSparkJob + } else { + + r.MainJarFileUri = res.MainJarFileUri + + r.MainClass = res.MainClass + + r.Args = res.Args + + r.JarFileUris = res.JarFileUris + + r.FileUris = res.FileUris + + r.ArchiveUris = res.ArchiveUris + + r.Properties = res.Properties + + r.LoggingConfig = res.LoggingConfig + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplateJobsSparkJob is +// empty. Go lacks global const objects, but this object should be treated +// as one. 
Modifying this object will have undesirable results. +var EmptyWorkflowTemplateJobsSparkJob *WorkflowTemplateJobsSparkJob = &WorkflowTemplateJobsSparkJob{empty: true} + +func (r *WorkflowTemplateJobsSparkJob) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplateJobsSparkJob) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplateJobsSparkJob) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplateJobsSparkJobLoggingConfig struct { + empty bool `json:"-"` + DriverLogLevels map[string]string `json:"driverLogLevels"` +} + +type jsonWorkflowTemplateJobsSparkJobLoggingConfig WorkflowTemplateJobsSparkJobLoggingConfig + +func (r *WorkflowTemplateJobsSparkJobLoggingConfig) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplateJobsSparkJobLoggingConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplateJobsSparkJobLoggingConfig + } else { + + r.DriverLogLevels = res.DriverLogLevels + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplateJobsSparkJobLoggingConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplateJobsSparkJobLoggingConfig *WorkflowTemplateJobsSparkJobLoggingConfig = &WorkflowTemplateJobsSparkJobLoggingConfig{empty: true} + +func (r *WorkflowTemplateJobsSparkJobLoggingConfig) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplateJobsSparkJobLoggingConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplateJobsSparkJobLoggingConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplateJobsPysparkJob struct { + empty bool `json:"-"` + MainPythonFileUri *string `json:"mainPythonFileUri"` + Args []string `json:"args"` + PythonFileUris []string `json:"pythonFileUris"` + JarFileUris []string `json:"jarFileUris"` + FileUris []string `json:"fileUris"` + ArchiveUris []string `json:"archiveUris"` + Properties map[string]string `json:"properties"` + LoggingConfig *WorkflowTemplateJobsPysparkJobLoggingConfig `json:"loggingConfig"` +} + +type jsonWorkflowTemplateJobsPysparkJob WorkflowTemplateJobsPysparkJob + +func (r *WorkflowTemplateJobsPysparkJob) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplateJobsPysparkJob + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplateJobsPysparkJob + } else { + + r.MainPythonFileUri = res.MainPythonFileUri + + r.Args = res.Args + + r.PythonFileUris = res.PythonFileUris + + r.JarFileUris = res.JarFileUris + + r.FileUris = res.FileUris + + r.ArchiveUris = res.ArchiveUris + + r.Properties = res.Properties + + r.LoggingConfig = res.LoggingConfig + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplateJobsPysparkJob is +// empty. Go lacks global const objects, but this object should be treated +// as one. 
Modifying this object will have undesirable results. +var EmptyWorkflowTemplateJobsPysparkJob *WorkflowTemplateJobsPysparkJob = &WorkflowTemplateJobsPysparkJob{empty: true} + +func (r *WorkflowTemplateJobsPysparkJob) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplateJobsPysparkJob) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplateJobsPysparkJob) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplateJobsPysparkJobLoggingConfig struct { + empty bool `json:"-"` + DriverLogLevels map[string]string `json:"driverLogLevels"` +} + +type jsonWorkflowTemplateJobsPysparkJobLoggingConfig WorkflowTemplateJobsPysparkJobLoggingConfig + +func (r *WorkflowTemplateJobsPysparkJobLoggingConfig) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplateJobsPysparkJobLoggingConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplateJobsPysparkJobLoggingConfig + } else { + + r.DriverLogLevels = res.DriverLogLevels + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplateJobsPysparkJobLoggingConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplateJobsPysparkJobLoggingConfig *WorkflowTemplateJobsPysparkJobLoggingConfig = &WorkflowTemplateJobsPysparkJobLoggingConfig{empty: true} + +func (r *WorkflowTemplateJobsPysparkJobLoggingConfig) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplateJobsPysparkJobLoggingConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplateJobsPysparkJobLoggingConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplateJobsHiveJob struct { + empty bool `json:"-"` + QueryFileUri *string `json:"queryFileUri"` + QueryList *WorkflowTemplateJobsHiveJobQueryList `json:"queryList"` + ContinueOnFailure *bool `json:"continueOnFailure"` + ScriptVariables map[string]string `json:"scriptVariables"` + Properties map[string]string `json:"properties"` + JarFileUris []string `json:"jarFileUris"` +} + +type jsonWorkflowTemplateJobsHiveJob WorkflowTemplateJobsHiveJob + +func (r *WorkflowTemplateJobsHiveJob) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplateJobsHiveJob + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplateJobsHiveJob + } else { + + r.QueryFileUri = res.QueryFileUri + + r.QueryList = res.QueryList + + r.ContinueOnFailure = res.ContinueOnFailure + + r.ScriptVariables = res.ScriptVariables + + r.Properties = res.Properties + + r.JarFileUris = res.JarFileUris + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplateJobsHiveJob is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplateJobsHiveJob *WorkflowTemplateJobsHiveJob = &WorkflowTemplateJobsHiveJob{empty: true} + +func (r *WorkflowTemplateJobsHiveJob) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplateJobsHiveJob) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplateJobsHiveJob) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplateJobsHiveJobQueryList struct { + empty bool `json:"-"` + Queries []string `json:"queries"` +} + +type jsonWorkflowTemplateJobsHiveJobQueryList WorkflowTemplateJobsHiveJobQueryList + +func (r *WorkflowTemplateJobsHiveJobQueryList) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplateJobsHiveJobQueryList + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplateJobsHiveJobQueryList + } else { + + r.Queries = res.Queries + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplateJobsHiveJobQueryList is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplateJobsHiveJobQueryList *WorkflowTemplateJobsHiveJobQueryList = &WorkflowTemplateJobsHiveJobQueryList{empty: true} + +func (r *WorkflowTemplateJobsHiveJobQueryList) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplateJobsHiveJobQueryList) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplateJobsHiveJobQueryList) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplateJobsPigJob struct { + empty bool `json:"-"` + QueryFileUri *string `json:"queryFileUri"` + QueryList *WorkflowTemplateJobsPigJobQueryList `json:"queryList"` + ContinueOnFailure *bool `json:"continueOnFailure"` + ScriptVariables map[string]string `json:"scriptVariables"` + Properties map[string]string `json:"properties"` + JarFileUris []string `json:"jarFileUris"` + LoggingConfig *WorkflowTemplateJobsPigJobLoggingConfig `json:"loggingConfig"` +} + +type jsonWorkflowTemplateJobsPigJob WorkflowTemplateJobsPigJob + +func (r *WorkflowTemplateJobsPigJob) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplateJobsPigJob + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplateJobsPigJob + } else { + + r.QueryFileUri = res.QueryFileUri + + r.QueryList = res.QueryList + + r.ContinueOnFailure = res.ContinueOnFailure + + r.ScriptVariables = res.ScriptVariables + + r.Properties = res.Properties + + r.JarFileUris = res.JarFileUris + + r.LoggingConfig = res.LoggingConfig + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplateJobsPigJob is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplateJobsPigJob *WorkflowTemplateJobsPigJob = &WorkflowTemplateJobsPigJob{empty: true} + +func (r *WorkflowTemplateJobsPigJob) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplateJobsPigJob) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplateJobsPigJob) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplateJobsPigJobQueryList struct { + empty bool `json:"-"` + Queries []string `json:"queries"` +} + +type jsonWorkflowTemplateJobsPigJobQueryList WorkflowTemplateJobsPigJobQueryList + +func (r *WorkflowTemplateJobsPigJobQueryList) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplateJobsPigJobQueryList + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplateJobsPigJobQueryList + } else { + + r.Queries = res.Queries + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplateJobsPigJobQueryList is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplateJobsPigJobQueryList *WorkflowTemplateJobsPigJobQueryList = &WorkflowTemplateJobsPigJobQueryList{empty: true} + +func (r *WorkflowTemplateJobsPigJobQueryList) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplateJobsPigJobQueryList) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplateJobsPigJobQueryList) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplateJobsPigJobLoggingConfig struct { + empty bool `json:"-"` + DriverLogLevels map[string]string `json:"driverLogLevels"` +} + +type jsonWorkflowTemplateJobsPigJobLoggingConfig WorkflowTemplateJobsPigJobLoggingConfig + +func (r *WorkflowTemplateJobsPigJobLoggingConfig) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplateJobsPigJobLoggingConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplateJobsPigJobLoggingConfig + } else { + + r.DriverLogLevels = res.DriverLogLevels + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplateJobsPigJobLoggingConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplateJobsPigJobLoggingConfig *WorkflowTemplateJobsPigJobLoggingConfig = &WorkflowTemplateJobsPigJobLoggingConfig{empty: true} + +func (r *WorkflowTemplateJobsPigJobLoggingConfig) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplateJobsPigJobLoggingConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplateJobsPigJobLoggingConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplateJobsSparkRJob struct { + empty bool `json:"-"` + MainRFileUri *string `json:"mainRFileUri"` + Args []string `json:"args"` + FileUris []string `json:"fileUris"` + ArchiveUris []string `json:"archiveUris"` + Properties map[string]string `json:"properties"` + LoggingConfig *WorkflowTemplateJobsSparkRJobLoggingConfig `json:"loggingConfig"` +} + +type jsonWorkflowTemplateJobsSparkRJob WorkflowTemplateJobsSparkRJob + +func (r *WorkflowTemplateJobsSparkRJob) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplateJobsSparkRJob + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplateJobsSparkRJob + } else { + + r.MainRFileUri = res.MainRFileUri + + r.Args = res.Args + + r.FileUris = res.FileUris + + r.ArchiveUris = res.ArchiveUris + + r.Properties = res.Properties + + r.LoggingConfig = res.LoggingConfig + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplateJobsSparkRJob is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplateJobsSparkRJob *WorkflowTemplateJobsSparkRJob = &WorkflowTemplateJobsSparkRJob{empty: true} + +func (r *WorkflowTemplateJobsSparkRJob) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplateJobsSparkRJob) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplateJobsSparkRJob) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplateJobsSparkRJobLoggingConfig struct { + empty bool `json:"-"` + DriverLogLevels map[string]string `json:"driverLogLevels"` +} + +type jsonWorkflowTemplateJobsSparkRJobLoggingConfig WorkflowTemplateJobsSparkRJobLoggingConfig + +func (r *WorkflowTemplateJobsSparkRJobLoggingConfig) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplateJobsSparkRJobLoggingConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplateJobsSparkRJobLoggingConfig + } else { + + r.DriverLogLevels = res.DriverLogLevels + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplateJobsSparkRJobLoggingConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplateJobsSparkRJobLoggingConfig *WorkflowTemplateJobsSparkRJobLoggingConfig = &WorkflowTemplateJobsSparkRJobLoggingConfig{empty: true} + +func (r *WorkflowTemplateJobsSparkRJobLoggingConfig) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplateJobsSparkRJobLoggingConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplateJobsSparkRJobLoggingConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplateJobsSparkSqlJob struct { + empty bool `json:"-"` + QueryFileUri *string `json:"queryFileUri"` + QueryList *WorkflowTemplateJobsSparkSqlJobQueryList `json:"queryList"` + ScriptVariables map[string]string `json:"scriptVariables"` + Properties map[string]string `json:"properties"` + JarFileUris []string `json:"jarFileUris"` + LoggingConfig *WorkflowTemplateJobsSparkSqlJobLoggingConfig `json:"loggingConfig"` +} + +type jsonWorkflowTemplateJobsSparkSqlJob WorkflowTemplateJobsSparkSqlJob + +func (r *WorkflowTemplateJobsSparkSqlJob) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplateJobsSparkSqlJob + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplateJobsSparkSqlJob + } else { + + r.QueryFileUri = res.QueryFileUri + + r.QueryList = res.QueryList + + r.ScriptVariables = res.ScriptVariables + + r.Properties = res.Properties + + r.JarFileUris = res.JarFileUris + + r.LoggingConfig = res.LoggingConfig + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplateJobsSparkSqlJob is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplateJobsSparkSqlJob *WorkflowTemplateJobsSparkSqlJob = &WorkflowTemplateJobsSparkSqlJob{empty: true} + +func (r *WorkflowTemplateJobsSparkSqlJob) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplateJobsSparkSqlJob) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplateJobsSparkSqlJob) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplateJobsSparkSqlJobQueryList struct { + empty bool `json:"-"` + Queries []string `json:"queries"` +} + +type jsonWorkflowTemplateJobsSparkSqlJobQueryList WorkflowTemplateJobsSparkSqlJobQueryList + +func (r *WorkflowTemplateJobsSparkSqlJobQueryList) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplateJobsSparkSqlJobQueryList + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplateJobsSparkSqlJobQueryList + } else { + + r.Queries = res.Queries + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplateJobsSparkSqlJobQueryList is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplateJobsSparkSqlJobQueryList *WorkflowTemplateJobsSparkSqlJobQueryList = &WorkflowTemplateJobsSparkSqlJobQueryList{empty: true} + +func (r *WorkflowTemplateJobsSparkSqlJobQueryList) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplateJobsSparkSqlJobQueryList) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplateJobsSparkSqlJobQueryList) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplateJobsSparkSqlJobLoggingConfig struct { + empty bool `json:"-"` + DriverLogLevels map[string]string `json:"driverLogLevels"` +} + +type jsonWorkflowTemplateJobsSparkSqlJobLoggingConfig WorkflowTemplateJobsSparkSqlJobLoggingConfig + +func (r *WorkflowTemplateJobsSparkSqlJobLoggingConfig) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplateJobsSparkSqlJobLoggingConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplateJobsSparkSqlJobLoggingConfig + } else { + + r.DriverLogLevels = res.DriverLogLevels + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplateJobsSparkSqlJobLoggingConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplateJobsSparkSqlJobLoggingConfig *WorkflowTemplateJobsSparkSqlJobLoggingConfig = &WorkflowTemplateJobsSparkSqlJobLoggingConfig{empty: true} + +func (r *WorkflowTemplateJobsSparkSqlJobLoggingConfig) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplateJobsSparkSqlJobLoggingConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplateJobsSparkSqlJobLoggingConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplateJobsPrestoJob struct { + empty bool `json:"-"` + QueryFileUri *string `json:"queryFileUri"` + QueryList *WorkflowTemplateJobsPrestoJobQueryList `json:"queryList"` + ContinueOnFailure *bool `json:"continueOnFailure"` + OutputFormat *string `json:"outputFormat"` + ClientTags []string `json:"clientTags"` + Properties map[string]string `json:"properties"` + LoggingConfig *WorkflowTemplateJobsPrestoJobLoggingConfig `json:"loggingConfig"` +} + +type jsonWorkflowTemplateJobsPrestoJob WorkflowTemplateJobsPrestoJob + +func (r *WorkflowTemplateJobsPrestoJob) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplateJobsPrestoJob + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplateJobsPrestoJob + } else { + + r.QueryFileUri = res.QueryFileUri + + r.QueryList = res.QueryList + + r.ContinueOnFailure = res.ContinueOnFailure + + r.OutputFormat = res.OutputFormat + + r.ClientTags = res.ClientTags + + r.Properties = res.Properties + + r.LoggingConfig = res.LoggingConfig + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplateJobsPrestoJob is +// empty. Go lacks global const objects, but this object should be treated +// as one. 
Modifying this object will have undesirable results. +var EmptyWorkflowTemplateJobsPrestoJob *WorkflowTemplateJobsPrestoJob = &WorkflowTemplateJobsPrestoJob{empty: true} + +func (r *WorkflowTemplateJobsPrestoJob) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplateJobsPrestoJob) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplateJobsPrestoJob) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplateJobsPrestoJobQueryList struct { + empty bool `json:"-"` + Queries []string `json:"queries"` +} + +type jsonWorkflowTemplateJobsPrestoJobQueryList WorkflowTemplateJobsPrestoJobQueryList + +func (r *WorkflowTemplateJobsPrestoJobQueryList) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplateJobsPrestoJobQueryList + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplateJobsPrestoJobQueryList + } else { + + r.Queries = res.Queries + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplateJobsPrestoJobQueryList is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplateJobsPrestoJobQueryList *WorkflowTemplateJobsPrestoJobQueryList = &WorkflowTemplateJobsPrestoJobQueryList{empty: true} + +func (r *WorkflowTemplateJobsPrestoJobQueryList) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplateJobsPrestoJobQueryList) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplateJobsPrestoJobQueryList) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplateJobsPrestoJobLoggingConfig struct { + empty bool `json:"-"` + DriverLogLevels map[string]string `json:"driverLogLevels"` +} + +type jsonWorkflowTemplateJobsPrestoJobLoggingConfig WorkflowTemplateJobsPrestoJobLoggingConfig + +func (r *WorkflowTemplateJobsPrestoJobLoggingConfig) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplateJobsPrestoJobLoggingConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplateJobsPrestoJobLoggingConfig + } else { + + r.DriverLogLevels = res.DriverLogLevels + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplateJobsPrestoJobLoggingConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplateJobsPrestoJobLoggingConfig *WorkflowTemplateJobsPrestoJobLoggingConfig = &WorkflowTemplateJobsPrestoJobLoggingConfig{empty: true} + +func (r *WorkflowTemplateJobsPrestoJobLoggingConfig) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplateJobsPrestoJobLoggingConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplateJobsPrestoJobLoggingConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplateJobsScheduling struct { + empty bool `json:"-"` + MaxFailuresPerHour *int64 `json:"maxFailuresPerHour"` + MaxFailuresTotal *int64 `json:"maxFailuresTotal"` +} + +type jsonWorkflowTemplateJobsScheduling WorkflowTemplateJobsScheduling + +func (r *WorkflowTemplateJobsScheduling) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplateJobsScheduling + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplateJobsScheduling + } else { + + r.MaxFailuresPerHour = res.MaxFailuresPerHour + + r.MaxFailuresTotal = res.MaxFailuresTotal + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplateJobsScheduling is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplateJobsScheduling *WorkflowTemplateJobsScheduling = &WorkflowTemplateJobsScheduling{empty: true} + +func (r *WorkflowTemplateJobsScheduling) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplateJobsScheduling) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplateJobsScheduling) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplateParameters struct { + empty bool `json:"-"` + Name *string `json:"name"` + Fields []string `json:"fields"` + Description *string `json:"description"` + Validation *WorkflowTemplateParametersValidation `json:"validation"` +} + +type jsonWorkflowTemplateParameters WorkflowTemplateParameters + +func (r *WorkflowTemplateParameters) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplateParameters + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplateParameters + } else { + + r.Name = res.Name + + r.Fields = res.Fields + + r.Description = res.Description + + r.Validation = res.Validation + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplateParameters is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplateParameters *WorkflowTemplateParameters = &WorkflowTemplateParameters{empty: true} + +func (r *WorkflowTemplateParameters) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplateParameters) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplateParameters) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplateParametersValidation struct { + empty bool `json:"-"` + Regex *WorkflowTemplateParametersValidationRegex `json:"regex"` + Values *WorkflowTemplateParametersValidationValues `json:"values"` +} + +type jsonWorkflowTemplateParametersValidation WorkflowTemplateParametersValidation + +func (r *WorkflowTemplateParametersValidation) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplateParametersValidation + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplateParametersValidation + } else { + + r.Regex = res.Regex + + r.Values = res.Values + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplateParametersValidation is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplateParametersValidation *WorkflowTemplateParametersValidation = &WorkflowTemplateParametersValidation{empty: true} + +func (r *WorkflowTemplateParametersValidation) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplateParametersValidation) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplateParametersValidation) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplateParametersValidationRegex struct { + empty bool `json:"-"` + Regexes []string `json:"regexes"` +} + +type jsonWorkflowTemplateParametersValidationRegex WorkflowTemplateParametersValidationRegex + +func (r *WorkflowTemplateParametersValidationRegex) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplateParametersValidationRegex + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplateParametersValidationRegex + } else { + + r.Regexes = res.Regexes + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplateParametersValidationRegex is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplateParametersValidationRegex *WorkflowTemplateParametersValidationRegex = &WorkflowTemplateParametersValidationRegex{empty: true} + +func (r *WorkflowTemplateParametersValidationRegex) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplateParametersValidationRegex) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplateParametersValidationRegex) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type WorkflowTemplateParametersValidationValues struct { + empty bool `json:"-"` + Values []string `json:"values"` +} + +type jsonWorkflowTemplateParametersValidationValues WorkflowTemplateParametersValidationValues + +func (r *WorkflowTemplateParametersValidationValues) UnmarshalJSON(data []byte) error { + var res jsonWorkflowTemplateParametersValidationValues + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyWorkflowTemplateParametersValidationValues + } else { + + r.Values = res.Values + + } + return nil +} + +// This object is used to assert a desired state where this WorkflowTemplateParametersValidationValues is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyWorkflowTemplateParametersValidationValues *WorkflowTemplateParametersValidationValues = &WorkflowTemplateParametersValidationValues{empty: true} + +func (r *WorkflowTemplateParametersValidationValues) Empty() bool { + return r.empty +} + +func (r *WorkflowTemplateParametersValidationValues) String() string { + return dcl.SprintResource(r) +} + +func (r *WorkflowTemplateParametersValidationValues) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +// Describe returns a simple description of this resource to ensure that automated tools +// can identify it. +func (r *WorkflowTemplate) Describe() dcl.ServiceTypeVersion { + return dcl.ServiceTypeVersion{ + Service: "dataproc", + Type: "WorkflowTemplate", +{{- if ne $.TargetVersionName "ga" }} + Version: "beta", +{{- else }} + Version: "dataproc", +{{- end }} + } +} + +func (r *WorkflowTemplate) ID() (string, error) { + if err := extractWorkflowTemplateFields(r); err != nil { + return "", err + } + nr := r.urlNormalized() + params := map[string]interface{}{ + "name": dcl.ValueOrEmptyString(nr.Name), + "version": dcl.ValueOrEmptyString(nr.Version), + "create_time": dcl.ValueOrEmptyString(nr.CreateTime), + "update_time": dcl.ValueOrEmptyString(nr.UpdateTime), + "labels": dcl.ValueOrEmptyString(nr.Labels), + "encryption_config": dcl.ValueOrEmptyString(nr.EncryptionConfig), + "placement": dcl.ValueOrEmptyString(nr.Placement), + "jobs": dcl.ValueOrEmptyString(nr.Jobs), + "parameters": dcl.ValueOrEmptyString(nr.Parameters), + "dag_timeout": dcl.ValueOrEmptyString(nr.DagTimeout), + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + } + return dcl.Nprintf("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/workflowTemplates/{{ "{{" }}name{{ "}}" }}", params), nil +} + 
+const WorkflowTemplateMaxPage = -1 + +type WorkflowTemplateList struct { + Items []*WorkflowTemplate + + nextToken string + + pageSize int32 + + resource *WorkflowTemplate +} + +func (l *WorkflowTemplateList) HasNext() bool { + return l.nextToken != "" +} + +func (l *WorkflowTemplateList) Next(ctx context.Context, c *Client) error { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if !l.HasNext() { + return fmt.Errorf("no next page") + } + items, token, err := c.listWorkflowTemplate(ctx, l.resource, l.nextToken, l.pageSize) + if err != nil { + return err + } + l.Items = items + l.nextToken = token + return err +} + +func (c *Client) ListWorkflowTemplate(ctx context.Context, project, location string) (*WorkflowTemplateList, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + return c.ListWorkflowTemplateWithMaxResults(ctx, project, location, WorkflowTemplateMaxPage) + +} + +func (c *Client) ListWorkflowTemplateWithMaxResults(ctx context.Context, project, location string, pageSize int32) (*WorkflowTemplateList, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // Create a resource object so that we can use proper url normalization methods. + r := &WorkflowTemplate{ + Project: &project, + Location: &location, + } + items, token, err := c.listWorkflowTemplate(ctx, r, "", pageSize) + if err != nil { + return nil, err + } + return &WorkflowTemplateList{ + Items: items, + nextToken: token, + pageSize: pageSize, + resource: r, + }, nil +} + +func (c *Client) GetWorkflowTemplate(ctx context.Context, r *WorkflowTemplate) (*WorkflowTemplate, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // This is *purposefully* supressing errors. 
+ // This function is used with url-normalized values + not URL normalized values. + // URL Normalized values will throw unintentional errors, since those values are not of the proper parent form. + extractWorkflowTemplateFields(r) + + b, err := c.getWorkflowTemplateRaw(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + return nil, &googleapi.Error{ + Code: 404, + Message: err.Error(), + } + } + return nil, err + } + result, err := unmarshalWorkflowTemplate(b, c, r) + if err != nil { + return nil, err + } + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name + + c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) + result, err = canonicalizeWorkflowTemplateNewState(c, result, r) + if err != nil { + return nil, err + } + if err := postReadExtractWorkflowTemplateFields(result); err != nil { + return result, err + } + c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result) + + return result, nil +} + +func (c *Client) DeleteWorkflowTemplate(ctx context.Context, r *WorkflowTemplate) error { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if r == nil { + return fmt.Errorf("WorkflowTemplate resource is nil") + } + c.Config.Logger.InfoWithContext(ctx, "Deleting WorkflowTemplate...") + deleteOp := deleteWorkflowTemplateOperation{} + return deleteOp.do(ctx, r, c) +} + +// DeleteAllWorkflowTemplate deletes all resources that the filter functions returns true on. 
+func (c *Client) DeleteAllWorkflowTemplate(ctx context.Context, project, location string, filter func(*WorkflowTemplate) bool) error { + listObj, err := c.ListWorkflowTemplate(ctx, project, location) + if err != nil { + return err + } + + err = c.deleteAllWorkflowTemplate(ctx, filter, listObj.Items) + if err != nil { + return err + } + for listObj.HasNext() { + err = listObj.Next(ctx, c) + if err != nil { + return nil + } + err = c.deleteAllWorkflowTemplate(ctx, filter, listObj.Items) + if err != nil { + return err + } + } + return nil +} + +func (c *Client) ApplyWorkflowTemplate(ctx context.Context, rawDesired *WorkflowTemplate, opts ...dcl.ApplyOption) (*WorkflowTemplate, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + ctx = dcl.ContextWithRequestID(ctx) + var resultNewState *WorkflowTemplate + err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + newState, err := applyWorkflowTemplateHelper(c, ctx, rawDesired, opts...) + resultNewState = newState + if err != nil { + // If the error is 409, there is conflict in resource update. + // Here we want to apply changes based on latest state. + if dcl.IsConflictError(err) { + return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} + } + return nil, err + } + return nil, nil + }, c.Config.RetryProvider) + return resultNewState, err +} + +func applyWorkflowTemplateHelper(c *Client, ctx context.Context, rawDesired *WorkflowTemplate, opts ...dcl.ApplyOption) (*WorkflowTemplate, error) { + c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyWorkflowTemplate...") + c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) + + // 1.1: Validation of user-specified fields in desired state. 
+ if err := rawDesired.validate(); err != nil { + return nil, err + } + + if err := extractWorkflowTemplateFields(rawDesired); err != nil { + return nil, err + } + + initial, desired, fieldDiffs, err := c.workflowTemplateDiffsForRawDesired(ctx, rawDesired, opts...) + if err != nil { + return nil, fmt.Errorf("failed to create a diff: %w", err) + } + + diffs, err := convertFieldDiffsToWorkflowTemplateDiffs(c.Config, fieldDiffs, opts) + if err != nil { + return nil, err + } + + // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). + + // 2.3: Lifecycle Directive Check + var create bool + lp := dcl.FetchLifecycleParams(opts) + if initial == nil { + if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} + } + create = true + } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), + } + } else { + for _, d := range diffs { + if d.RequiresRecreate { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), + } + } + if dcl.HasLifecycleParam(lp, dcl.BlockModification) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} + } + } + } + + // 2.4 Imperative Request Planning + var ops []workflowTemplateApiOperation + if create { + ops = append(ops, &createWorkflowTemplateOperation{}) + } else { + for _, d := range diffs { + ops = append(ops, d.UpdateOp) + } + } + c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) + + // 2.5 Request Actuation + for _, op := range ops { + c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) + if err := op.do(ctx, desired, c); err != nil { + c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", 
op, op, err) + return nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) + } + return applyWorkflowTemplateDiff(c, ctx, desired, rawDesired, ops, opts...) +} + +func applyWorkflowTemplateDiff(c *Client, ctx context.Context, desired *WorkflowTemplate, rawDesired *WorkflowTemplate, ops []workflowTemplateApiOperation, opts ...dcl.ApplyOption) (*WorkflowTemplate, error) { + // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") + rawNew, err := c.GetWorkflowTemplate(ctx, desired) + if err != nil { + return nil, err + } + // Get additional values from the first response. + // These values should be merged into the newState above. + if len(ops) > 0 { + lastOp := ops[len(ops)-1] + if o, ok := lastOp.(*createWorkflowTemplateOperation); ok { + if r, hasR := o.FirstResponse(); hasR { + + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") + + fullResp, err := unmarshalMapWorkflowTemplate(r, c, rawDesired) + if err != nil { + return nil, err + } + + rawNew, err = canonicalizeWorkflowTemplateNewState(c, rawNew, fullResp) + if err != nil { + return nil, err + } + } + } + } + + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) + // 3.2b Canonicalization of raw new state using raw desired state + newState, err := canonicalizeWorkflowTemplateNewState(c, rawNew, rawDesired) + if err != nil { + return rawNew, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) + // 3.3 Comparison of the new state and raw desired state. 
+ // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE + newDesired, err := canonicalizeWorkflowTemplateDesiredState(rawDesired, newState) + if err != nil { + return newState, err + } + + if err := postReadExtractWorkflowTemplateFields(newState); err != nil { + return newState, err + } + + // Need to ensure any transformations made here match acceptably in differ. + if err := postReadExtractWorkflowTemplateFields(newDesired); err != nil { + return newState, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) + newDiffs, err := diffWorkflowTemplate(c, newDesired, newState) + if err != nil { + return newState, err + } + + if len(newDiffs) == 0 { + c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") + } else { + c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) + diffMessages := make([]string, len(newDiffs)) + for i, d := range newDiffs { + diffMessages[i] = fmt.Sprintf("%v", d) + } + return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} + } + c.Config.Logger.InfoWithContext(ctx, "Done Apply.") + return newState, nil +} diff --git a/mmv1/third_party/terraform/services/dataproc/workflow_template_internal.go.tmpl b/mmv1/third_party/terraform/services/dataproc/workflow_template_internal.go.tmpl new file mode 100644 index 000000000000..3faedcb84a9d --- /dev/null +++ b/mmv1/third_party/terraform/services/dataproc/workflow_template_internal.go.tmpl @@ -0,0 +1,20443 @@ +package dataproc + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "strings" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +func (r *WorkflowTemplate) validate() error { + + if err := dcl.RequiredParameter(r.Name, "Name"); err != nil { + return err + } + if err := dcl.Required(r, "placement"); err != nil { + return err + } + if err := dcl.Required(r, "jobs"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Project, 
"Project"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Location, "Location"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.EncryptionConfig) { + if err := r.EncryptionConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.Placement) { + if err := r.Placement.validate(); err != nil { + return err + } + } + return nil +} +func (r *WorkflowTemplateEncryptionConfig) validate() error { + return nil +} +func (r *WorkflowTemplatePlacement) validate() error { + if !dcl.IsEmptyValueIndirect(r.ManagedCluster) { + if err := r.ManagedCluster.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.ClusterSelector) { + if err := r.ClusterSelector.validate(); err != nil { + return err + } + } + return nil +} +func (r *WorkflowTemplatePlacementManagedCluster) validate() error { + if err := dcl.Required(r, "clusterName"); err != nil { + return err + } + if err := dcl.Required(r, "config"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.Config) { + if err := r.Config.validate(); err != nil { + return err + } + } + return nil +} +func (r *WorkflowTemplatePlacementManagedClusterConfig) validate() error { + if !dcl.IsEmptyValueIndirect(r.GceClusterConfig) { + if err := r.GceClusterConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.MasterConfig) { + if err := r.MasterConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.WorkerConfig) { + if err := r.WorkerConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.SecondaryWorkerConfig) { + if err := r.SecondaryWorkerConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.SoftwareConfig) { + if err := r.SoftwareConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.EncryptionConfig) { + if err := r.EncryptionConfig.validate(); err != nil { + return err + } + } + if 
!dcl.IsEmptyValueIndirect(r.AutoscalingConfig) { + if err := r.AutoscalingConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.SecurityConfig) { + if err := r.SecurityConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.LifecycleConfig) { + if err := r.LifecycleConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.EndpointConfig) { + if err := r.EndpointConfig.validate(); err != nil { + return err + } + } +{{- if ne $.TargetVersionName "ga" }} + if !dcl.IsEmptyValueIndirect(r.GkeClusterConfig) { + if err := r.GkeClusterConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.MetastoreConfig) { + if err := r.MetastoreConfig.validate(); err != nil { + return err + } + } +{{- end }} + return nil +} +func (r *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig) validate() error { + if !dcl.IsEmptyValueIndirect(r.ReservationAffinity) { + if err := r.ReservationAffinity.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.NodeGroupAffinity) { + if err := r.NodeGroupAffinity.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.ShieldedInstanceConfig) { + if err := r.ShieldedInstanceConfig.validate(); err != nil { + return err + } + } + return nil +} +func (r *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity) validate() error { + return nil +} +func (r *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity) validate() error { + if err := dcl.Required(r, "nodeGroup"); err != nil { + return err + } + return nil +} +func (r *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig) validate() error { + return nil +} +func (r *WorkflowTemplatePlacementManagedClusterConfigMasterConfig) validate() error { + if !dcl.IsEmptyValueIndirect(r.DiskConfig) { + if err := r.DiskConfig.validate(); err != nil { + return 
err + } + } + if !dcl.IsEmptyValueIndirect(r.ManagedGroupConfig) { + if err := r.ManagedGroupConfig.validate(); err != nil { + return err + } + } + return nil +} +func (r *WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig) validate() error { + return nil +} +func (r *WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig) validate() error { + return nil +} +func (r *WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators) validate() error { + return nil +} +func (r *WorkflowTemplatePlacementManagedClusterConfigWorkerConfig) validate() error { + if !dcl.IsEmptyValueIndirect(r.DiskConfig) { + if err := r.DiskConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.ManagedGroupConfig) { + if err := r.ManagedGroupConfig.validate(); err != nil { + return err + } + } + return nil +} +func (r *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig) validate() error { + return nil +} +func (r *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig) validate() error { + return nil +} +func (r *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators) validate() error { + return nil +} +func (r *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig) validate() error { + if !dcl.IsEmptyValueIndirect(r.DiskConfig) { + if err := r.DiskConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.ManagedGroupConfig) { + if err := r.ManagedGroupConfig.validate(); err != nil { + return err + } + } + return nil +} +func (r *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig) validate() error { + return nil +} +func (r *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig) validate() error { + return nil +} +func (r *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators) validate() error { + return nil +} +func (r 
*WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig) validate() error { + return nil +} +func (r *WorkflowTemplatePlacementManagedClusterConfigInitializationActions) validate() error { + return nil +} +func (r *WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig) validate() error { + return nil +} +func (r *WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig) validate() error { + return nil +} +func (r *WorkflowTemplatePlacementManagedClusterConfigSecurityConfig) validate() error { + if !dcl.IsEmptyValueIndirect(r.KerberosConfig) { + if err := r.KerberosConfig.validate(); err != nil { + return err + } + } + return nil +} +func (r *WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig) validate() error { + return nil +} +func (r *WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig) validate() error { + return nil +} +func (r *WorkflowTemplatePlacementManagedClusterConfigEndpointConfig) validate() error { + return nil +} +{{- if ne $.TargetVersionName "ga" }} +func (r *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig) validate() error { + if !dcl.IsEmptyValueIndirect(r.NamespacedGkeDeploymentTarget) { + if err := r.NamespacedGkeDeploymentTarget.validate(); err != nil { + return err + } + } + return nil +} +func (r *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) validate() error { + return nil +} +func (r *WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig) validate() error { + if err := dcl.Required(r, "dataprocMetastoreService"); err != nil { + return err + } + return nil +} +{{- end }} +func (r *WorkflowTemplatePlacementClusterSelector) validate() error { + if err := dcl.Required(r, "clusterLabels"); err != nil { + return err + } + return nil +} +func (r *WorkflowTemplateJobs) validate() error { + if err := dcl.Required(r, "stepId"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.HadoopJob) { + if err := 
r.HadoopJob.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.SparkJob) { + if err := r.SparkJob.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.PysparkJob) { + if err := r.PysparkJob.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.HiveJob) { + if err := r.HiveJob.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.PigJob) { + if err := r.PigJob.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.SparkRJob) { + if err := r.SparkRJob.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.SparkSqlJob) { + if err := r.SparkSqlJob.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.PrestoJob) { + if err := r.PrestoJob.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.Scheduling) { + if err := r.Scheduling.validate(); err != nil { + return err + } + } + return nil +} +func (r *WorkflowTemplateJobsHadoopJob) validate() error { + if !dcl.IsEmptyValueIndirect(r.LoggingConfig) { + if err := r.LoggingConfig.validate(); err != nil { + return err + } + } + return nil +} +func (r *WorkflowTemplateJobsHadoopJobLoggingConfig) validate() error { + return nil +} +func (r *WorkflowTemplateJobsSparkJob) validate() error { + if !dcl.IsEmptyValueIndirect(r.LoggingConfig) { + if err := r.LoggingConfig.validate(); err != nil { + return err + } + } + return nil +} +func (r *WorkflowTemplateJobsSparkJobLoggingConfig) validate() error { + return nil +} +func (r *WorkflowTemplateJobsPysparkJob) validate() error { + if err := dcl.Required(r, "mainPythonFileUri"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.LoggingConfig) { + if err := r.LoggingConfig.validate(); err != nil { + return err + } + } + return nil +} +func (r *WorkflowTemplateJobsPysparkJobLoggingConfig) validate() error { + return nil +} +func (r *WorkflowTemplateJobsHiveJob) validate() 
error { + if !dcl.IsEmptyValueIndirect(r.QueryList) { + if err := r.QueryList.validate(); err != nil { + return err + } + } + return nil +} +func (r *WorkflowTemplateJobsHiveJobQueryList) validate() error { + if err := dcl.Required(r, "queries"); err != nil { + return err + } + return nil +} +func (r *WorkflowTemplateJobsPigJob) validate() error { + if !dcl.IsEmptyValueIndirect(r.QueryList) { + if err := r.QueryList.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.LoggingConfig) { + if err := r.LoggingConfig.validate(); err != nil { + return err + } + } + return nil +} +func (r *WorkflowTemplateJobsPigJobQueryList) validate() error { + if err := dcl.Required(r, "queries"); err != nil { + return err + } + return nil +} +func (r *WorkflowTemplateJobsPigJobLoggingConfig) validate() error { + return nil +} +func (r *WorkflowTemplateJobsSparkRJob) validate() error { + if err := dcl.Required(r, "mainRFileUri"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.LoggingConfig) { + if err := r.LoggingConfig.validate(); err != nil { + return err + } + } + return nil +} +func (r *WorkflowTemplateJobsSparkRJobLoggingConfig) validate() error { + return nil +} +func (r *WorkflowTemplateJobsSparkSqlJob) validate() error { + if !dcl.IsEmptyValueIndirect(r.QueryList) { + if err := r.QueryList.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.LoggingConfig) { + if err := r.LoggingConfig.validate(); err != nil { + return err + } + } + return nil +} +func (r *WorkflowTemplateJobsSparkSqlJobQueryList) validate() error { + if err := dcl.Required(r, "queries"); err != nil { + return err + } + return nil +} +func (r *WorkflowTemplateJobsSparkSqlJobLoggingConfig) validate() error { + return nil +} +func (r *WorkflowTemplateJobsPrestoJob) validate() error { + if !dcl.IsEmptyValueIndirect(r.QueryList) { + if err := r.QueryList.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.LoggingConfig) { 
+ if err := r.LoggingConfig.validate(); err != nil { + return err + } + } + return nil +} +func (r *WorkflowTemplateJobsPrestoJobQueryList) validate() error { + if err := dcl.Required(r, "queries"); err != nil { + return err + } + return nil +} +func (r *WorkflowTemplateJobsPrestoJobLoggingConfig) validate() error { + return nil +} +func (r *WorkflowTemplateJobsScheduling) validate() error { + return nil +} +func (r *WorkflowTemplateParameters) validate() error { + if err := dcl.Required(r, "name"); err != nil { + return err + } + if err := dcl.Required(r, "fields"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.Validation) { + if err := r.Validation.validate(); err != nil { + return err + } + } + return nil +} +func (r *WorkflowTemplateParametersValidation) validate() error { + if !dcl.IsEmptyValueIndirect(r.Regex) { + if err := r.Regex.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.Values) { + if err := r.Values.validate(); err != nil { + return err + } + } + return nil +} +func (r *WorkflowTemplateParametersValidationRegex) validate() error { + if err := dcl.Required(r, "regexes"); err != nil { + return err + } + return nil +} +func (r *WorkflowTemplateParametersValidationValues) validate() error { + if err := dcl.Required(r, "values"); err != nil { + return err + } + return nil +} +func (r *WorkflowTemplate) basePath() string { + params := map[string]interface{}{} +{{- if ne $.TargetVersionName "ga" }} + return dcl.Nprintf("https://dataproc.googleapis.com/v1beta2/", params) +{{- else }} + return dcl.Nprintf("https://dataproc.googleapis.com/v1/", params) +{{- end }} +} + +func (r *WorkflowTemplate) getURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" 
}}/locations/{{ "{{" }}location{{ "}}" }}/workflowTemplates/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +func (r *WorkflowTemplate) listURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/workflowTemplates", nr.basePath(), userBasePath, params), nil + +} + +func (r *WorkflowTemplate) createURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/workflowTemplates", nr.basePath(), userBasePath, params), nil + +} + +func (r *WorkflowTemplate) deleteURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/workflowTemplates/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +// workflowTemplateApiOperation represents a mutable operation in the underlying REST +// API such as Create, Update, or Delete. 
+type workflowTemplateApiOperation interface { + do(context.Context, *WorkflowTemplate, *Client) error +} + +func (c *Client) listWorkflowTemplateRaw(ctx context.Context, r *WorkflowTemplate, pageToken string, pageSize int32) ([]byte, error) { + u, err := r.urlNormalized().listURL(c.Config.BasePath) + if err != nil { + return nil, err + } + + m := make(map[string]string) + if pageToken != "" { + m["pageToken"] = pageToken + } + + if pageSize != WorkflowTemplateMaxPage { + m["pageSize"] = fmt.Sprintf("%v", pageSize) + } + + u, err = dcl.AddQueryParams(u, m) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + return ioutil.ReadAll(resp.Response.Body) +} + +type listWorkflowTemplateOperation struct { + Templates []map[string]interface{} `json:"templates"` + Token string `json:"nextPageToken"` +} + +func (c *Client) listWorkflowTemplate(ctx context.Context, r *WorkflowTemplate, pageToken string, pageSize int32) ([]*WorkflowTemplate, string, error) { + b, err := c.listWorkflowTemplateRaw(ctx, r, pageToken, pageSize) + if err != nil { + return nil, "", err + } + + var m listWorkflowTemplateOperation + if err := json.Unmarshal(b, &m); err != nil { + return nil, "", err + } + + var l []*WorkflowTemplate + for _, v := range m.Templates { + res, err := unmarshalMapWorkflowTemplate(v, c, r) + if err != nil { + return nil, m.Token, err + } + res.Project = r.Project + res.Location = r.Location + l = append(l, res) + } + + return l, m.Token, nil +} + +func (c *Client) deleteAllWorkflowTemplate(ctx context.Context, f func(*WorkflowTemplate) bool, resources []*WorkflowTemplate) error { + var errors []string + for _, res := range resources { + if f(res) { + // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. 
+ err := c.DeleteWorkflowTemplate(ctx, res) + if err != nil { + errors = append(errors, err.Error()) + } + } + } + if len(errors) > 0 { + return fmt.Errorf("%v", strings.Join(errors, "\n")) + } else { + return nil + } +} + +type deleteWorkflowTemplateOperation struct{} + +func (op *deleteWorkflowTemplateOperation) do(ctx context.Context, r *WorkflowTemplate, c *Client) error { + r, err := c.GetWorkflowTemplate(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + c.Config.Logger.InfoWithContextf(ctx, "WorkflowTemplate not found, returning. Original error: %v", err) + return nil + } + c.Config.Logger.WarningWithContextf(ctx, "GetWorkflowTemplate checking for existence. error: %v", err) + return err + } + + u, err := r.deleteURL(c.Config.BasePath) + if err != nil { + return err + } + + // Delete should never have a body + body := &bytes.Buffer{} + _, err = dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider) + if err != nil { + return fmt.Errorf("failed to delete WorkflowTemplate: %w", err) + } + + // We saw a race condition where for some successful delete operation, the Get calls returned resources for a short duration. + // This is the reason we are adding retry to handle that case. + retriesRemaining := 10 + dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + _, err := c.GetWorkflowTemplate(ctx, r) + if dcl.IsNotFound(err) { + return nil, nil + } + if retriesRemaining > 0 { + retriesRemaining-- + return &dcl.RetryDetails{}, dcl.OperationNotDone{} + } + return nil, dcl.NotDeletedError{ExistingResource: r} + }, c.Config.RetryProvider) + return nil +} + +// Create operations are similar to Update operations, although they do not have +// specific request objects. The Create request object is the json encoding of +// the resource, which is modified by res.marshal to form the base request body. 
+type createWorkflowTemplateOperation struct { + response map[string]interface{} +} + +func (op *createWorkflowTemplateOperation) FirstResponse() (map[string]interface{}, bool) { + return op.response, len(op.response) > 0 +} + +func (op *createWorkflowTemplateOperation) do(ctx context.Context, r *WorkflowTemplate, c *Client) error { + c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r) + u, err := r.createURL(c.Config.BasePath) + if err != nil { + return err + } + + req, err := r.marshal(c) + if err != nil { + return err + } + var m map[string]interface{} + if err := json.Unmarshal(req, &m); err != nil { + return err + } + normalized := r.urlNormalized() + m["id"] = fmt.Sprintf("%s", *normalized.Name) + + req, err = json.Marshal(m) + if err != nil { + return err + } + resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider) + if err != nil { + return err + } + + o, err := dcl.ResponseBodyAsJSON(resp) + if err != nil { + return fmt.Errorf("error decoding response body into JSON: %w", err) + } + op.response = o + + if _, err := c.GetWorkflowTemplate(ctx, r); err != nil { + c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) + return err + } + + return nil +} + +func (c *Client) getWorkflowTemplateRaw(ctx context.Context, r *WorkflowTemplate) ([]byte, error) { + + u, err := r.getURL(c.Config.BasePath) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + b, err := ioutil.ReadAll(resp.Response.Body) + if err != nil { + return nil, err + } + + return b, nil +} + +func (c *Client) workflowTemplateDiffsForRawDesired(ctx context.Context, rawDesired *WorkflowTemplate, opts ...dcl.ApplyOption) (initial, desired *WorkflowTemplate, diffs []*dcl.FieldDiff, err error) { + c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") + // 
First, let us see if the user provided a state hint. If they did, we will start fetching based on that. + var fetchState *WorkflowTemplate + if sh := dcl.FetchStateHint(opts); sh != nil { + if r, ok := sh.(*WorkflowTemplate); !ok { + c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected WorkflowTemplate, got %T", sh) + } else { + fetchState = r + } + } + if fetchState == nil { + fetchState = rawDesired + } + + // 1.2: Retrieval of raw initial state from API + rawInitial, err := c.GetWorkflowTemplate(ctx, fetchState) + if rawInitial == nil { + if !dcl.IsNotFound(err) { + c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a WorkflowTemplate resource already exists: %s", err) + return nil, nil, nil, fmt.Errorf("failed to retrieve WorkflowTemplate resource: %v", err) + } + c.Config.Logger.InfoWithContext(ctx, "Found that WorkflowTemplate resource did not exist.") + // Perform canonicalization to pick up defaults. + desired, err = canonicalizeWorkflowTemplateDesiredState(rawDesired, rawInitial) + return nil, desired, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Found initial state for WorkflowTemplate: %v", rawInitial) + c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for WorkflowTemplate: %v", rawDesired) + + // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. + if err := extractWorkflowTemplateFields(rawInitial); err != nil { + return nil, nil, nil, err + } + + // 1.3: Canonicalize raw initial state into initial state. + initial, err = canonicalizeWorkflowTemplateInitialState(rawInitial, rawDesired) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for WorkflowTemplate: %v", initial) + + // 1.4: Canonicalize raw desired state into desired state. + desired, err = canonicalizeWorkflowTemplateDesiredState(rawDesired, rawInitial, opts...) 
+ if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for WorkflowTemplate: %v", desired) + + // 2.1: Comparison of initial and desired state. + diffs, err = diffWorkflowTemplate(c, desired, initial, opts...) + return initial, desired, diffs, err +} + +func canonicalizeWorkflowTemplateInitialState(rawInitial, rawDesired *WorkflowTemplate) (*WorkflowTemplate, error) { + // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. + return rawInitial, nil +} + +/* +* Canonicalizers +* +* These are responsible for converting either a user-specified config or a +* GCP API response to a standard format that can be used for difference checking. +* */ + +func canonicalizeWorkflowTemplateDesiredState(rawDesired, rawInitial *WorkflowTemplate, opts ...dcl.ApplyOption) (*WorkflowTemplate, error) { + + if rawInitial == nil { + // Since the initial state is empty, the desired state is all we have. + // We canonicalize the remaining nested objects with nil to pick up defaults. + rawDesired.EncryptionConfig = canonicalizeWorkflowTemplateEncryptionConfig(rawDesired.EncryptionConfig, nil, opts...) + rawDesired.Placement = canonicalizeWorkflowTemplatePlacement(rawDesired.Placement, nil, opts...) + + return rawDesired, nil + } + canonicalDesired := &WorkflowTemplate{} + if dcl.NameToSelfLink(rawDesired.Name, rawInitial.Name) { + canonicalDesired.Name = rawInitial.Name + } else { + canonicalDesired.Name = rawDesired.Name + } + if dcl.IsZeroValue(rawDesired.Labels) || (dcl.IsEmptyValueIndirect(rawDesired.Labels) && dcl.IsEmptyValueIndirect(rawInitial.Labels)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ canonicalDesired.Labels = rawInitial.Labels + } else { + canonicalDesired.Labels = rawDesired.Labels + } + canonicalDesired.EncryptionConfig = canonicalizeWorkflowTemplateEncryptionConfig(rawDesired.EncryptionConfig, rawInitial.EncryptionConfig, opts...) + canonicalDesired.Placement = canonicalizeWorkflowTemplatePlacement(rawDesired.Placement, rawInitial.Placement, opts...) + canonicalDesired.Jobs = canonicalizeWorkflowTemplateJobsSlice(rawDesired.Jobs, rawInitial.Jobs, opts...) + canonicalDesired.Parameters = canonicalizeWorkflowTemplateParametersSlice(rawDesired.Parameters, rawInitial.Parameters, opts...) + if dcl.StringCanonicalize(rawDesired.DagTimeout, rawInitial.DagTimeout) { + canonicalDesired.DagTimeout = rawInitial.DagTimeout + } else { + canonicalDesired.DagTimeout = rawDesired.DagTimeout + } + if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) { + canonicalDesired.Project = rawInitial.Project + } else { + canonicalDesired.Project = rawDesired.Project + } + if dcl.NameToSelfLink(rawDesired.Location, rawInitial.Location) { + canonicalDesired.Location = rawInitial.Location + } else { + canonicalDesired.Location = rawDesired.Location + } + return canonicalDesired, nil +} + +func canonicalizeWorkflowTemplateNewState(c *Client, rawNew, rawDesired *WorkflowTemplate) (*WorkflowTemplate, error) { + + rawNew.Name = rawDesired.Name + + if dcl.IsEmptyValueIndirect(rawNew.Version) && dcl.IsEmptyValueIndirect(rawDesired.Version) { + rawNew.Version = rawDesired.Version + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.CreateTime) && dcl.IsEmptyValueIndirect(rawDesired.CreateTime) { + rawNew.CreateTime = rawDesired.CreateTime + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.UpdateTime) && dcl.IsEmptyValueIndirect(rawDesired.UpdateTime) { + rawNew.UpdateTime = rawDesired.UpdateTime + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.Labels) && dcl.IsEmptyValueIndirect(rawDesired.Labels) { + rawNew.Labels = rawDesired.Labels + } else { + } + + if 
dcl.IsEmptyValueIndirect(rawNew.EncryptionConfig) && dcl.IsEmptyValueIndirect(rawDesired.EncryptionConfig) { + rawNew.EncryptionConfig = rawDesired.EncryptionConfig + } else { + rawNew.EncryptionConfig = canonicalizeNewWorkflowTemplateEncryptionConfig(c, rawDesired.EncryptionConfig, rawNew.EncryptionConfig) + } + + if dcl.IsEmptyValueIndirect(rawNew.Placement) && dcl.IsEmptyValueIndirect(rawDesired.Placement) { + rawNew.Placement = rawDesired.Placement + } else { + rawNew.Placement = canonicalizeNewWorkflowTemplatePlacement(c, rawDesired.Placement, rawNew.Placement) + } + + if dcl.IsEmptyValueIndirect(rawNew.Jobs) && dcl.IsEmptyValueIndirect(rawDesired.Jobs) { + rawNew.Jobs = rawDesired.Jobs + } else { + rawNew.Jobs = canonicalizeNewWorkflowTemplateJobsSlice(c, rawDesired.Jobs, rawNew.Jobs) + } + + if dcl.IsEmptyValueIndirect(rawNew.Parameters) && dcl.IsEmptyValueIndirect(rawDesired.Parameters) { + rawNew.Parameters = rawDesired.Parameters + } else { + rawNew.Parameters = canonicalizeNewWorkflowTemplateParametersSlice(c, rawDesired.Parameters, rawNew.Parameters) + } + + if dcl.IsEmptyValueIndirect(rawNew.DagTimeout) && dcl.IsEmptyValueIndirect(rawDesired.DagTimeout) { + rawNew.DagTimeout = rawDesired.DagTimeout + } else { + if dcl.StringCanonicalize(rawDesired.DagTimeout, rawNew.DagTimeout) { + rawNew.DagTimeout = rawDesired.DagTimeout + } + } + + rawNew.Project = rawDesired.Project + + rawNew.Location = rawDesired.Location + + return rawNew, nil +} + +func canonicalizeWorkflowTemplateEncryptionConfig(des, initial *WorkflowTemplateEncryptionConfig, opts ...dcl.ApplyOption) *WorkflowTemplateEncryptionConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplateEncryptionConfig{} + + if dcl.IsZeroValue(des.KmsKey) || (dcl.IsEmptyValueIndirect(des.KmsKey) && dcl.IsEmptyValueIndirect(initial.KmsKey)) { + // Desired and initial values are equivalent, so set canonical desired value 
to initial value. + cDes.KmsKey = initial.KmsKey + } else { + cDes.KmsKey = des.KmsKey + } + + return cDes +} + +func canonicalizeWorkflowTemplateEncryptionConfigSlice(des, initial []WorkflowTemplateEncryptionConfig, opts ...dcl.ApplyOption) []WorkflowTemplateEncryptionConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplateEncryptionConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplateEncryptionConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplateEncryptionConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplateEncryptionConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplateEncryptionConfig(c *Client, des, nw *WorkflowTemplateEncryptionConfig) *WorkflowTemplateEncryptionConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplateEncryptionConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewWorkflowTemplateEncryptionConfigSet(c *Client, des, nw []WorkflowTemplateEncryptionConfig) []WorkflowTemplateEncryptionConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []WorkflowTemplateEncryptionConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplateEncryptionConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplateEncryptionConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplateEncryptionConfigSlice(c *Client, des, nw []WorkflowTemplateEncryptionConfig) []WorkflowTemplateEncryptionConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplateEncryptionConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplateEncryptionConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplatePlacement(des, initial *WorkflowTemplatePlacement, opts ...dcl.ApplyOption) *WorkflowTemplatePlacement { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplatePlacement{} + + cDes.ManagedCluster = canonicalizeWorkflowTemplatePlacementManagedCluster(des.ManagedCluster, initial.ManagedCluster, opts...) + cDes.ClusterSelector = canonicalizeWorkflowTemplatePlacementClusterSelector(des.ClusterSelector, initial.ClusterSelector, opts...) 
+ + return cDes +} + +func canonicalizeWorkflowTemplatePlacementSlice(des, initial []WorkflowTemplatePlacement, opts ...dcl.ApplyOption) []WorkflowTemplatePlacement { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplatePlacement, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplatePlacement(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplatePlacement, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplatePlacement(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplatePlacement(c *Client, des, nw *WorkflowTemplatePlacement) *WorkflowTemplatePlacement { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacement while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + nw.ManagedCluster = canonicalizeNewWorkflowTemplatePlacementManagedCluster(c, des.ManagedCluster, nw.ManagedCluster) + nw.ClusterSelector = canonicalizeNewWorkflowTemplatePlacementClusterSelector(c, des.ClusterSelector, nw.ClusterSelector) + + return nw +} + +func canonicalizeNewWorkflowTemplatePlacementSet(c *Client, des, nw []WorkflowTemplatePlacement) []WorkflowTemplatePlacement { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []WorkflowTemplatePlacement + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplatePlacementNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplatePlacement(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplatePlacementSlice(c *Client, des, nw []WorkflowTemplatePlacement) []WorkflowTemplatePlacement { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplatePlacement + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplatePlacement(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplatePlacementManagedCluster(des, initial *WorkflowTemplatePlacementManagedCluster, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedCluster { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplatePlacementManagedCluster{} + + if dcl.StringCanonicalize(des.ClusterName, initial.ClusterName) || dcl.IsZeroValue(des.ClusterName) { + cDes.ClusterName = initial.ClusterName + } else { + cDes.ClusterName = des.ClusterName + } + cDes.Config = canonicalizeWorkflowTemplatePlacementManagedClusterConfig(des.Config, initial.Config, opts...) + if dcl.IsZeroValue(des.Labels) || (dcl.IsEmptyValueIndirect(des.Labels) && dcl.IsEmptyValueIndirect(initial.Labels)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.Labels = initial.Labels + } else { + cDes.Labels = des.Labels + } + + return cDes +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterSlice(des, initial []WorkflowTemplatePlacementManagedCluster, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedCluster { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplatePlacementManagedCluster, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedCluster(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplatePlacementManagedCluster, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedCluster(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplatePlacementManagedCluster(c *Client, des, nw *WorkflowTemplatePlacementManagedCluster) *WorkflowTemplatePlacementManagedCluster { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedCluster while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.ClusterName, nw.ClusterName) { + nw.ClusterName = des.ClusterName + } + nw.Config = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfig(c, des.Config, nw.Config) + + return nw +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterSet(c *Client, des, nw []WorkflowTemplatePlacementManagedCluster) []WorkflowTemplatePlacementManagedCluster { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []WorkflowTemplatePlacementManagedCluster + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplatePlacementManagedClusterNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedCluster(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterSlice(c *Client, des, nw []WorkflowTemplatePlacementManagedCluster) []WorkflowTemplatePlacementManagedCluster { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplatePlacementManagedCluster + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedCluster(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfig(des, initial *WorkflowTemplatePlacementManagedClusterConfig, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplatePlacementManagedClusterConfig{} + + if dcl.IsZeroValue(des.StagingBucket) || (dcl.IsEmptyValueIndirect(des.StagingBucket) && dcl.IsEmptyValueIndirect(initial.StagingBucket)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.StagingBucket = initial.StagingBucket + } else { + cDes.StagingBucket = des.StagingBucket + } + if dcl.IsZeroValue(des.TempBucket) || (dcl.IsEmptyValueIndirect(des.TempBucket) && dcl.IsEmptyValueIndirect(initial.TempBucket)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.TempBucket = initial.TempBucket + } else { + cDes.TempBucket = des.TempBucket + } + cDes.GceClusterConfig = canonicalizeWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig(des.GceClusterConfig, initial.GceClusterConfig, opts...) + cDes.MasterConfig = canonicalizeWorkflowTemplatePlacementManagedClusterConfigMasterConfig(des.MasterConfig, initial.MasterConfig, opts...) + cDes.WorkerConfig = canonicalizeWorkflowTemplatePlacementManagedClusterConfigWorkerConfig(des.WorkerConfig, initial.WorkerConfig, opts...) + cDes.SecondaryWorkerConfig = canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig(des.SecondaryWorkerConfig, initial.SecondaryWorkerConfig, opts...) + cDes.SoftwareConfig = canonicalizeWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig(des.SoftwareConfig, initial.SoftwareConfig, opts...) + cDes.InitializationActions = canonicalizeWorkflowTemplatePlacementManagedClusterConfigInitializationActionsSlice(des.InitializationActions, initial.InitializationActions, opts...) + cDes.EncryptionConfig = canonicalizeWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig(des.EncryptionConfig, initial.EncryptionConfig, opts...) + cDes.AutoscalingConfig = canonicalizeWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig(des.AutoscalingConfig, initial.AutoscalingConfig, opts...) + cDes.SecurityConfig = canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecurityConfig(des.SecurityConfig, initial.SecurityConfig, opts...) + cDes.LifecycleConfig = canonicalizeWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig(des.LifecycleConfig, initial.LifecycleConfig, opts...) 
+ cDes.EndpointConfig = canonicalizeWorkflowTemplatePlacementManagedClusterConfigEndpointConfig(des.EndpointConfig, initial.EndpointConfig, opts...) +{{- if ne $.TargetVersionName "ga" }} + cDes.GkeClusterConfig = canonicalizeWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig(des.GkeClusterConfig, initial.GkeClusterConfig, opts...) + cDes.MetastoreConfig = canonicalizeWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig(des.MetastoreConfig, initial.MetastoreConfig, opts...) +{{- end }} + + return cDes +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigSlice(des, initial []WorkflowTemplatePlacementManagedClusterConfig, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplatePlacementManagedClusterConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfig(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfig) *WorkflowTemplatePlacementManagedClusterConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfig while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + nw.GceClusterConfig = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig(c, des.GceClusterConfig, nw.GceClusterConfig) + nw.MasterConfig = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMasterConfig(c, des.MasterConfig, nw.MasterConfig) + nw.WorkerConfig = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigWorkerConfig(c, des.WorkerConfig, nw.WorkerConfig) + nw.SecondaryWorkerConfig = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig(c, des.SecondaryWorkerConfig, nw.SecondaryWorkerConfig) + nw.SoftwareConfig = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig(c, des.SoftwareConfig, nw.SoftwareConfig) + nw.InitializationActions = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigInitializationActionsSlice(c, des.InitializationActions, nw.InitializationActions) + nw.EncryptionConfig = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig(c, des.EncryptionConfig, nw.EncryptionConfig) + nw.AutoscalingConfig = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig(c, des.AutoscalingConfig, nw.AutoscalingConfig) + nw.SecurityConfig = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecurityConfig(c, des.SecurityConfig, nw.SecurityConfig) + nw.LifecycleConfig = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig(c, des.LifecycleConfig, nw.LifecycleConfig) + nw.EndpointConfig = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigEndpointConfig(c, des.EndpointConfig, nw.EndpointConfig) +{{- if ne $.TargetVersionName "ga" }} + nw.GkeClusterConfig = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig(c, des.GkeClusterConfig, nw.GkeClusterConfig) + nw.MetastoreConfig = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig(c, des.MetastoreConfig, nw.MetastoreConfig) 
+{{- end }} + + return nw +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfig) []WorkflowTemplatePlacementManagedClusterConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplatePlacementManagedClusterConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfig) []WorkflowTemplatePlacementManagedClusterConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+	if len(des) != len(nw) {
+		return nw
+	}
+
+	var items []WorkflowTemplatePlacementManagedClusterConfig
+	for i, d := range des {
+		n := nw[i]
+		items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfig(c, &d, &n))
+	}
+
+	return items
+}
+
+// canonicalizeWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig merges the desired
+// value (des) with the initial/current value field by field, preferring the initial value when
+// the two are equivalent. NOTE(review): DCL-generated code — regenerate rather than hand-edit.
+func canonicalizeWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig(des, initial *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig {
+	if des == nil {
+		return initial
+	}
+	if des.empty {
+		// Explicitly-empty desired value wins over any initial state.
+		return des
+	}
+
+	if initial == nil {
+		return des
+	}
+
+	cDes := &WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig{}
+
+	if dcl.StringCanonicalize(des.Zone, initial.Zone) || dcl.IsZeroValue(des.Zone) {
+		cDes.Zone = initial.Zone
+	} else {
+		cDes.Zone = des.Zone
+	}
+	if dcl.IsZeroValue(des.Network) || (dcl.IsEmptyValueIndirect(des.Network) && dcl.IsEmptyValueIndirect(initial.Network)) {
+		// Desired and initial values are equivalent, so set canonical desired value to initial value.
+		cDes.Network = initial.Network
+	} else {
+		cDes.Network = des.Network
+	}
+	if dcl.IsZeroValue(des.Subnetwork) || (dcl.IsEmptyValueIndirect(des.Subnetwork) && dcl.IsEmptyValueIndirect(initial.Subnetwork)) {
+		// Desired and initial values are equivalent, so set canonical desired value to initial value.
+		cDes.Subnetwork = initial.Subnetwork
+	} else {
+		cDes.Subnetwork = des.Subnetwork
+	}
+	if dcl.BoolCanonicalize(des.InternalIPOnly, initial.InternalIPOnly) || dcl.IsZeroValue(des.InternalIPOnly) {
+		cDes.InternalIPOnly = initial.InternalIPOnly
+	} else {
+		cDes.InternalIPOnly = des.InternalIPOnly
+	}
+	if dcl.IsZeroValue(des.PrivateIPv6GoogleAccess) || (dcl.IsEmptyValueIndirect(des.PrivateIPv6GoogleAccess) && dcl.IsEmptyValueIndirect(initial.PrivateIPv6GoogleAccess)) {
+		// Desired and initial values are equivalent, so set canonical desired value to initial value.
+		cDes.PrivateIPv6GoogleAccess = initial.PrivateIPv6GoogleAccess
+	} else {
+		cDes.PrivateIPv6GoogleAccess = des.PrivateIPv6GoogleAccess
+	}
+	if dcl.IsZeroValue(des.ServiceAccount) || (dcl.IsEmptyValueIndirect(des.ServiceAccount) && dcl.IsEmptyValueIndirect(initial.ServiceAccount)) {
+		// Desired and initial values are equivalent, so set canonical desired value to initial value.
+		cDes.ServiceAccount = initial.ServiceAccount
+	} else {
+		cDes.ServiceAccount = des.ServiceAccount
+	}
+	if dcl.StringArrayCanonicalize(des.ServiceAccountScopes, initial.ServiceAccountScopes) {
+		cDes.ServiceAccountScopes = initial.ServiceAccountScopes
+	} else {
+		cDes.ServiceAccountScopes = des.ServiceAccountScopes
+	}
+	if dcl.StringArrayCanonicalize(des.Tags, initial.Tags) {
+		cDes.Tags = initial.Tags
+	} else {
+		cDes.Tags = des.Tags
+	}
+	if dcl.IsZeroValue(des.Metadata) || (dcl.IsEmptyValueIndirect(des.Metadata) && dcl.IsEmptyValueIndirect(initial.Metadata)) {
+		// Desired and initial values are equivalent, so set canonical desired value to initial value.
+		cDes.Metadata = initial.Metadata
+	} else {
+		cDes.Metadata = des.Metadata
+	}
+	// Nested messages recurse into their own canonicalizers.
+	cDes.ReservationAffinity = canonicalizeWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity(des.ReservationAffinity, initial.ReservationAffinity, opts...)
+	cDes.NodeGroupAffinity = canonicalizeWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity(des.NodeGroupAffinity, initial.NodeGroupAffinity, opts...)
+	cDes.ShieldedInstanceConfig = canonicalizeWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig(des.ShieldedInstanceConfig, initial.ShieldedInstanceConfig, opts...)
+
+	return cDes
+}
+
+// Slice variant: canonicalizes element-wise when lengths match; on a length mismatch each
+// desired element is canonicalized against nil (a diff is reported downstream).
+func canonicalizeWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigSlice(des, initial []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig {
+	if dcl.IsEmptyValueIndirect(des) {
+		return initial
+	}
+
+	if len(des) != len(initial) {
+
+		items := make([]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig, 0, len(des))
+		for _, d := range des {
+			cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig(&d, nil, opts...)
+			if cd != nil {
+				items = append(items, *cd)
+			}
+		}
+		return items
+	}
+
+	items := make([]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig, 0, len(des))
+	for i, d := range des {
+		cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig(&d, &initial[i], opts...)
+		if cd != nil {
+			items = append(items, *cd)
+		}
+	}
+	return items
+
+}
+
+// canonicalizeNew… reconciles the server-returned value (nw) with the desired value (des)
+// after an apply, keeping the desired representation wherever the two are equivalent.
+func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig) *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig {
+
+	if des == nil {
+		return nw
+	}
+
+	if nw == nil {
+		if dcl.IsEmptyValueIndirect(des) {
+			c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig while comparing non-nil desired to nil actual. Returning desired object.")
+			return des
+		}
+		return nil
+	}
+
+	if dcl.StringCanonicalize(des.Zone, nw.Zone) {
+		nw.Zone = des.Zone
+	}
+	if dcl.BoolCanonicalize(des.InternalIPOnly, nw.InternalIPOnly) {
+		nw.InternalIPOnly = des.InternalIPOnly
+	}
+	if dcl.StringArrayCanonicalize(des.ServiceAccountScopes, nw.ServiceAccountScopes) {
+		nw.ServiceAccountScopes = des.ServiceAccountScopes
+	}
+	if dcl.StringArrayCanonicalize(des.Tags, nw.Tags) {
+		nw.Tags = des.Tags
+	}
+	nw.ReservationAffinity = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity(c, des.ReservationAffinity, nw.ReservationAffinity)
+	nw.NodeGroupAffinity = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity(c, des.NodeGroupAffinity, nw.NodeGroupAffinity)
+	nw.ShieldedInstanceConfig = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig(c, des.ShieldedInstanceConfig, nw.ShieldedInstanceConfig)
+
+	return nw
+}
+
+// Set variant: treats the slices as unordered sets, matching elements by zero-diff comparison.
+func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigSet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig) []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig {
+	if des == nil {
+		return nw
+	}
+
+	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
+	var items []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig
+	for _, d := range des {
+		matchedIndex := -1
+		for i, n := range nw {
+			if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
+				matchedIndex = i
+				break
+			}
+		}
+		if matchedIndex != -1 {
+			items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig(c, &d, &nw[matchedIndex]))
+			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
+		}
+	}
+	// Also include elements in nw that are not matched in des.
+	items = append(items, nw...)
+
+	return items
+}
+
+// Slice variant for post-apply reconciliation: only canonicalizes when lengths match.
+func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigSlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig) []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig {
+	if des == nil {
+		return nw
+	}
+
+	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
+	// Return the original array.
+	if len(des) != len(nw) {
+		return nw
+	}
+
+	var items []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig
+	for i, d := range des {
+		n := nw[i]
+		items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig(c, &d, &n))
+	}
+
+	return items
+}
+
+// Merges desired and initial ReservationAffinity into the canonical desired value.
+// NOTE(review): DCL-generated code — regenerate rather than hand-edit.
+func canonicalizeWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity(des, initial *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity {
+	if des == nil {
+		return initial
+	}
+	if des.empty {
+		// Explicitly-empty desired value wins over any initial state.
+		return des
+	}
+
+	if initial == nil {
+		return des
+	}
+
+	cDes := &WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity{}
+
+	if dcl.IsZeroValue(des.ConsumeReservationType) || (dcl.IsEmptyValueIndirect(des.ConsumeReservationType) && dcl.IsEmptyValueIndirect(initial.ConsumeReservationType)) {
+		// Desired and initial values are equivalent, so set canonical desired value to initial value.
+		cDes.ConsumeReservationType = initial.ConsumeReservationType
+	} else {
+		cDes.ConsumeReservationType = des.ConsumeReservationType
+	}
+	if dcl.StringCanonicalize(des.Key, initial.Key) || dcl.IsZeroValue(des.Key) {
+		cDes.Key = initial.Key
+	} else {
+		cDes.Key = des.Key
+	}
+	if dcl.StringArrayCanonicalize(des.Values, initial.Values) {
+		cDes.Values = initial.Values
+	} else {
+		cDes.Values = des.Values
+	}
+
+	return cDes
+}
+
+// Slice variant: element-wise when lengths match, otherwise each desired element vs. nil.
+func canonicalizeWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinitySlice(des, initial []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity {
+	if dcl.IsEmptyValueIndirect(des) {
+		return initial
+	}
+
+	if len(des) != len(initial) {
+
+		items := make([]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity, 0, len(des))
+		for _, d := range des {
+			cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity(&d, nil, opts...)
+			if cd != nil {
+				items = append(items, *cd)
+			}
+		}
+		return items
+	}
+
+	items := make([]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity, 0, len(des))
+	for i, d := range des {
+		cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity(&d, &initial[i], opts...)
+		if cd != nil {
+			items = append(items, *cd)
+		}
+	}
+	return items
+
+}
+
+// Post-apply reconciliation of ReservationAffinity: keeps desired representations where
+// equivalent to the server-returned value.
+func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity) *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity {
+
+	if des == nil {
+		return nw
+	}
+
+	if nw == nil {
+		if dcl.IsEmptyValueIndirect(des) {
+			c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity while comparing non-nil desired to nil actual. Returning desired object.")
+			return des
+		}
+		return nil
+	}
+
+	if dcl.StringCanonicalize(des.Key, nw.Key) {
+		nw.Key = des.Key
+	}
+	if dcl.StringArrayCanonicalize(des.Values, nw.Values) {
+		nw.Values = des.Values
+	}
+
+	return nw
+}
+
+// Set variant: unordered matching by zero-diff comparison.
+func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinitySet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity) []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity {
+	if des == nil {
+		return nw
+	}
+
+	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
+	var items []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity
+	for _, d := range des {
+		matchedIndex := -1
+		for i, n := range nw {
+			if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
+				matchedIndex = i
+				break
+			}
+		}
+		if matchedIndex != -1 {
+			items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity(c, &d, &nw[matchedIndex]))
+			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
+		}
+	}
+	// Also include elements in nw that are not matched in des.
+	items = append(items, nw...)
+
+	return items
+}
+
+// Slice variant for post-apply reconciliation: only canonicalizes when lengths match.
+func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinitySlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity) []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity {
+	if des == nil {
+		return nw
+	}
+
+	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
+	// Return the original array.
+	if len(des) != len(nw) {
+		return nw
+	}
+
+	var items []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity
+	for i, d := range des {
+		n := nw[i]
+		items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity(c, &d, &n))
+	}
+
+	return items
+}
+
+// Merges desired and initial NodeGroupAffinity (single field: NodeGroup) into the canonical
+// desired value. NOTE(review): DCL-generated code — regenerate rather than hand-edit.
+func canonicalizeWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity(des, initial *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity {
+	if des == nil {
+		return initial
+	}
+	if des.empty {
+		// Explicitly-empty desired value wins over any initial state.
+		return des
+	}
+
+	if initial == nil {
+		return des
+	}
+
+	cDes := &WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity{}
+
+	if dcl.IsZeroValue(des.NodeGroup) || (dcl.IsEmptyValueIndirect(des.NodeGroup) && dcl.IsEmptyValueIndirect(initial.NodeGroup)) {
+		// Desired and initial values are equivalent, so set canonical desired value to initial value.
+		cDes.NodeGroup = initial.NodeGroup
+	} else {
+		cDes.NodeGroup = des.NodeGroup
+	}
+
+	return cDes
+}
+
+// Slice variant: element-wise when lengths match, otherwise each desired element vs. nil.
+func canonicalizeWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinitySlice(des, initial []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity {
+	if dcl.IsEmptyValueIndirect(des) {
+		return initial
+	}
+
+	if len(des) != len(initial) {
+
+		items := make([]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity, 0, len(des))
+		for _, d := range des {
+			cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity(&d, nil, opts...)
+			if cd != nil {
+				items = append(items, *cd)
+			}
+		}
+		return items
+	}
+
+	items := make([]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity, 0, len(des))
+	for i, d := range des {
+		cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity(&d, &initial[i], opts...)
+		if cd != nil {
+			items = append(items, *cd)
+		}
+	}
+	return items
+
+}
+
+// Post-apply reconciliation of NodeGroupAffinity; no server-normalized fields to restore here.
+func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity) *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity {
+
+	if des == nil {
+		return nw
+	}
+
+	if nw == nil {
+		if dcl.IsEmptyValueIndirect(des) {
+			c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity while comparing non-nil desired to nil actual. Returning desired object.")
+			return des
+		}
+		return nil
+	}
+
+	return nw
+}
+
+// Set variant: unordered matching by zero-diff comparison.
+func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinitySet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity) []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity {
+	if des == nil {
+		return nw
+	}
+
+	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
+	var items []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity
+	for _, d := range des {
+		matchedIndex := -1
+		for i, n := range nw {
+			if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
+				matchedIndex = i
+				break
+			}
+		}
+		if matchedIndex != -1 {
+			items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity(c, &d, &nw[matchedIndex]))
+			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
+		}
+	}
+	// Also include elements in nw that are not matched in des.
+	items = append(items, nw...)
+
+	return items
+}
+
+// Slice variant for post-apply reconciliation: only canonicalizes when lengths match.
+func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinitySlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity) []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity {
+	if des == nil {
+		return nw
+	}
+
+	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
+	// Return the original array.
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig(des, initial *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig{} + + if dcl.BoolCanonicalize(des.EnableSecureBoot, initial.EnableSecureBoot) || dcl.IsZeroValue(des.EnableSecureBoot) { + cDes.EnableSecureBoot = initial.EnableSecureBoot + } else { + cDes.EnableSecureBoot = des.EnableSecureBoot + } + if dcl.BoolCanonicalize(des.EnableVtpm, initial.EnableVtpm) || dcl.IsZeroValue(des.EnableVtpm) { + cDes.EnableVtpm = initial.EnableVtpm + } else { + cDes.EnableVtpm = des.EnableVtpm + } + if dcl.BoolCanonicalize(des.EnableIntegrityMonitoring, initial.EnableIntegrityMonitoring) || dcl.IsZeroValue(des.EnableIntegrityMonitoring) { + cDes.EnableIntegrityMonitoring = initial.EnableIntegrityMonitoring + } else { + cDes.EnableIntegrityMonitoring = des.EnableIntegrityMonitoring + } + + return cDes +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfigSlice(des, initial []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != 
len(initial) { + + items := make([]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig) *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.BoolCanonicalize(des.EnableSecureBoot, nw.EnableSecureBoot) { + nw.EnableSecureBoot = des.EnableSecureBoot + } + if dcl.BoolCanonicalize(des.EnableVtpm, nw.EnableVtpm) { + nw.EnableVtpm = des.EnableVtpm + } + if dcl.BoolCanonicalize(des.EnableIntegrityMonitoring, nw.EnableIntegrityMonitoring) { + nw.EnableIntegrityMonitoring = des.EnableIntegrityMonitoring + } + + return nw +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfigSet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig) []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfigSlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig) []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig { + if des == nil { + return nw + } + + // Lengths are unequal. 
A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigMasterConfig(des, initial *WorkflowTemplatePlacementManagedClusterConfigMasterConfig, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfigMasterConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplatePlacementManagedClusterConfigMasterConfig{} + + if dcl.IsZeroValue(des.NumInstances) || (dcl.IsEmptyValueIndirect(des.NumInstances) && dcl.IsEmptyValueIndirect(initial.NumInstances)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.NumInstances = initial.NumInstances + } else { + cDes.NumInstances = des.NumInstances + } + if dcl.IsZeroValue(des.Image) || (dcl.IsEmptyValueIndirect(des.Image) && dcl.IsEmptyValueIndirect(initial.Image)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Image = initial.Image + } else { + cDes.Image = des.Image + } + if dcl.StringCanonicalize(des.MachineType, initial.MachineType) || dcl.IsZeroValue(des.MachineType) { + cDes.MachineType = initial.MachineType + } else { + cDes.MachineType = des.MachineType + } + cDes.DiskConfig = canonicalizeWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig(des.DiskConfig, initial.DiskConfig, opts...) 
+ if dcl.IsZeroValue(des.Preemptibility) || (dcl.IsEmptyValueIndirect(des.Preemptibility) && dcl.IsEmptyValueIndirect(initial.Preemptibility)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Preemptibility = initial.Preemptibility + } else { + cDes.Preemptibility = des.Preemptibility + } + cDes.Accelerators = canonicalizeWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorsSlice(des.Accelerators, initial.Accelerators, opts...) + if dcl.StringCanonicalize(des.MinCpuPlatform, initial.MinCpuPlatform) || dcl.IsZeroValue(des.MinCpuPlatform) { + cDes.MinCpuPlatform = initial.MinCpuPlatform + } else { + cDes.MinCpuPlatform = des.MinCpuPlatform + } + + return cDes +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigMasterConfigSlice(des, initial []WorkflowTemplatePlacementManagedClusterConfigMasterConfig, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfigMasterConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplatePlacementManagedClusterConfigMasterConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigMasterConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigMasterConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigMasterConfig(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMasterConfig(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfigMasterConfig) *WorkflowTemplatePlacementManagedClusterConfigMasterConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfigMasterConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringArrayCanonicalize(des.InstanceNames, nw.InstanceNames) { + nw.InstanceNames = des.InstanceNames + } + if dcl.StringCanonicalize(des.MachineType, nw.MachineType) { + nw.MachineType = des.MachineType + } + nw.DiskConfig = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig(c, des.DiskConfig, nw.DiskConfig) + if dcl.BoolCanonicalize(des.IsPreemptible, nw.IsPreemptible) { + nw.IsPreemptible = des.IsPreemptible + } + nw.ManagedGroupConfig = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig(c, des.ManagedGroupConfig, nw.ManagedGroupConfig) + nw.Accelerators = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorsSlice(c, des.Accelerators, nw.Accelerators) + if dcl.StringCanonicalize(des.MinCpuPlatform, nw.MinCpuPlatform) { + nw.MinCpuPlatform = des.MinCpuPlatform + } + + return nw +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMasterConfigSet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigMasterConfig) []WorkflowTemplatePlacementManagedClusterConfigMasterConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []WorkflowTemplatePlacementManagedClusterConfigMasterConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigMasterConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMasterConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMasterConfigSlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigMasterConfig) []WorkflowTemplatePlacementManagedClusterConfigMasterConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplatePlacementManagedClusterConfigMasterConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMasterConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig(des, initial *WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig{} + + if dcl.StringCanonicalize(des.BootDiskType, initial.BootDiskType) || dcl.IsZeroValue(des.BootDiskType) { + cDes.BootDiskType = initial.BootDiskType + } else { + cDes.BootDiskType = des.BootDiskType + } + if dcl.IsZeroValue(des.BootDiskSizeGb) || (dcl.IsEmptyValueIndirect(des.BootDiskSizeGb) && dcl.IsEmptyValueIndirect(initial.BootDiskSizeGb)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.BootDiskSizeGb = initial.BootDiskSizeGb + } else { + cDes.BootDiskSizeGb = des.BootDiskSizeGb + } + if dcl.IsZeroValue(des.NumLocalSsds) || (dcl.IsEmptyValueIndirect(des.NumLocalSsds) && dcl.IsEmptyValueIndirect(initial.NumLocalSsds)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.NumLocalSsds = initial.NumLocalSsds + } else { + cDes.NumLocalSsds = des.NumLocalSsds + } + + return cDes +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigSlice(des, initial []WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig) *WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.BootDiskType, nw.BootDiskType) { + nw.BootDiskType = des.BootDiskType + } + + return nw +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigSet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig) []WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigSlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig) []WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig(des, initial *WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig{} + + return cDes +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigSlice(des, initial []WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig) *WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.InstanceTemplateName, nw.InstanceTemplateName) { + nw.InstanceTemplateName = des.InstanceTemplateName + } + if dcl.StringCanonicalize(des.InstanceGroupManagerName, nw.InstanceGroupManagerName) { + nw.InstanceGroupManagerName = des.InstanceGroupManagerName + } + + return nw +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigSet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig) []WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) 
+ } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigSlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig) []WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators(des, initial *WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators{} + + if dcl.StringCanonicalize(des.AcceleratorType, initial.AcceleratorType) || dcl.IsZeroValue(des.AcceleratorType) { + cDes.AcceleratorType = initial.AcceleratorType + } else { + cDes.AcceleratorType = des.AcceleratorType + } + if dcl.IsZeroValue(des.AcceleratorCount) || (dcl.IsEmptyValueIndirect(des.AcceleratorCount) && dcl.IsEmptyValueIndirect(initial.AcceleratorCount)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.AcceleratorCount = initial.AcceleratorCount + } else { + cDes.AcceleratorCount = des.AcceleratorCount + } + + return cDes +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorsSlice(des, initial []WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators) *WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.AcceleratorType, nw.AcceleratorType) { + nw.AcceleratorType = des.AcceleratorType + } + + return nw +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorsSet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators) []WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorsSlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators) []WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigWorkerConfig(des, initial *WorkflowTemplatePlacementManagedClusterConfigWorkerConfig, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfigWorkerConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplatePlacementManagedClusterConfigWorkerConfig{} + + if dcl.IsZeroValue(des.NumInstances) || (dcl.IsEmptyValueIndirect(des.NumInstances) && dcl.IsEmptyValueIndirect(initial.NumInstances)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.NumInstances = initial.NumInstances + } else { + cDes.NumInstances = des.NumInstances + } + if dcl.IsZeroValue(des.Image) || (dcl.IsEmptyValueIndirect(des.Image) && dcl.IsEmptyValueIndirect(initial.Image)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Image = initial.Image + } else { + cDes.Image = des.Image + } + if dcl.StringCanonicalize(des.MachineType, initial.MachineType) || dcl.IsZeroValue(des.MachineType) { + cDes.MachineType = initial.MachineType + } else { + cDes.MachineType = des.MachineType + } + cDes.DiskConfig = canonicalizeWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig(des.DiskConfig, initial.DiskConfig, opts...) + if dcl.IsZeroValue(des.Preemptibility) || (dcl.IsEmptyValueIndirect(des.Preemptibility) && dcl.IsEmptyValueIndirect(initial.Preemptibility)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.Preemptibility = initial.Preemptibility + } else { + cDes.Preemptibility = des.Preemptibility + } + cDes.Accelerators = canonicalizeWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorsSlice(des.Accelerators, initial.Accelerators, opts...) + if dcl.StringCanonicalize(des.MinCpuPlatform, initial.MinCpuPlatform) || dcl.IsZeroValue(des.MinCpuPlatform) { + cDes.MinCpuPlatform = initial.MinCpuPlatform + } else { + cDes.MinCpuPlatform = des.MinCpuPlatform + } + + return cDes +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigWorkerConfigSlice(des, initial []WorkflowTemplatePlacementManagedClusterConfigWorkerConfig, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfigWorkerConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplatePlacementManagedClusterConfigWorkerConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigWorkerConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigWorkerConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigWorkerConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigWorkerConfig(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfigWorkerConfig) *WorkflowTemplatePlacementManagedClusterConfigWorkerConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfigWorkerConfig while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringArrayCanonicalize(des.InstanceNames, nw.InstanceNames) { + nw.InstanceNames = des.InstanceNames + } + if dcl.StringCanonicalize(des.MachineType, nw.MachineType) { + nw.MachineType = des.MachineType + } + nw.DiskConfig = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig(c, des.DiskConfig, nw.DiskConfig) + if dcl.BoolCanonicalize(des.IsPreemptible, nw.IsPreemptible) { + nw.IsPreemptible = des.IsPreemptible + } + nw.ManagedGroupConfig = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig(c, des.ManagedGroupConfig, nw.ManagedGroupConfig) + nw.Accelerators = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorsSlice(c, des.Accelerators, nw.Accelerators) + if dcl.StringCanonicalize(des.MinCpuPlatform, nw.MinCpuPlatform) { + nw.MinCpuPlatform = des.MinCpuPlatform + } + + return nw +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigWorkerConfigSet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigWorkerConfig) []WorkflowTemplatePlacementManagedClusterConfigWorkerConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplatePlacementManagedClusterConfigWorkerConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigWorkerConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigWorkerConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) 
+ + return items +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigWorkerConfigSlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigWorkerConfig) []WorkflowTemplatePlacementManagedClusterConfigWorkerConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplatePlacementManagedClusterConfigWorkerConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigWorkerConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig(des, initial *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig{} + + if dcl.StringCanonicalize(des.BootDiskType, initial.BootDiskType) || dcl.IsZeroValue(des.BootDiskType) { + cDes.BootDiskType = initial.BootDiskType + } else { + cDes.BootDiskType = des.BootDiskType + } + if dcl.IsZeroValue(des.BootDiskSizeGb) || (dcl.IsEmptyValueIndirect(des.BootDiskSizeGb) && dcl.IsEmptyValueIndirect(initial.BootDiskSizeGb)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.BootDiskSizeGb = initial.BootDiskSizeGb + } else { + cDes.BootDiskSizeGb = des.BootDiskSizeGb + } + if dcl.IsZeroValue(des.NumLocalSsds) || (dcl.IsEmptyValueIndirect(des.NumLocalSsds) && dcl.IsEmptyValueIndirect(initial.NumLocalSsds)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.NumLocalSsds = initial.NumLocalSsds + } else { + cDes.NumLocalSsds = des.NumLocalSsds + } + + return cDes +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigSlice(des, initial []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig) *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.BootDiskType, nw.BootDiskType) { + nw.BootDiskType = des.BootDiskType + } + + return nw +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigSet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig) []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigSlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig) []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig(des, initial *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig{} + + return cDes +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigSlice(des, initial []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig) *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.InstanceTemplateName, nw.InstanceTemplateName) { + nw.InstanceTemplateName = des.InstanceTemplateName + } + if dcl.StringCanonicalize(des.InstanceGroupManagerName, nw.InstanceGroupManagerName) { + nw.InstanceGroupManagerName = des.InstanceGroupManagerName + } + + return nw +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigSet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig) []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) 
+ } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigSlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig) []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators(des, initial *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators{} + + if dcl.StringCanonicalize(des.AcceleratorType, initial.AcceleratorType) || dcl.IsZeroValue(des.AcceleratorType) { + cDes.AcceleratorType = initial.AcceleratorType + } else { + cDes.AcceleratorType = des.AcceleratorType + } + if dcl.IsZeroValue(des.AcceleratorCount) || (dcl.IsEmptyValueIndirect(des.AcceleratorCount) && dcl.IsEmptyValueIndirect(initial.AcceleratorCount)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.AcceleratorCount = initial.AcceleratorCount + } else { + cDes.AcceleratorCount = des.AcceleratorCount + } + + return cDes +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorsSlice(des, initial []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators) *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.AcceleratorType, nw.AcceleratorType) { + nw.AcceleratorType = des.AcceleratorType + } + + return nw +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorsSet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators) []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorsSlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators) []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig(des, initial *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig{} + + if dcl.IsZeroValue(des.NumInstances) || (dcl.IsEmptyValueIndirect(des.NumInstances) && dcl.IsEmptyValueIndirect(initial.NumInstances)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.NumInstances = initial.NumInstances + } else { + cDes.NumInstances = des.NumInstances + } + if dcl.IsZeroValue(des.Image) || (dcl.IsEmptyValueIndirect(des.Image) && dcl.IsEmptyValueIndirect(initial.Image)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Image = initial.Image + } else { + cDes.Image = des.Image + } + if dcl.StringCanonicalize(des.MachineType, initial.MachineType) || dcl.IsZeroValue(des.MachineType) { + cDes.MachineType = initial.MachineType + } else { + cDes.MachineType = des.MachineType + } + cDes.DiskConfig = canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig(des.DiskConfig, initial.DiskConfig, opts...) + if dcl.IsZeroValue(des.Preemptibility) || (dcl.IsEmptyValueIndirect(des.Preemptibility) && dcl.IsEmptyValueIndirect(initial.Preemptibility)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.Preemptibility = initial.Preemptibility + } else { + cDes.Preemptibility = des.Preemptibility + } + cDes.Accelerators = canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorsSlice(des.Accelerators, initial.Accelerators, opts...) + if dcl.StringCanonicalize(des.MinCpuPlatform, initial.MinCpuPlatform) || dcl.IsZeroValue(des.MinCpuPlatform) { + cDes.MinCpuPlatform = initial.MinCpuPlatform + } else { + cDes.MinCpuPlatform = des.MinCpuPlatform + } + + return cDes +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigSlice(des, initial []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig) *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringArrayCanonicalize(des.InstanceNames, nw.InstanceNames) { + nw.InstanceNames = des.InstanceNames + } + if dcl.StringCanonicalize(des.MachineType, nw.MachineType) { + nw.MachineType = des.MachineType + } + nw.DiskConfig = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig(c, des.DiskConfig, nw.DiskConfig) + if dcl.BoolCanonicalize(des.IsPreemptible, nw.IsPreemptible) { + nw.IsPreemptible = des.IsPreemptible + } + nw.ManagedGroupConfig = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig(c, des.ManagedGroupConfig, nw.ManagedGroupConfig) + nw.Accelerators = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorsSlice(c, des.Accelerators, nw.Accelerators) + if dcl.StringCanonicalize(des.MinCpuPlatform, nw.MinCpuPlatform) { + nw.MinCpuPlatform = des.MinCpuPlatform + } + + return nw +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigSet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig) []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigSlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig) []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig(des, initial *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig{} + + if dcl.StringCanonicalize(des.BootDiskType, initial.BootDiskType) || dcl.IsZeroValue(des.BootDiskType) { + cDes.BootDiskType = initial.BootDiskType + } else { + cDes.BootDiskType = des.BootDiskType + } + if dcl.IsZeroValue(des.BootDiskSizeGb) || (dcl.IsEmptyValueIndirect(des.BootDiskSizeGb) && dcl.IsEmptyValueIndirect(initial.BootDiskSizeGb)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.BootDiskSizeGb = initial.BootDiskSizeGb + } else { + cDes.BootDiskSizeGb = des.BootDiskSizeGb + } + if dcl.IsZeroValue(des.NumLocalSsds) || (dcl.IsEmptyValueIndirect(des.NumLocalSsds) && dcl.IsEmptyValueIndirect(initial.NumLocalSsds)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.NumLocalSsds = initial.NumLocalSsds + } else { + cDes.NumLocalSsds = des.NumLocalSsds + } + + return cDes +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigSlice(des, initial []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig) *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.BootDiskType, nw.BootDiskType) { + nw.BootDiskType = des.BootDiskType + } + + return nw +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigSet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig) []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigSlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig) []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig(des, initial *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig{} + + return cDes +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigSlice(des, initial []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig) *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.InstanceTemplateName, nw.InstanceTemplateName) { + nw.InstanceTemplateName = des.InstanceTemplateName + } + if dcl.StringCanonicalize(des.InstanceGroupManagerName, nw.InstanceGroupManagerName) { + nw.InstanceGroupManagerName = des.InstanceGroupManagerName + } + + return nw +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigSet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig) []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigSlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig) []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators(des, initial *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators{} + + if dcl.StringCanonicalize(des.AcceleratorType, initial.AcceleratorType) || dcl.IsZeroValue(des.AcceleratorType) { + cDes.AcceleratorType = initial.AcceleratorType + } else { + cDes.AcceleratorType = des.AcceleratorType + } + if dcl.IsZeroValue(des.AcceleratorCount) || (dcl.IsEmptyValueIndirect(des.AcceleratorCount) && dcl.IsEmptyValueIndirect(initial.AcceleratorCount)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.AcceleratorCount = initial.AcceleratorCount + } else { + cDes.AcceleratorCount = des.AcceleratorCount + } + + return cDes +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorsSlice(des, initial []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators) *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.AcceleratorType, nw.AcceleratorType) { + nw.AcceleratorType = des.AcceleratorType + } + + return nw +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorsSet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators) []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorsSlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators) []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig(des, initial *WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig{} + + if dcl.StringCanonicalize(des.ImageVersion, initial.ImageVersion) || dcl.IsZeroValue(des.ImageVersion) { + cDes.ImageVersion = initial.ImageVersion + } else { + cDes.ImageVersion = des.ImageVersion + } + if dcl.IsZeroValue(des.Properties) || (dcl.IsEmptyValueIndirect(des.Properties) && dcl.IsEmptyValueIndirect(initial.Properties)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Properties = initial.Properties + } else { + cDes.Properties = des.Properties + } + if dcl.IsZeroValue(des.OptionalComponents) || (dcl.IsEmptyValueIndirect(des.OptionalComponents) && dcl.IsEmptyValueIndirect(initial.OptionalComponents)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.OptionalComponents = initial.OptionalComponents + } else { + cDes.OptionalComponents = des.OptionalComponents + } + + return cDes +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigSlice(des, initial []WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig) *WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.ImageVersion, nw.ImageVersion) { + nw.ImageVersion = des.ImageVersion + } + + return nw +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigSet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig) []WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigSlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig) []WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigInitializationActions(des, initial *WorkflowTemplatePlacementManagedClusterConfigInitializationActions, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfigInitializationActions { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplatePlacementManagedClusterConfigInitializationActions{} + + if dcl.StringCanonicalize(des.ExecutableFile, initial.ExecutableFile) || dcl.IsZeroValue(des.ExecutableFile) { + cDes.ExecutableFile = initial.ExecutableFile + } else { + cDes.ExecutableFile = des.ExecutableFile + } + if dcl.StringCanonicalize(des.ExecutionTimeout, initial.ExecutionTimeout) || dcl.IsZeroValue(des.ExecutionTimeout) { + cDes.ExecutionTimeout = initial.ExecutionTimeout + } else { + cDes.ExecutionTimeout = des.ExecutionTimeout + } + + return cDes +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigInitializationActionsSlice(des, initial []WorkflowTemplatePlacementManagedClusterConfigInitializationActions, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfigInitializationActions { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplatePlacementManagedClusterConfigInitializationActions, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigInitializationActions(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigInitializationActions, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigInitializationActions(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigInitializationActions(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfigInitializationActions) *WorkflowTemplatePlacementManagedClusterConfigInitializationActions { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfigInitializationActions while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.ExecutableFile, nw.ExecutableFile) { + nw.ExecutableFile = des.ExecutableFile + } + if dcl.StringCanonicalize(des.ExecutionTimeout, nw.ExecutionTimeout) { + nw.ExecutionTimeout = des.ExecutionTimeout + } + + return nw +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigInitializationActionsSet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigInitializationActions) []WorkflowTemplatePlacementManagedClusterConfigInitializationActions { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []WorkflowTemplatePlacementManagedClusterConfigInitializationActions + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigInitializationActionsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigInitializationActions(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigInitializationActionsSlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigInitializationActions) []WorkflowTemplatePlacementManagedClusterConfigInitializationActions { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplatePlacementManagedClusterConfigInitializationActions + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigInitializationActions(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig(des, initial *WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig{} + + if dcl.IsZeroValue(des.GcePdKmsKeyName) || (dcl.IsEmptyValueIndirect(des.GcePdKmsKeyName) && dcl.IsEmptyValueIndirect(initial.GcePdKmsKeyName)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.GcePdKmsKeyName = initial.GcePdKmsKeyName + } else { + cDes.GcePdKmsKeyName = des.GcePdKmsKeyName + } + + return cDes +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigSlice(des, initial []WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig) *WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigSet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig) []WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigSlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig) []WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig { + if des == nil { + return nw + } + + // Lengths are unequal. 
A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig(des, initial *WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig{} + + if dcl.IsZeroValue(des.Policy) || (dcl.IsEmptyValueIndirect(des.Policy) && dcl.IsEmptyValueIndirect(initial.Policy)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Policy = initial.Policy + } else { + cDes.Policy = des.Policy + } + + return cDes +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigSlice(des, initial []WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig) *WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigSet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig) []WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) 
+ + return items +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigSlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig) []WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecurityConfig(des, initial *WorkflowTemplatePlacementManagedClusterConfigSecurityConfig, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfigSecurityConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplatePlacementManagedClusterConfigSecurityConfig{} + + cDes.KerberosConfig = canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig(des.KerberosConfig, initial.KerberosConfig, opts...) + + return cDes +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecurityConfigSlice(des, initial []WorkflowTemplatePlacementManagedClusterConfigSecurityConfig, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfigSecurityConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplatePlacementManagedClusterConfigSecurityConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecurityConfig(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigSecurityConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecurityConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecurityConfig(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfigSecurityConfig) *WorkflowTemplatePlacementManagedClusterConfigSecurityConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfigSecurityConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + nw.KerberosConfig = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig(c, des.KerberosConfig, nw.KerberosConfig) + + return nw +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecurityConfigSet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigSecurityConfig) []WorkflowTemplatePlacementManagedClusterConfigSecurityConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplatePlacementManagedClusterConfigSecurityConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigSecurityConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecurityConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) 
+ } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecurityConfigSlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigSecurityConfig) []WorkflowTemplatePlacementManagedClusterConfigSecurityConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplatePlacementManagedClusterConfigSecurityConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecurityConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig(des, initial *WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig{} + + if dcl.BoolCanonicalize(des.EnableKerberos, initial.EnableKerberos) || dcl.IsZeroValue(des.EnableKerberos) { + cDes.EnableKerberos = initial.EnableKerberos + } else { + cDes.EnableKerberos = des.EnableKerberos + } + if dcl.StringCanonicalize(des.RootPrincipalPassword, initial.RootPrincipalPassword) || dcl.IsZeroValue(des.RootPrincipalPassword) { + cDes.RootPrincipalPassword = initial.RootPrincipalPassword + } else { + cDes.RootPrincipalPassword = des.RootPrincipalPassword + } + if dcl.IsZeroValue(des.KmsKey) || (dcl.IsEmptyValueIndirect(des.KmsKey) && dcl.IsEmptyValueIndirect(initial.KmsKey)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
		cDes.KmsKey = initial.KmsKey
	} else {
		cDes.KmsKey = des.KmsKey
	}
	if dcl.StringCanonicalize(des.Keystore, initial.Keystore) || dcl.IsZeroValue(des.Keystore) {
		cDes.Keystore = initial.Keystore
	} else {
		cDes.Keystore = des.Keystore
	}
	if dcl.StringCanonicalize(des.Truststore, initial.Truststore) || dcl.IsZeroValue(des.Truststore) {
		cDes.Truststore = initial.Truststore
	} else {
		cDes.Truststore = des.Truststore
	}
	if dcl.StringCanonicalize(des.KeystorePassword, initial.KeystorePassword) || dcl.IsZeroValue(des.KeystorePassword) {
		cDes.KeystorePassword = initial.KeystorePassword
	} else {
		cDes.KeystorePassword = des.KeystorePassword
	}
	if dcl.StringCanonicalize(des.KeyPassword, initial.KeyPassword) || dcl.IsZeroValue(des.KeyPassword) {
		cDes.KeyPassword = initial.KeyPassword
	} else {
		cDes.KeyPassword = des.KeyPassword
	}
	if dcl.StringCanonicalize(des.TruststorePassword, initial.TruststorePassword) || dcl.IsZeroValue(des.TruststorePassword) {
		cDes.TruststorePassword = initial.TruststorePassword
	} else {
		cDes.TruststorePassword = des.TruststorePassword
	}
	if dcl.StringCanonicalize(des.CrossRealmTrustRealm, initial.CrossRealmTrustRealm) || dcl.IsZeroValue(des.CrossRealmTrustRealm) {
		cDes.CrossRealmTrustRealm = initial.CrossRealmTrustRealm
	} else {
		cDes.CrossRealmTrustRealm = des.CrossRealmTrustRealm
	}
	if dcl.StringCanonicalize(des.CrossRealmTrustKdc, initial.CrossRealmTrustKdc) || dcl.IsZeroValue(des.CrossRealmTrustKdc) {
		cDes.CrossRealmTrustKdc = initial.CrossRealmTrustKdc
	} else {
		cDes.CrossRealmTrustKdc = des.CrossRealmTrustKdc
	}
	if dcl.StringCanonicalize(des.CrossRealmTrustAdminServer, initial.CrossRealmTrustAdminServer) || dcl.IsZeroValue(des.CrossRealmTrustAdminServer) {
		cDes.CrossRealmTrustAdminServer = initial.CrossRealmTrustAdminServer
	} else {
		cDes.CrossRealmTrustAdminServer = des.CrossRealmTrustAdminServer
	}
	if dcl.StringCanonicalize(des.CrossRealmTrustSharedPassword, initial.CrossRealmTrustSharedPassword) || dcl.IsZeroValue(des.CrossRealmTrustSharedPassword) {
		cDes.CrossRealmTrustSharedPassword = initial.CrossRealmTrustSharedPassword
	} else {
		cDes.CrossRealmTrustSharedPassword = des.CrossRealmTrustSharedPassword
	}
	if dcl.StringCanonicalize(des.KdcDbKey, initial.KdcDbKey) || dcl.IsZeroValue(des.KdcDbKey) {
		cDes.KdcDbKey = initial.KdcDbKey
	} else {
		cDes.KdcDbKey = des.KdcDbKey
	}
	if dcl.IsZeroValue(des.TgtLifetimeHours) || (dcl.IsEmptyValueIndirect(des.TgtLifetimeHours) && dcl.IsEmptyValueIndirect(initial.TgtLifetimeHours)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		cDes.TgtLifetimeHours = initial.TgtLifetimeHours
	} else {
		cDes.TgtLifetimeHours = des.TgtLifetimeHours
	}
	if dcl.StringCanonicalize(des.Realm, initial.Realm) || dcl.IsZeroValue(des.Realm) {
		cDes.Realm = initial.Realm
	} else {
		cDes.Realm = des.Realm
	}

	return cDes
}

// canonicalize…KerberosConfigSlice canonicalizes pairwise against initial; on a length
// mismatch each desired element is canonicalized against nil instead.
// (Function continues past this chunk boundary.)
func canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigSlice(des, initial []WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig {
	if dcl.IsEmptyValueIndirect(des) {
		return initial
	}

	if len(des) != len(initial) {

		items := make([]WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig, 0, len(des))
		for _, d := range des {
			cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig(&d, nil, opts...)
			if cd != nil {
				items = append(items, *cd)
			}
		}
		return items
	}

	items := make([]WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig, 0, len(des))
	for i, d := range des {
		cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig(&d, &initial[i], opts...)
		if cd != nil {
			items = append(items, *cd)
		}
	}
	return items

}

// canonicalizeNew…KerberosConfig reconciles desired Kerberos fields against the actual
// state (nw), keeping the desired spelling for values the API reports as equivalent.
func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig) *WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig {

	if des == nil {
		return nw
	}

	if nw == nil {
		if dcl.IsEmptyValueIndirect(des) {
			c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig while comparing non-nil desired to nil actual. Returning desired object.")
			return des
		}
		return nil
	}

	if dcl.BoolCanonicalize(des.EnableKerberos, nw.EnableKerberos) {
		nw.EnableKerberos = des.EnableKerberos
	}
	if dcl.StringCanonicalize(des.RootPrincipalPassword, nw.RootPrincipalPassword) {
		nw.RootPrincipalPassword = des.RootPrincipalPassword
	}
	if dcl.StringCanonicalize(des.Keystore, nw.Keystore) {
		nw.Keystore = des.Keystore
	}
	if dcl.StringCanonicalize(des.Truststore, nw.Truststore) {
		nw.Truststore = des.Truststore
	}
	if dcl.StringCanonicalize(des.KeystorePassword, nw.KeystorePassword) {
		nw.KeystorePassword = des.KeystorePassword
	}
	if dcl.StringCanonicalize(des.KeyPassword, nw.KeyPassword) {
		nw.KeyPassword = des.KeyPassword
	}
	if dcl.StringCanonicalize(des.TruststorePassword, nw.TruststorePassword) {
		nw.TruststorePassword = des.TruststorePassword
	}
	if dcl.StringCanonicalize(des.CrossRealmTrustRealm, nw.CrossRealmTrustRealm) {
		nw.CrossRealmTrustRealm = des.CrossRealmTrustRealm
	}
	if dcl.StringCanonicalize(des.CrossRealmTrustKdc, nw.CrossRealmTrustKdc) {
		nw.CrossRealmTrustKdc = des.CrossRealmTrustKdc
	}
	if dcl.StringCanonicalize(des.CrossRealmTrustAdminServer, nw.CrossRealmTrustAdminServer) {
		nw.CrossRealmTrustAdminServer = des.CrossRealmTrustAdminServer
	}
	if dcl.StringCanonicalize(des.CrossRealmTrustSharedPassword, nw.CrossRealmTrustSharedPassword) {
		nw.CrossRealmTrustSharedPassword = des.CrossRealmTrustSharedPassword
	}
	if dcl.StringCanonicalize(des.KdcDbKey, nw.KdcDbKey) {
		nw.KdcDbKey = des.KdcDbKey
	}
	if dcl.StringCanonicalize(des.Realm, nw.Realm) {
		nw.Realm = des.Realm
	}

	return nw
}

// canonicalizeNew…KerberosConfigSet matches des elements to nw by zero-diff comparison
// (order-insensitive), canonicalizes the matches, then appends unmatched nw elements.
func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigSet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig) []WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig {
	if des == nil {
		return nw
	}

	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
	var items []WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig
	for _, d := range des {
		matchedIndex := -1
		for i, n := range nw {
			if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
				matchedIndex = i
				break
			}
		}
		if matchedIndex != -1 {
			items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig(c, &d, &nw[matchedIndex]))
			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
		}
	}
	// Also include elements in nw that are not matched in des.
	items = append(items, nw...)

	return items
}

// canonicalizeNew…KerberosConfigSlice canonicalizes element-wise; unequal lengths are
// returned unchanged so the diff surfaces later.
func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigSlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig) []WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig {
	if des == nil {
		return nw
	}

	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
	// Return the original array.
	if len(des) != len(nw) {
		return nw
	}

	var items []WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig
	for i, d := range des {
		n := nw[i]
		items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig(c, &d, &n))
	}

	return items
}

// canonicalize…LifecycleConfig merges desired lifecycle fields with their initial values.
// (Function continues past this chunk boundary.)
func canonicalizeWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig(des, initial *WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig {
	if des == nil {
		return initial
	}
	if des.empty {
		return des
	}

	if initial == nil {
		return des
	}

	cDes := &WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig{}

	if dcl.StringCanonicalize(des.IdleDeleteTtl, initial.IdleDeleteTtl) || dcl.IsZeroValue(des.IdleDeleteTtl) {
		cDes.IdleDeleteTtl = initial.IdleDeleteTtl
	} else {
		cDes.IdleDeleteTtl = des.IdleDeleteTtl
	}
	if dcl.IsZeroValue(des.AutoDeleteTime) || (dcl.IsEmptyValueIndirect(des.AutoDeleteTime) && dcl.IsEmptyValueIndirect(initial.AutoDeleteTime)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		cDes.AutoDeleteTime = initial.AutoDeleteTime
	} else {
		cDes.AutoDeleteTime = des.AutoDeleteTime
	}
	if dcl.StringCanonicalize(des.AutoDeleteTtl, initial.AutoDeleteTtl) || dcl.IsZeroValue(des.AutoDeleteTtl) {
		cDes.AutoDeleteTtl = initial.AutoDeleteTtl
	} else {
		cDes.AutoDeleteTtl = des.AutoDeleteTtl
	}

	return cDes
}

// canonicalize…LifecycleConfigSlice canonicalizes pairwise against initial; on a length
// mismatch each desired element is canonicalized against nil instead.
func canonicalizeWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigSlice(des, initial []WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig {
	if dcl.IsEmptyValueIndirect(des) {
		return initial
	}

	if len(des) != len(initial) {

		items := make([]WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig, 0, len(des))
		for _, d := range des {
			cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig(&d, nil, opts...)
			if cd != nil {
				items = append(items, *cd)
			}
		}
		return items
	}

	items := make([]WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig, 0, len(des))
	for i, d := range des {
		cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig(&d, &initial[i], opts...)
		if cd != nil {
			items = append(items, *cd)
		}
	}
	return items

}

// canonicalizeNew…LifecycleConfig reconciles desired lifecycle fields against the actual
// state (nw); a nil actual is kept only for explicitly-empty desired.
func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig) *WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig {

	if des == nil {
		return nw
	}

	if nw == nil {
		if dcl.IsEmptyValueIndirect(des) {
			c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig while comparing non-nil desired to nil actual. Returning desired object.")
			return des
		}
		return nil
	}

	if dcl.StringCanonicalize(des.IdleDeleteTtl, nw.IdleDeleteTtl) {
		nw.IdleDeleteTtl = des.IdleDeleteTtl
	}
	if dcl.StringCanonicalize(des.AutoDeleteTtl, nw.AutoDeleteTtl) {
		nw.AutoDeleteTtl = des.AutoDeleteTtl
	}

	return nw
}

// canonicalizeNew…LifecycleConfigSet matches des elements to nw by zero-diff comparison
// (order-insensitive), canonicalizes the matches, then appends unmatched nw elements.
func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigSet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig) []WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig {
	if des == nil {
		return nw
	}

	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
	var items []WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig
	for _, d := range des {
		matchedIndex := -1
		for i, n := range nw {
			if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
				matchedIndex = i
				break
			}
		}
		if matchedIndex != -1 {
			items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig(c, &d, &nw[matchedIndex]))
			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
		}
	}
	// Also include elements in nw that are not matched in des.
	items = append(items, nw...)

	return items
}

// canonicalizeNew…LifecycleConfigSlice canonicalizes element-wise; unequal lengths are
// returned unchanged so the diff surfaces later.
func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigSlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig) []WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig {
	if des == nil {
		return nw
	}

	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
	// Return the original array.
	if len(des) != len(nw) {
		return nw
	}

	var items []WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig
	for i, d := range des {
		n := nw[i]
		items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig(c, &d, &n))
	}

	return items
}

// canonicalize…EndpointConfig merges the desired EnableHttpPortAccess flag with its
// initial value.
func canonicalizeWorkflowTemplatePlacementManagedClusterConfigEndpointConfig(des, initial *WorkflowTemplatePlacementManagedClusterConfigEndpointConfig, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfigEndpointConfig {
	if des == nil {
		return initial
	}
	if des.empty {
		return des
	}

	if initial == nil {
		return des
	}

	cDes := &WorkflowTemplatePlacementManagedClusterConfigEndpointConfig{}

	if dcl.BoolCanonicalize(des.EnableHttpPortAccess, initial.EnableHttpPortAccess) || dcl.IsZeroValue(des.EnableHttpPortAccess) {
		cDes.EnableHttpPortAccess = initial.EnableHttpPortAccess
	} else {
		cDes.EnableHttpPortAccess = des.EnableHttpPortAccess
	}

	return cDes
}

// canonicalize…EndpointConfigSlice canonicalizes pairwise against initial; on a length
// mismatch each desired element is canonicalized against nil instead.
// (Function continues past this chunk boundary.)
func canonicalizeWorkflowTemplatePlacementManagedClusterConfigEndpointConfigSlice(des, initial []WorkflowTemplatePlacementManagedClusterConfigEndpointConfig, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfigEndpointConfig {
	if dcl.IsEmptyValueIndirect(des) {
		return initial
	}

	if len(des) != len(initial) {

		items := make([]WorkflowTemplatePlacementManagedClusterConfigEndpointConfig, 0, len(des))
		for _, d := range des {
			cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigEndpointConfig(&d, nil, opts...)
			if cd != nil {
				items = append(items, *cd)
			}
		}
		return items
	}

	items := make([]WorkflowTemplatePlacementManagedClusterConfigEndpointConfig, 0, len(des))
	for i, d := range des {
		cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigEndpointConfig(&d, &initial[i], opts...)
		if cd != nil {
			items = append(items, *cd)
		}
	}
	return items

}

// canonicalizeNew…EndpointConfig reconciles the desired flag against the actual state
// (nw); a nil actual is kept only for explicitly-empty desired.
func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigEndpointConfig(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfigEndpointConfig) *WorkflowTemplatePlacementManagedClusterConfigEndpointConfig {

	if des == nil {
		return nw
	}

	if nw == nil {
		if dcl.IsEmptyValueIndirect(des) {
			c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfigEndpointConfig while comparing non-nil desired to nil actual. Returning desired object.")
			return des
		}
		return nil
	}

	if dcl.BoolCanonicalize(des.EnableHttpPortAccess, nw.EnableHttpPortAccess) {
		nw.EnableHttpPortAccess = des.EnableHttpPortAccess
	}

	return nw
}

// canonicalizeNew…EndpointConfigSet matches des elements to nw by zero-diff comparison
// (order-insensitive), canonicalizes the matches, then appends unmatched nw elements.
func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigEndpointConfigSet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigEndpointConfig) []WorkflowTemplatePlacementManagedClusterConfigEndpointConfig {
	if des == nil {
		return nw
	}

	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
	var items []WorkflowTemplatePlacementManagedClusterConfigEndpointConfig
	for _, d := range des {
		matchedIndex := -1
		for i, n := range nw {
			if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigEndpointConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
				matchedIndex = i
				break
			}
		}
		if matchedIndex != -1 {
			items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigEndpointConfig(c, &d, &nw[matchedIndex]))
			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
		}
	}
	// Also include elements in nw that are not matched in des.
	items = append(items, nw...)

	return items
}

// canonicalizeNew…EndpointConfigSlice canonicalizes element-wise; unequal lengths are
// returned unchanged so the diff surfaces later.
// NOTE(review): the template conditional below splices the non-GA-only GkeClusterConfig/
// MetastoreConfig helpers into the middle of this loop body; in the GA rendering the
// matching "end" (further down) closes this same loop — presumably intentional DCL
// codegen layout; confirm against the generator before touching placement.
func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigEndpointConfigSlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigEndpointConfig) []WorkflowTemplatePlacementManagedClusterConfigEndpointConfig {
	if des == nil {
		return nw
	}

	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
	// Return the original array.
	if len(des) != len(nw) {
		return nw
	}

	var items []WorkflowTemplatePlacementManagedClusterConfigEndpointConfig
	for i, d := range des {
		n := nw[i]
		items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigEndpointConfig(c, &d, &n))
{{- if ne $.TargetVersionName "ga" }}
	}

	return items
}

// canonicalize…GkeClusterConfig (non-GA only) merges the desired GKE cluster config with
// its initial state.
func canonicalizeWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig(des, initial *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig {
	if des == nil {
		return initial
	}
	if des.empty {
		return des
	}

	if initial == nil {
		return des
	}

	cDes := &WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig{}

	cDes.NamespacedGkeDeploymentTarget = canonicalizeWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(des.NamespacedGkeDeploymentTarget, initial.NamespacedGkeDeploymentTarget, opts...)

	return cDes
}

// canonicalize…GkeClusterConfigSlice canonicalizes pairwise against initial; on a length
// mismatch each desired element is canonicalized against nil instead.
// (Function continues past this chunk boundary.)
func canonicalizeWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigSlice(des, initial []WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig {
	if dcl.IsEmptyValueIndirect(des) {
		return initial
	}

	if len(des) != len(initial) {

		items := make([]WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig, 0, len(des))
		for _, d := range des {
			cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig(&d, nil, opts...)
			if cd != nil {
				items = append(items, *cd)
			}
		}
		return items
	}

	items := make([]WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig, 0, len(des))
	for i, d := range des {
		cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig(&d, &initial[i], opts...)
		if cd != nil {
			items = append(items, *cd)
		}
	}
	return items

}

// canonicalizeNew…GkeClusterConfig reconciles the desired GKE cluster config against the
// actual state (nw); a nil actual is kept only for explicitly-empty desired.
func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig) *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig {

	if des == nil {
		return nw
	}

	if nw == nil {
		if dcl.IsEmptyValueIndirect(des) {
			c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig while comparing non-nil desired to nil actual. Returning desired object.")
			return des
		}
		return nil
	}

	nw.NamespacedGkeDeploymentTarget = canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(c, des.NamespacedGkeDeploymentTarget, nw.NamespacedGkeDeploymentTarget)

	return nw
}

// canonicalizeNew…GkeClusterConfigSet matches des elements to nw by zero-diff comparison
// (order-insensitive), canonicalizes the matches, then appends unmatched nw elements.
func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigSet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig) []WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig {
	if des == nil {
		return nw
	}

	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
	var items []WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig
	for _, d := range des {
		matchedIndex := -1
		for i, n := range nw {
			if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
				matchedIndex = i
				break
			}
		}
		if matchedIndex != -1 {
			items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig(c, &d, &nw[matchedIndex]))
			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
		}
	}
	// Also include elements in nw that are not matched in des.
	items = append(items, nw...)

	return items
}

// canonicalizeNew…GkeClusterConfigSlice canonicalizes element-wise; unequal lengths are
// returned unchanged so the diff surfaces later.
func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigSlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig) []WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig {
	if des == nil {
		return nw
	}

	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
	// Return the original array.
	if len(des) != len(nw) {
		return nw
	}

	var items []WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig
	for i, d := range des {
		n := nw[i]
		items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig(c, &d, &n))
	}

	return items
}

// canonicalize…NamespacedGkeDeploymentTarget merges the desired deployment target with
// its initial state. (Function continues past this chunk boundary.)
func canonicalizeWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(des, initial *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget {
	if des == nil {
		return initial
	}
	if des.empty {
		return des
	}

	if initial == nil {
		return des
	}

	cDes := &WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget{}

	if dcl.IsZeroValue(des.TargetGkeCluster) || (dcl.IsEmptyValueIndirect(des.TargetGkeCluster) && dcl.IsEmptyValueIndirect(initial.TargetGkeCluster)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		cDes.TargetGkeCluster = initial.TargetGkeCluster
	} else {
		cDes.TargetGkeCluster = des.TargetGkeCluster
	}
	if dcl.StringCanonicalize(des.ClusterNamespace, initial.ClusterNamespace) || dcl.IsZeroValue(des.ClusterNamespace) {
		cDes.ClusterNamespace = initial.ClusterNamespace
	} else {
		cDes.ClusterNamespace = des.ClusterNamespace
	}

	return cDes
}

// canonicalize…NamespacedGkeDeploymentTargetSlice canonicalizes pairwise against initial;
// on a length mismatch each desired element is canonicalized against nil instead.
func canonicalizeWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetSlice(des, initial []WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget {
	if dcl.IsEmptyValueIndirect(des) {
		return initial
	}

	if len(des) != len(initial) {

		items := make([]WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget, 0, len(des))
		for _, d := range des {
			cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(&d, nil, opts...)
			if cd != nil {
				items = append(items, *cd)
			}
		}
		return items
	}

	items := make([]WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget, 0, len(des))
	for i, d := range des {
		cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(&d, &initial[i], opts...)
		if cd != nil {
			items = append(items, *cd)
		}
	}
	return items

}

// canonicalizeNew…NamespacedGkeDeploymentTarget reconciles the desired target against the
// actual state (nw); a nil actual is kept only for explicitly-empty desired.
func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget {

	if des == nil {
		return nw
	}

	if nw == nil {
		if dcl.IsEmptyValueIndirect(des) {
			c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget while comparing non-nil desired to nil actual. Returning desired object.")
			return des
		}
		return nil
	}

	if dcl.StringCanonicalize(des.ClusterNamespace, nw.ClusterNamespace) {
		nw.ClusterNamespace = des.ClusterNamespace
	}

	return nw
}

// canonicalizeNew…NamespacedGkeDeploymentTargetSet matches des elements to nw by
// zero-diff comparison, canonicalizes the matches, then appends unmatched nw elements.
func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetSet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) []WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget {
	if des == nil {
		return nw
	}

	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
	var items []WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget
	for _, d := range des {
		matchedIndex := -1
		for i, n := range nw {
			if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
				matchedIndex = i
				break
			}
		}
		if matchedIndex != -1 {
			items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(c, &d, &nw[matchedIndex]))
			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
		}
	}
	// Also include elements in nw that are not matched in des.
	items = append(items, nw...)

	return items
}

// canonicalizeNew…NamespacedGkeDeploymentTargetSlice canonicalizes element-wise; unequal
// lengths are returned unchanged so the diff surfaces later.
func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetSlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) []WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget {
	if des == nil {
		return nw
	}

	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
	// Return the original array.
	if len(des) != len(nw) {
		return nw
	}

	var items []WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget
	for i, d := range des {
		n := nw[i]
		items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(c, &d, &n))
	}

	return items
}

// canonicalize…MetastoreConfig merges the desired metastore reference with its initial
// state. (Function continues past this chunk boundary.)
func canonicalizeWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig(des, initial *WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig {
	if des == nil {
		return initial
	}
	if des.empty {
		return des
	}

	if initial == nil {
		return des
	}

	cDes := &WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig{}

	if dcl.IsZeroValue(des.DataprocMetastoreService) || (dcl.IsEmptyValueIndirect(des.DataprocMetastoreService) && dcl.IsEmptyValueIndirect(initial.DataprocMetastoreService)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		cDes.DataprocMetastoreService = initial.DataprocMetastoreService
	} else {
		cDes.DataprocMetastoreService = des.DataprocMetastoreService
	}

	return cDes
}

// canonicalize…MetastoreConfigSlice canonicalizes pairwise against initial; on a length
// mismatch each desired element is canonicalized against nil instead.
func canonicalizeWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigSlice(des, initial []WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig {
	if dcl.IsEmptyValueIndirect(des) {
		return initial
	}

	if len(des) != len(initial) {

		items := make([]WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig, 0, len(des))
		for _, d := range des {
			cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig(&d, nil, opts...)
			if cd != nil {
				items = append(items, *cd)
			}
		}
		return items
	}

	items := make([]WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig, 0, len(des))
	for i, d := range des {
		cd := canonicalizeWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig(&d, &initial[i], opts...)
		if cd != nil {
			items = append(items, *cd)
		}
	}
	return items

}

// canonicalizeNew…MetastoreConfig reconciles desired vs. actual (nw); no field-level
// canonicalization is needed here, only the nil/empty handling.
func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig(c *Client, des, nw *WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig) *WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig {

	if des == nil {
		return nw
	}

	if nw == nil {
		if dcl.IsEmptyValueIndirect(des) {
			c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig while comparing non-nil desired to nil actual. Returning desired object.")
			return des
		}
		return nil
	}

	return nw
}

// canonicalizeNew…MetastoreConfigSet matches des elements to nw by zero-diff comparison
// (order-insensitive), canonicalizes the matches, then appends unmatched nw elements.
func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigSet(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig) []WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig {
	if des == nil {
		return nw
	}

	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
	var items []WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig
	for _, d := range des {
		matchedIndex := -1
		for i, n := range nw {
			if diffs, _ := compareWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
				matchedIndex = i
				break
			}
		}
		if matchedIndex != -1 {
			items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig(c, &d, &nw[matchedIndex]))
			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
		}
	}
	// Also include elements in nw that are not matched in des.
	items = append(items, nw...)

	return items
}

// canonicalizeNew…MetastoreConfigSlice canonicalizes element-wise; unequal lengths are
// returned unchanged so the diff surfaces later.
// NOTE(review): the template "end" below closes the non-GA-only region opened inside
// canonicalizeNew…EndpointConfigSlice above; in the GA rendering the trailing lines of
// this function close that earlier loop instead — generator-controlled splice, do not
// hand-edit the directive placement.
func canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigSlice(c *Client, des, nw []WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig) []WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig {
	if des == nil {
		return nw
	}

	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
	// Return the original array.
	if len(des) != len(nw) {
		return nw
	}

	var items []WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig
	for i, d := range des {
		n := nw[i]
		items = append(items, *canonicalizeNewWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig(c, &d, &n))
{{- end }}
	}

	return items
}

// canonicalizeWorkflowTemplatePlacementClusterSelector merges the desired cluster
// selector (zone + labels) with its initial state.
func canonicalizeWorkflowTemplatePlacementClusterSelector(des, initial *WorkflowTemplatePlacementClusterSelector, opts ...dcl.ApplyOption) *WorkflowTemplatePlacementClusterSelector {
	if des == nil {
		return initial
	}
	if des.empty {
		return des
	}

	if initial == nil {
		return des
	}

	cDes := &WorkflowTemplatePlacementClusterSelector{}

	if dcl.StringCanonicalize(des.Zone, initial.Zone) || dcl.IsZeroValue(des.Zone) {
		cDes.Zone = initial.Zone
	} else {
		cDes.Zone = des.Zone
	}
	if dcl.IsZeroValue(des.ClusterLabels) || (dcl.IsEmptyValueIndirect(des.ClusterLabels) && dcl.IsEmptyValueIndirect(initial.ClusterLabels)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		cDes.ClusterLabels = initial.ClusterLabels
	} else {
		cDes.ClusterLabels = des.ClusterLabels
	}

	return cDes
}

// canonicalizeWorkflowTemplatePlacementClusterSelectorSlice canonicalizes pairwise
// against initial; on a length mismatch each desired element is canonicalized against
// nil instead. (Function continues past this chunk boundary.)
func canonicalizeWorkflowTemplatePlacementClusterSelectorSlice(des, initial []WorkflowTemplatePlacementClusterSelector, opts ...dcl.ApplyOption) []WorkflowTemplatePlacementClusterSelector {
	if dcl.IsEmptyValueIndirect(des) {
		return initial
	}

	if len(des) != len(initial) {

		items := make([]WorkflowTemplatePlacementClusterSelector, 0, len(des))
		for _, d := range des {
			cd := canonicalizeWorkflowTemplatePlacementClusterSelector(&d, nil, opts...)
			if cd != nil {
				items = append(items, *cd)
			}
		}
		return items
	}

	items := make([]WorkflowTemplatePlacementClusterSelector, 0, len(des))
	for i, d := range des {
		cd := canonicalizeWorkflowTemplatePlacementClusterSelector(&d, &initial[i], opts...)
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplatePlacementClusterSelector(c *Client, des, nw *WorkflowTemplatePlacementClusterSelector) *WorkflowTemplatePlacementClusterSelector { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplatePlacementClusterSelector while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Zone, nw.Zone) { + nw.Zone = des.Zone + } + + return nw +} + +func canonicalizeNewWorkflowTemplatePlacementClusterSelectorSet(c *Client, des, nw []WorkflowTemplatePlacementClusterSelector) []WorkflowTemplatePlacementClusterSelector { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplatePlacementClusterSelector + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplatePlacementClusterSelectorNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplatePlacementClusterSelector(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplatePlacementClusterSelectorSlice(c *Client, des, nw []WorkflowTemplatePlacementClusterSelector) []WorkflowTemplatePlacementClusterSelector { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplatePlacementClusterSelector + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplatePlacementClusterSelector(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplateJobs(des, initial *WorkflowTemplateJobs, opts ...dcl.ApplyOption) *WorkflowTemplateJobs { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplateJobs{} + + if dcl.StringCanonicalize(des.StepId, initial.StepId) || dcl.IsZeroValue(des.StepId) { + cDes.StepId = initial.StepId + } else { + cDes.StepId = des.StepId + } + cDes.HadoopJob = canonicalizeWorkflowTemplateJobsHadoopJob(des.HadoopJob, initial.HadoopJob, opts...) + cDes.SparkJob = canonicalizeWorkflowTemplateJobsSparkJob(des.SparkJob, initial.SparkJob, opts...) + cDes.PysparkJob = canonicalizeWorkflowTemplateJobsPysparkJob(des.PysparkJob, initial.PysparkJob, opts...) + cDes.HiveJob = canonicalizeWorkflowTemplateJobsHiveJob(des.HiveJob, initial.HiveJob, opts...) + cDes.PigJob = canonicalizeWorkflowTemplateJobsPigJob(des.PigJob, initial.PigJob, opts...) + cDes.SparkRJob = canonicalizeWorkflowTemplateJobsSparkRJob(des.SparkRJob, initial.SparkRJob, opts...) + cDes.SparkSqlJob = canonicalizeWorkflowTemplateJobsSparkSqlJob(des.SparkSqlJob, initial.SparkSqlJob, opts...) + cDes.PrestoJob = canonicalizeWorkflowTemplateJobsPrestoJob(des.PrestoJob, initial.PrestoJob, opts...) + if dcl.IsZeroValue(des.Labels) || (dcl.IsEmptyValueIndirect(des.Labels) && dcl.IsEmptyValueIndirect(initial.Labels)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Labels = initial.Labels + } else { + cDes.Labels = des.Labels + } + cDes.Scheduling = canonicalizeWorkflowTemplateJobsScheduling(des.Scheduling, initial.Scheduling, opts...) 
+ if dcl.StringArrayCanonicalize(des.PrerequisiteStepIds, initial.PrerequisiteStepIds) { + cDes.PrerequisiteStepIds = initial.PrerequisiteStepIds + } else { + cDes.PrerequisiteStepIds = des.PrerequisiteStepIds + } + + return cDes +} + +func canonicalizeWorkflowTemplateJobsSlice(des, initial []WorkflowTemplateJobs, opts ...dcl.ApplyOption) []WorkflowTemplateJobs { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplateJobs, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplateJobs(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplateJobs, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplateJobs(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplateJobs(c *Client, des, nw *WorkflowTemplateJobs) *WorkflowTemplateJobs { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplateJobs while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.StepId, nw.StepId) { + nw.StepId = des.StepId + } + nw.HadoopJob = canonicalizeNewWorkflowTemplateJobsHadoopJob(c, des.HadoopJob, nw.HadoopJob) + nw.SparkJob = canonicalizeNewWorkflowTemplateJobsSparkJob(c, des.SparkJob, nw.SparkJob) + nw.PysparkJob = canonicalizeNewWorkflowTemplateJobsPysparkJob(c, des.PysparkJob, nw.PysparkJob) + nw.HiveJob = canonicalizeNewWorkflowTemplateJobsHiveJob(c, des.HiveJob, nw.HiveJob) + nw.PigJob = canonicalizeNewWorkflowTemplateJobsPigJob(c, des.PigJob, nw.PigJob) + nw.SparkRJob = canonicalizeNewWorkflowTemplateJobsSparkRJob(c, des.SparkRJob, nw.SparkRJob) + nw.SparkSqlJob = canonicalizeNewWorkflowTemplateJobsSparkSqlJob(c, des.SparkSqlJob, nw.SparkSqlJob) + nw.PrestoJob = canonicalizeNewWorkflowTemplateJobsPrestoJob(c, des.PrestoJob, nw.PrestoJob) + nw.Scheduling = canonicalizeNewWorkflowTemplateJobsScheduling(c, des.Scheduling, nw.Scheduling) + if dcl.StringArrayCanonicalize(des.PrerequisiteStepIds, nw.PrerequisiteStepIds) { + nw.PrerequisiteStepIds = des.PrerequisiteStepIds + } + + return nw +} + +func canonicalizeNewWorkflowTemplateJobsSet(c *Client, des, nw []WorkflowTemplateJobs) []WorkflowTemplateJobs { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplateJobs + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplateJobsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplateJobs(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) 
+ + return items +} + +func canonicalizeNewWorkflowTemplateJobsSlice(c *Client, des, nw []WorkflowTemplateJobs) []WorkflowTemplateJobs { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplateJobs + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplateJobs(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplateJobsHadoopJob(des, initial *WorkflowTemplateJobsHadoopJob, opts ...dcl.ApplyOption) *WorkflowTemplateJobsHadoopJob { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplateJobsHadoopJob{} + + if dcl.StringCanonicalize(des.MainJarFileUri, initial.MainJarFileUri) || dcl.IsZeroValue(des.MainJarFileUri) { + cDes.MainJarFileUri = initial.MainJarFileUri + } else { + cDes.MainJarFileUri = des.MainJarFileUri + } + if dcl.StringCanonicalize(des.MainClass, initial.MainClass) || dcl.IsZeroValue(des.MainClass) { + cDes.MainClass = initial.MainClass + } else { + cDes.MainClass = des.MainClass + } + if dcl.StringArrayCanonicalize(des.Args, initial.Args) { + cDes.Args = initial.Args + } else { + cDes.Args = des.Args + } + if dcl.StringArrayCanonicalize(des.JarFileUris, initial.JarFileUris) { + cDes.JarFileUris = initial.JarFileUris + } else { + cDes.JarFileUris = des.JarFileUris + } + if dcl.StringArrayCanonicalize(des.FileUris, initial.FileUris) { + cDes.FileUris = initial.FileUris + } else { + cDes.FileUris = des.FileUris + } + if dcl.StringArrayCanonicalize(des.ArchiveUris, initial.ArchiveUris) { + cDes.ArchiveUris = initial.ArchiveUris + } else { + cDes.ArchiveUris = des.ArchiveUris + } + if dcl.IsZeroValue(des.Properties) || (dcl.IsEmptyValueIndirect(des.Properties) && dcl.IsEmptyValueIndirect(initial.Properties)) { + // Desired and initial values are 
equivalent, so set canonical desired value to initial value. + cDes.Properties = initial.Properties + } else { + cDes.Properties = des.Properties + } + cDes.LoggingConfig = canonicalizeWorkflowTemplateJobsHadoopJobLoggingConfig(des.LoggingConfig, initial.LoggingConfig, opts...) + + return cDes +} + +func canonicalizeWorkflowTemplateJobsHadoopJobSlice(des, initial []WorkflowTemplateJobsHadoopJob, opts ...dcl.ApplyOption) []WorkflowTemplateJobsHadoopJob { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplateJobsHadoopJob, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplateJobsHadoopJob(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplateJobsHadoopJob, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplateJobsHadoopJob(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplateJobsHadoopJob(c *Client, des, nw *WorkflowTemplateJobsHadoopJob) *WorkflowTemplateJobsHadoopJob { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplateJobsHadoopJob while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.MainJarFileUri, nw.MainJarFileUri) { + nw.MainJarFileUri = des.MainJarFileUri + } + if dcl.StringCanonicalize(des.MainClass, nw.MainClass) { + nw.MainClass = des.MainClass + } + if dcl.StringArrayCanonicalize(des.Args, nw.Args) { + nw.Args = des.Args + } + if dcl.StringArrayCanonicalize(des.JarFileUris, nw.JarFileUris) { + nw.JarFileUris = des.JarFileUris + } + if dcl.StringArrayCanonicalize(des.FileUris, nw.FileUris) { + nw.FileUris = des.FileUris + } + if dcl.StringArrayCanonicalize(des.ArchiveUris, nw.ArchiveUris) { + nw.ArchiveUris = des.ArchiveUris + } + nw.LoggingConfig = canonicalizeNewWorkflowTemplateJobsHadoopJobLoggingConfig(c, des.LoggingConfig, nw.LoggingConfig) + + return nw +} + +func canonicalizeNewWorkflowTemplateJobsHadoopJobSet(c *Client, des, nw []WorkflowTemplateJobsHadoopJob) []WorkflowTemplateJobsHadoopJob { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplateJobsHadoopJob + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplateJobsHadoopJobNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplateJobsHadoopJob(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplateJobsHadoopJobSlice(c *Client, des, nw []WorkflowTemplateJobsHadoopJob) []WorkflowTemplateJobsHadoopJob { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplateJobsHadoopJob + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplateJobsHadoopJob(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplateJobsHadoopJobLoggingConfig(des, initial *WorkflowTemplateJobsHadoopJobLoggingConfig, opts ...dcl.ApplyOption) *WorkflowTemplateJobsHadoopJobLoggingConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplateJobsHadoopJobLoggingConfig{} + + if dcl.IsZeroValue(des.DriverLogLevels) || (dcl.IsEmptyValueIndirect(des.DriverLogLevels) && dcl.IsEmptyValueIndirect(initial.DriverLogLevels)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.DriverLogLevels = initial.DriverLogLevels + } else { + cDes.DriverLogLevels = des.DriverLogLevels + } + + return cDes +} + +func canonicalizeWorkflowTemplateJobsHadoopJobLoggingConfigSlice(des, initial []WorkflowTemplateJobsHadoopJobLoggingConfig, opts ...dcl.ApplyOption) []WorkflowTemplateJobsHadoopJobLoggingConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplateJobsHadoopJobLoggingConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplateJobsHadoopJobLoggingConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplateJobsHadoopJobLoggingConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplateJobsHadoopJobLoggingConfig(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplateJobsHadoopJobLoggingConfig(c *Client, des, nw *WorkflowTemplateJobsHadoopJobLoggingConfig) *WorkflowTemplateJobsHadoopJobLoggingConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplateJobsHadoopJobLoggingConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewWorkflowTemplateJobsHadoopJobLoggingConfigSet(c *Client, des, nw []WorkflowTemplateJobsHadoopJobLoggingConfig) []WorkflowTemplateJobsHadoopJobLoggingConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplateJobsHadoopJobLoggingConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplateJobsHadoopJobLoggingConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplateJobsHadoopJobLoggingConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplateJobsHadoopJobLoggingConfigSlice(c *Client, des, nw []WorkflowTemplateJobsHadoopJobLoggingConfig) []WorkflowTemplateJobsHadoopJobLoggingConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplateJobsHadoopJobLoggingConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplateJobsHadoopJobLoggingConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplateJobsSparkJob(des, initial *WorkflowTemplateJobsSparkJob, opts ...dcl.ApplyOption) *WorkflowTemplateJobsSparkJob { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplateJobsSparkJob{} + + if dcl.StringCanonicalize(des.MainJarFileUri, initial.MainJarFileUri) || dcl.IsZeroValue(des.MainJarFileUri) { + cDes.MainJarFileUri = initial.MainJarFileUri + } else { + cDes.MainJarFileUri = des.MainJarFileUri + } + if dcl.StringCanonicalize(des.MainClass, initial.MainClass) || dcl.IsZeroValue(des.MainClass) { + cDes.MainClass = initial.MainClass + } else { + cDes.MainClass = des.MainClass + } + if dcl.StringArrayCanonicalize(des.Args, initial.Args) { + cDes.Args = initial.Args + } else { + cDes.Args = des.Args + } + if dcl.StringArrayCanonicalize(des.JarFileUris, initial.JarFileUris) { + cDes.JarFileUris = initial.JarFileUris + } else { + cDes.JarFileUris = des.JarFileUris + } + if dcl.StringArrayCanonicalize(des.FileUris, initial.FileUris) { + cDes.FileUris = initial.FileUris + } else { + cDes.FileUris = des.FileUris + } + if dcl.StringArrayCanonicalize(des.ArchiveUris, initial.ArchiveUris) { + cDes.ArchiveUris = initial.ArchiveUris + } else { + cDes.ArchiveUris = des.ArchiveUris + } + if dcl.IsZeroValue(des.Properties) || (dcl.IsEmptyValueIndirect(des.Properties) && dcl.IsEmptyValueIndirect(initial.Properties)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.Properties = initial.Properties + } else { + cDes.Properties = des.Properties + } + cDes.LoggingConfig = canonicalizeWorkflowTemplateJobsSparkJobLoggingConfig(des.LoggingConfig, initial.LoggingConfig, opts...) + + return cDes +} + +func canonicalizeWorkflowTemplateJobsSparkJobSlice(des, initial []WorkflowTemplateJobsSparkJob, opts ...dcl.ApplyOption) []WorkflowTemplateJobsSparkJob { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplateJobsSparkJob, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplateJobsSparkJob(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplateJobsSparkJob, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplateJobsSparkJob(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplateJobsSparkJob(c *Client, des, nw *WorkflowTemplateJobsSparkJob) *WorkflowTemplateJobsSparkJob { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplateJobsSparkJob while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.MainJarFileUri, nw.MainJarFileUri) { + nw.MainJarFileUri = des.MainJarFileUri + } + if dcl.StringCanonicalize(des.MainClass, nw.MainClass) { + nw.MainClass = des.MainClass + } + if dcl.StringArrayCanonicalize(des.Args, nw.Args) { + nw.Args = des.Args + } + if dcl.StringArrayCanonicalize(des.JarFileUris, nw.JarFileUris) { + nw.JarFileUris = des.JarFileUris + } + if dcl.StringArrayCanonicalize(des.FileUris, nw.FileUris) { + nw.FileUris = des.FileUris + } + if dcl.StringArrayCanonicalize(des.ArchiveUris, nw.ArchiveUris) { + nw.ArchiveUris = des.ArchiveUris + } + nw.LoggingConfig = canonicalizeNewWorkflowTemplateJobsSparkJobLoggingConfig(c, des.LoggingConfig, nw.LoggingConfig) + + return nw +} + +func canonicalizeNewWorkflowTemplateJobsSparkJobSet(c *Client, des, nw []WorkflowTemplateJobsSparkJob) []WorkflowTemplateJobsSparkJob { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplateJobsSparkJob + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplateJobsSparkJobNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplateJobsSparkJob(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplateJobsSparkJobSlice(c *Client, des, nw []WorkflowTemplateJobsSparkJob) []WorkflowTemplateJobsSparkJob { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplateJobsSparkJob + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplateJobsSparkJob(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplateJobsSparkJobLoggingConfig(des, initial *WorkflowTemplateJobsSparkJobLoggingConfig, opts ...dcl.ApplyOption) *WorkflowTemplateJobsSparkJobLoggingConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplateJobsSparkJobLoggingConfig{} + + if dcl.IsZeroValue(des.DriverLogLevels) || (dcl.IsEmptyValueIndirect(des.DriverLogLevels) && dcl.IsEmptyValueIndirect(initial.DriverLogLevels)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.DriverLogLevels = initial.DriverLogLevels + } else { + cDes.DriverLogLevels = des.DriverLogLevels + } + + return cDes +} + +func canonicalizeWorkflowTemplateJobsSparkJobLoggingConfigSlice(des, initial []WorkflowTemplateJobsSparkJobLoggingConfig, opts ...dcl.ApplyOption) []WorkflowTemplateJobsSparkJobLoggingConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplateJobsSparkJobLoggingConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplateJobsSparkJobLoggingConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplateJobsSparkJobLoggingConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplateJobsSparkJobLoggingConfig(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplateJobsSparkJobLoggingConfig(c *Client, des, nw *WorkflowTemplateJobsSparkJobLoggingConfig) *WorkflowTemplateJobsSparkJobLoggingConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplateJobsSparkJobLoggingConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewWorkflowTemplateJobsSparkJobLoggingConfigSet(c *Client, des, nw []WorkflowTemplateJobsSparkJobLoggingConfig) []WorkflowTemplateJobsSparkJobLoggingConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplateJobsSparkJobLoggingConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplateJobsSparkJobLoggingConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplateJobsSparkJobLoggingConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplateJobsSparkJobLoggingConfigSlice(c *Client, des, nw []WorkflowTemplateJobsSparkJobLoggingConfig) []WorkflowTemplateJobsSparkJobLoggingConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplateJobsSparkJobLoggingConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplateJobsSparkJobLoggingConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplateJobsPysparkJob(des, initial *WorkflowTemplateJobsPysparkJob, opts ...dcl.ApplyOption) *WorkflowTemplateJobsPysparkJob { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplateJobsPysparkJob{} + + if dcl.StringCanonicalize(des.MainPythonFileUri, initial.MainPythonFileUri) || dcl.IsZeroValue(des.MainPythonFileUri) { + cDes.MainPythonFileUri = initial.MainPythonFileUri + } else { + cDes.MainPythonFileUri = des.MainPythonFileUri + } + if dcl.StringArrayCanonicalize(des.Args, initial.Args) { + cDes.Args = initial.Args + } else { + cDes.Args = des.Args + } + if dcl.StringArrayCanonicalize(des.PythonFileUris, initial.PythonFileUris) { + cDes.PythonFileUris = initial.PythonFileUris + } else { + cDes.PythonFileUris = des.PythonFileUris + } + if dcl.StringArrayCanonicalize(des.JarFileUris, initial.JarFileUris) { + cDes.JarFileUris = initial.JarFileUris + } else { + cDes.JarFileUris = des.JarFileUris + } + if dcl.StringArrayCanonicalize(des.FileUris, initial.FileUris) { + cDes.FileUris = initial.FileUris + } else { + cDes.FileUris = des.FileUris + } + if dcl.StringArrayCanonicalize(des.ArchiveUris, initial.ArchiveUris) { + cDes.ArchiveUris = initial.ArchiveUris + } else { + cDes.ArchiveUris = des.ArchiveUris + } + if dcl.IsZeroValue(des.Properties) || (dcl.IsEmptyValueIndirect(des.Properties) && dcl.IsEmptyValueIndirect(initial.Properties)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.Properties = initial.Properties + } else { + cDes.Properties = des.Properties + } + cDes.LoggingConfig = canonicalizeWorkflowTemplateJobsPysparkJobLoggingConfig(des.LoggingConfig, initial.LoggingConfig, opts...) + + return cDes +} + +func canonicalizeWorkflowTemplateJobsPysparkJobSlice(des, initial []WorkflowTemplateJobsPysparkJob, opts ...dcl.ApplyOption) []WorkflowTemplateJobsPysparkJob { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplateJobsPysparkJob, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplateJobsPysparkJob(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplateJobsPysparkJob, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplateJobsPysparkJob(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplateJobsPysparkJob(c *Client, des, nw *WorkflowTemplateJobsPysparkJob) *WorkflowTemplateJobsPysparkJob { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplateJobsPysparkJob while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.MainPythonFileUri, nw.MainPythonFileUri) { + nw.MainPythonFileUri = des.MainPythonFileUri + } + if dcl.StringArrayCanonicalize(des.Args, nw.Args) { + nw.Args = des.Args + } + if dcl.StringArrayCanonicalize(des.PythonFileUris, nw.PythonFileUris) { + nw.PythonFileUris = des.PythonFileUris + } + if dcl.StringArrayCanonicalize(des.JarFileUris, nw.JarFileUris) { + nw.JarFileUris = des.JarFileUris + } + if dcl.StringArrayCanonicalize(des.FileUris, nw.FileUris) { + nw.FileUris = des.FileUris + } + if dcl.StringArrayCanonicalize(des.ArchiveUris, nw.ArchiveUris) { + nw.ArchiveUris = des.ArchiveUris + } + nw.LoggingConfig = canonicalizeNewWorkflowTemplateJobsPysparkJobLoggingConfig(c, des.LoggingConfig, nw.LoggingConfig) + + return nw +} + +func canonicalizeNewWorkflowTemplateJobsPysparkJobSet(c *Client, des, nw []WorkflowTemplateJobsPysparkJob) []WorkflowTemplateJobsPysparkJob { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplateJobsPysparkJob + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplateJobsPysparkJobNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplateJobsPysparkJob(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplateJobsPysparkJobSlice(c *Client, des, nw []WorkflowTemplateJobsPysparkJob) []WorkflowTemplateJobsPysparkJob { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplateJobsPysparkJob + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplateJobsPysparkJob(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplateJobsPysparkJobLoggingConfig(des, initial *WorkflowTemplateJobsPysparkJobLoggingConfig, opts ...dcl.ApplyOption) *WorkflowTemplateJobsPysparkJobLoggingConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplateJobsPysparkJobLoggingConfig{} + + if dcl.IsZeroValue(des.DriverLogLevels) || (dcl.IsEmptyValueIndirect(des.DriverLogLevels) && dcl.IsEmptyValueIndirect(initial.DriverLogLevels)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.DriverLogLevels = initial.DriverLogLevels + } else { + cDes.DriverLogLevels = des.DriverLogLevels + } + + return cDes +} + +func canonicalizeWorkflowTemplateJobsPysparkJobLoggingConfigSlice(des, initial []WorkflowTemplateJobsPysparkJobLoggingConfig, opts ...dcl.ApplyOption) []WorkflowTemplateJobsPysparkJobLoggingConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplateJobsPysparkJobLoggingConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplateJobsPysparkJobLoggingConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplateJobsPysparkJobLoggingConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplateJobsPysparkJobLoggingConfig(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplateJobsPysparkJobLoggingConfig(c *Client, des, nw *WorkflowTemplateJobsPysparkJobLoggingConfig) *WorkflowTemplateJobsPysparkJobLoggingConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplateJobsPysparkJobLoggingConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewWorkflowTemplateJobsPysparkJobLoggingConfigSet(c *Client, des, nw []WorkflowTemplateJobsPysparkJobLoggingConfig) []WorkflowTemplateJobsPysparkJobLoggingConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplateJobsPysparkJobLoggingConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplateJobsPysparkJobLoggingConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplateJobsPysparkJobLoggingConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplateJobsPysparkJobLoggingConfigSlice(c *Client, des, nw []WorkflowTemplateJobsPysparkJobLoggingConfig) []WorkflowTemplateJobsPysparkJobLoggingConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplateJobsPysparkJobLoggingConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplateJobsPysparkJobLoggingConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplateJobsHiveJob(des, initial *WorkflowTemplateJobsHiveJob, opts ...dcl.ApplyOption) *WorkflowTemplateJobsHiveJob { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplateJobsHiveJob{} + + if dcl.StringCanonicalize(des.QueryFileUri, initial.QueryFileUri) || dcl.IsZeroValue(des.QueryFileUri) { + cDes.QueryFileUri = initial.QueryFileUri + } else { + cDes.QueryFileUri = des.QueryFileUri + } + cDes.QueryList = canonicalizeWorkflowTemplateJobsHiveJobQueryList(des.QueryList, initial.QueryList, opts...) + if dcl.BoolCanonicalize(des.ContinueOnFailure, initial.ContinueOnFailure) || dcl.IsZeroValue(des.ContinueOnFailure) { + cDes.ContinueOnFailure = initial.ContinueOnFailure + } else { + cDes.ContinueOnFailure = des.ContinueOnFailure + } + if dcl.IsZeroValue(des.ScriptVariables) || (dcl.IsEmptyValueIndirect(des.ScriptVariables) && dcl.IsEmptyValueIndirect(initial.ScriptVariables)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.ScriptVariables = initial.ScriptVariables + } else { + cDes.ScriptVariables = des.ScriptVariables + } + if dcl.IsZeroValue(des.Properties) || (dcl.IsEmptyValueIndirect(des.Properties) && dcl.IsEmptyValueIndirect(initial.Properties)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.Properties = initial.Properties + } else { + cDes.Properties = des.Properties + } + if dcl.StringArrayCanonicalize(des.JarFileUris, initial.JarFileUris) { + cDes.JarFileUris = initial.JarFileUris + } else { + cDes.JarFileUris = des.JarFileUris + } + + return cDes +} + +func canonicalizeWorkflowTemplateJobsHiveJobSlice(des, initial []WorkflowTemplateJobsHiveJob, opts ...dcl.ApplyOption) []WorkflowTemplateJobsHiveJob { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplateJobsHiveJob, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplateJobsHiveJob(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplateJobsHiveJob, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplateJobsHiveJob(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplateJobsHiveJob(c *Client, des, nw *WorkflowTemplateJobsHiveJob) *WorkflowTemplateJobsHiveJob { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplateJobsHiveJob while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.QueryFileUri, nw.QueryFileUri) { + nw.QueryFileUri = des.QueryFileUri + } + nw.QueryList = canonicalizeNewWorkflowTemplateJobsHiveJobQueryList(c, des.QueryList, nw.QueryList) + if dcl.BoolCanonicalize(des.ContinueOnFailure, nw.ContinueOnFailure) { + nw.ContinueOnFailure = des.ContinueOnFailure + } + if dcl.StringArrayCanonicalize(des.JarFileUris, nw.JarFileUris) { + nw.JarFileUris = des.JarFileUris + } + + return nw +} + +func canonicalizeNewWorkflowTemplateJobsHiveJobSet(c *Client, des, nw []WorkflowTemplateJobsHiveJob) []WorkflowTemplateJobsHiveJob { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplateJobsHiveJob + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplateJobsHiveJobNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplateJobsHiveJob(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplateJobsHiveJobSlice(c *Client, des, nw []WorkflowTemplateJobsHiveJob) []WorkflowTemplateJobsHiveJob { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplateJobsHiveJob + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplateJobsHiveJob(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplateJobsHiveJobQueryList(des, initial *WorkflowTemplateJobsHiveJobQueryList, opts ...dcl.ApplyOption) *WorkflowTemplateJobsHiveJobQueryList { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplateJobsHiveJobQueryList{} + + if dcl.StringArrayCanonicalize(des.Queries, initial.Queries) { + cDes.Queries = initial.Queries + } else { + cDes.Queries = des.Queries + } + + return cDes +} + +func canonicalizeWorkflowTemplateJobsHiveJobQueryListSlice(des, initial []WorkflowTemplateJobsHiveJobQueryList, opts ...dcl.ApplyOption) []WorkflowTemplateJobsHiveJobQueryList { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplateJobsHiveJobQueryList, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplateJobsHiveJobQueryList(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplateJobsHiveJobQueryList, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplateJobsHiveJobQueryList(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplateJobsHiveJobQueryList(c *Client, des, nw *WorkflowTemplateJobsHiveJobQueryList) *WorkflowTemplateJobsHiveJobQueryList { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplateJobsHiveJobQueryList while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringArrayCanonicalize(des.Queries, nw.Queries) { + nw.Queries = des.Queries + } + + return nw +} + +func canonicalizeNewWorkflowTemplateJobsHiveJobQueryListSet(c *Client, des, nw []WorkflowTemplateJobsHiveJobQueryList) []WorkflowTemplateJobsHiveJobQueryList { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplateJobsHiveJobQueryList + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplateJobsHiveJobQueryListNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplateJobsHiveJobQueryList(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplateJobsHiveJobQueryListSlice(c *Client, des, nw []WorkflowTemplateJobsHiveJobQueryList) []WorkflowTemplateJobsHiveJobQueryList { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplateJobsHiveJobQueryList + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplateJobsHiveJobQueryList(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplateJobsPigJob(des, initial *WorkflowTemplateJobsPigJob, opts ...dcl.ApplyOption) *WorkflowTemplateJobsPigJob { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplateJobsPigJob{} + + if dcl.StringCanonicalize(des.QueryFileUri, initial.QueryFileUri) || dcl.IsZeroValue(des.QueryFileUri) { + cDes.QueryFileUri = initial.QueryFileUri + } else { + cDes.QueryFileUri = des.QueryFileUri + } + cDes.QueryList = canonicalizeWorkflowTemplateJobsPigJobQueryList(des.QueryList, initial.QueryList, opts...) + if dcl.BoolCanonicalize(des.ContinueOnFailure, initial.ContinueOnFailure) || dcl.IsZeroValue(des.ContinueOnFailure) { + cDes.ContinueOnFailure = initial.ContinueOnFailure + } else { + cDes.ContinueOnFailure = des.ContinueOnFailure + } + if dcl.IsZeroValue(des.ScriptVariables) || (dcl.IsEmptyValueIndirect(des.ScriptVariables) && dcl.IsEmptyValueIndirect(initial.ScriptVariables)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.ScriptVariables = initial.ScriptVariables + } else { + cDes.ScriptVariables = des.ScriptVariables + } + if dcl.IsZeroValue(des.Properties) || (dcl.IsEmptyValueIndirect(des.Properties) && dcl.IsEmptyValueIndirect(initial.Properties)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.Properties = initial.Properties + } else { + cDes.Properties = des.Properties + } + if dcl.StringArrayCanonicalize(des.JarFileUris, initial.JarFileUris) { + cDes.JarFileUris = initial.JarFileUris + } else { + cDes.JarFileUris = des.JarFileUris + } + cDes.LoggingConfig = canonicalizeWorkflowTemplateJobsPigJobLoggingConfig(des.LoggingConfig, initial.LoggingConfig, opts...) + + return cDes +} + +func canonicalizeWorkflowTemplateJobsPigJobSlice(des, initial []WorkflowTemplateJobsPigJob, opts ...dcl.ApplyOption) []WorkflowTemplateJobsPigJob { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplateJobsPigJob, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplateJobsPigJob(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplateJobsPigJob, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplateJobsPigJob(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplateJobsPigJob(c *Client, des, nw *WorkflowTemplateJobsPigJob) *WorkflowTemplateJobsPigJob { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplateJobsPigJob while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.QueryFileUri, nw.QueryFileUri) { + nw.QueryFileUri = des.QueryFileUri + } + nw.QueryList = canonicalizeNewWorkflowTemplateJobsPigJobQueryList(c, des.QueryList, nw.QueryList) + if dcl.BoolCanonicalize(des.ContinueOnFailure, nw.ContinueOnFailure) { + nw.ContinueOnFailure = des.ContinueOnFailure + } + if dcl.StringArrayCanonicalize(des.JarFileUris, nw.JarFileUris) { + nw.JarFileUris = des.JarFileUris + } + nw.LoggingConfig = canonicalizeNewWorkflowTemplateJobsPigJobLoggingConfig(c, des.LoggingConfig, nw.LoggingConfig) + + return nw +} + +func canonicalizeNewWorkflowTemplateJobsPigJobSet(c *Client, des, nw []WorkflowTemplateJobsPigJob) []WorkflowTemplateJobsPigJob { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplateJobsPigJob + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplateJobsPigJobNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplateJobsPigJob(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplateJobsPigJobSlice(c *Client, des, nw []WorkflowTemplateJobsPigJob) []WorkflowTemplateJobsPigJob { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplateJobsPigJob + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplateJobsPigJob(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplateJobsPigJobQueryList(des, initial *WorkflowTemplateJobsPigJobQueryList, opts ...dcl.ApplyOption) *WorkflowTemplateJobsPigJobQueryList { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplateJobsPigJobQueryList{} + + if dcl.StringArrayCanonicalize(des.Queries, initial.Queries) { + cDes.Queries = initial.Queries + } else { + cDes.Queries = des.Queries + } + + return cDes +} + +func canonicalizeWorkflowTemplateJobsPigJobQueryListSlice(des, initial []WorkflowTemplateJobsPigJobQueryList, opts ...dcl.ApplyOption) []WorkflowTemplateJobsPigJobQueryList { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplateJobsPigJobQueryList, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplateJobsPigJobQueryList(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplateJobsPigJobQueryList, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplateJobsPigJobQueryList(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplateJobsPigJobQueryList(c *Client, des, nw *WorkflowTemplateJobsPigJobQueryList) *WorkflowTemplateJobsPigJobQueryList { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplateJobsPigJobQueryList while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringArrayCanonicalize(des.Queries, nw.Queries) { + nw.Queries = des.Queries + } + + return nw +} + +func canonicalizeNewWorkflowTemplateJobsPigJobQueryListSet(c *Client, des, nw []WorkflowTemplateJobsPigJobQueryList) []WorkflowTemplateJobsPigJobQueryList { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplateJobsPigJobQueryList + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplateJobsPigJobQueryListNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplateJobsPigJobQueryList(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplateJobsPigJobQueryListSlice(c *Client, des, nw []WorkflowTemplateJobsPigJobQueryList) []WorkflowTemplateJobsPigJobQueryList { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplateJobsPigJobQueryList + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplateJobsPigJobQueryList(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplateJobsPigJobLoggingConfig(des, initial *WorkflowTemplateJobsPigJobLoggingConfig, opts ...dcl.ApplyOption) *WorkflowTemplateJobsPigJobLoggingConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplateJobsPigJobLoggingConfig{} + + if dcl.IsZeroValue(des.DriverLogLevels) || (dcl.IsEmptyValueIndirect(des.DriverLogLevels) && dcl.IsEmptyValueIndirect(initial.DriverLogLevels)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.DriverLogLevels = initial.DriverLogLevels + } else { + cDes.DriverLogLevels = des.DriverLogLevels + } + + return cDes +} + +func canonicalizeWorkflowTemplateJobsPigJobLoggingConfigSlice(des, initial []WorkflowTemplateJobsPigJobLoggingConfig, opts ...dcl.ApplyOption) []WorkflowTemplateJobsPigJobLoggingConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplateJobsPigJobLoggingConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplateJobsPigJobLoggingConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplateJobsPigJobLoggingConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplateJobsPigJobLoggingConfig(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplateJobsPigJobLoggingConfig(c *Client, des, nw *WorkflowTemplateJobsPigJobLoggingConfig) *WorkflowTemplateJobsPigJobLoggingConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplateJobsPigJobLoggingConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewWorkflowTemplateJobsPigJobLoggingConfigSet(c *Client, des, nw []WorkflowTemplateJobsPigJobLoggingConfig) []WorkflowTemplateJobsPigJobLoggingConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplateJobsPigJobLoggingConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplateJobsPigJobLoggingConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplateJobsPigJobLoggingConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplateJobsPigJobLoggingConfigSlice(c *Client, des, nw []WorkflowTemplateJobsPigJobLoggingConfig) []WorkflowTemplateJobsPigJobLoggingConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplateJobsPigJobLoggingConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplateJobsPigJobLoggingConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplateJobsSparkRJob(des, initial *WorkflowTemplateJobsSparkRJob, opts ...dcl.ApplyOption) *WorkflowTemplateJobsSparkRJob { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplateJobsSparkRJob{} + + if dcl.StringCanonicalize(des.MainRFileUri, initial.MainRFileUri) || dcl.IsZeroValue(des.MainRFileUri) { + cDes.MainRFileUri = initial.MainRFileUri + } else { + cDes.MainRFileUri = des.MainRFileUri + } + if dcl.StringArrayCanonicalize(des.Args, initial.Args) { + cDes.Args = initial.Args + } else { + cDes.Args = des.Args + } + if dcl.StringArrayCanonicalize(des.FileUris, initial.FileUris) { + cDes.FileUris = initial.FileUris + } else { + cDes.FileUris = des.FileUris + } + if dcl.StringArrayCanonicalize(des.ArchiveUris, initial.ArchiveUris) { + cDes.ArchiveUris = initial.ArchiveUris + } else { + cDes.ArchiveUris = des.ArchiveUris + } + if dcl.IsZeroValue(des.Properties) || (dcl.IsEmptyValueIndirect(des.Properties) && dcl.IsEmptyValueIndirect(initial.Properties)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Properties = initial.Properties + } else { + cDes.Properties = des.Properties + } + cDes.LoggingConfig = canonicalizeWorkflowTemplateJobsSparkRJobLoggingConfig(des.LoggingConfig, initial.LoggingConfig, opts...) 
+ + return cDes +} + +func canonicalizeWorkflowTemplateJobsSparkRJobSlice(des, initial []WorkflowTemplateJobsSparkRJob, opts ...dcl.ApplyOption) []WorkflowTemplateJobsSparkRJob { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplateJobsSparkRJob, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplateJobsSparkRJob(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplateJobsSparkRJob, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplateJobsSparkRJob(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplateJobsSparkRJob(c *Client, des, nw *WorkflowTemplateJobsSparkRJob) *WorkflowTemplateJobsSparkRJob { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplateJobsSparkRJob while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.MainRFileUri, nw.MainRFileUri) { + nw.MainRFileUri = des.MainRFileUri + } + if dcl.StringArrayCanonicalize(des.Args, nw.Args) { + nw.Args = des.Args + } + if dcl.StringArrayCanonicalize(des.FileUris, nw.FileUris) { + nw.FileUris = des.FileUris + } + if dcl.StringArrayCanonicalize(des.ArchiveUris, nw.ArchiveUris) { + nw.ArchiveUris = des.ArchiveUris + } + nw.LoggingConfig = canonicalizeNewWorkflowTemplateJobsSparkRJobLoggingConfig(c, des.LoggingConfig, nw.LoggingConfig) + + return nw +} + +func canonicalizeNewWorkflowTemplateJobsSparkRJobSet(c *Client, des, nw []WorkflowTemplateJobsSparkRJob) []WorkflowTemplateJobsSparkRJob { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []WorkflowTemplateJobsSparkRJob + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplateJobsSparkRJobNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplateJobsSparkRJob(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplateJobsSparkRJobSlice(c *Client, des, nw []WorkflowTemplateJobsSparkRJob) []WorkflowTemplateJobsSparkRJob { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplateJobsSparkRJob + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplateJobsSparkRJob(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplateJobsSparkRJobLoggingConfig(des, initial *WorkflowTemplateJobsSparkRJobLoggingConfig, opts ...dcl.ApplyOption) *WorkflowTemplateJobsSparkRJobLoggingConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplateJobsSparkRJobLoggingConfig{} + + if dcl.IsZeroValue(des.DriverLogLevels) || (dcl.IsEmptyValueIndirect(des.DriverLogLevels) && dcl.IsEmptyValueIndirect(initial.DriverLogLevels)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.DriverLogLevels = initial.DriverLogLevels + } else { + cDes.DriverLogLevels = des.DriverLogLevels + } + + return cDes +} + +func canonicalizeWorkflowTemplateJobsSparkRJobLoggingConfigSlice(des, initial []WorkflowTemplateJobsSparkRJobLoggingConfig, opts ...dcl.ApplyOption) []WorkflowTemplateJobsSparkRJobLoggingConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplateJobsSparkRJobLoggingConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplateJobsSparkRJobLoggingConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplateJobsSparkRJobLoggingConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplateJobsSparkRJobLoggingConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplateJobsSparkRJobLoggingConfig(c *Client, des, nw *WorkflowTemplateJobsSparkRJobLoggingConfig) *WorkflowTemplateJobsSparkRJobLoggingConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplateJobsSparkRJobLoggingConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewWorkflowTemplateJobsSparkRJobLoggingConfigSet(c *Client, des, nw []WorkflowTemplateJobsSparkRJobLoggingConfig) []WorkflowTemplateJobsSparkRJobLoggingConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []WorkflowTemplateJobsSparkRJobLoggingConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplateJobsSparkRJobLoggingConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplateJobsSparkRJobLoggingConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplateJobsSparkRJobLoggingConfigSlice(c *Client, des, nw []WorkflowTemplateJobsSparkRJobLoggingConfig) []WorkflowTemplateJobsSparkRJobLoggingConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplateJobsSparkRJobLoggingConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplateJobsSparkRJobLoggingConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplateJobsSparkSqlJob(des, initial *WorkflowTemplateJobsSparkSqlJob, opts ...dcl.ApplyOption) *WorkflowTemplateJobsSparkSqlJob { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplateJobsSparkSqlJob{} + + if dcl.StringCanonicalize(des.QueryFileUri, initial.QueryFileUri) || dcl.IsZeroValue(des.QueryFileUri) { + cDes.QueryFileUri = initial.QueryFileUri + } else { + cDes.QueryFileUri = des.QueryFileUri + } + cDes.QueryList = canonicalizeWorkflowTemplateJobsSparkSqlJobQueryList(des.QueryList, initial.QueryList, opts...) 
+ if dcl.IsZeroValue(des.ScriptVariables) || (dcl.IsEmptyValueIndirect(des.ScriptVariables) && dcl.IsEmptyValueIndirect(initial.ScriptVariables)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.ScriptVariables = initial.ScriptVariables + } else { + cDes.ScriptVariables = des.ScriptVariables + } + if dcl.IsZeroValue(des.Properties) || (dcl.IsEmptyValueIndirect(des.Properties) && dcl.IsEmptyValueIndirect(initial.Properties)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Properties = initial.Properties + } else { + cDes.Properties = des.Properties + } + if dcl.StringArrayCanonicalize(des.JarFileUris, initial.JarFileUris) { + cDes.JarFileUris = initial.JarFileUris + } else { + cDes.JarFileUris = des.JarFileUris + } + cDes.LoggingConfig = canonicalizeWorkflowTemplateJobsSparkSqlJobLoggingConfig(des.LoggingConfig, initial.LoggingConfig, opts...) + + return cDes +} + +func canonicalizeWorkflowTemplateJobsSparkSqlJobSlice(des, initial []WorkflowTemplateJobsSparkSqlJob, opts ...dcl.ApplyOption) []WorkflowTemplateJobsSparkSqlJob { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplateJobsSparkSqlJob, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplateJobsSparkSqlJob(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplateJobsSparkSqlJob, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplateJobsSparkSqlJob(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplateJobsSparkSqlJob(c *Client, des, nw *WorkflowTemplateJobsSparkSqlJob) *WorkflowTemplateJobsSparkSqlJob { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplateJobsSparkSqlJob while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.QueryFileUri, nw.QueryFileUri) { + nw.QueryFileUri = des.QueryFileUri + } + nw.QueryList = canonicalizeNewWorkflowTemplateJobsSparkSqlJobQueryList(c, des.QueryList, nw.QueryList) + if dcl.StringArrayCanonicalize(des.JarFileUris, nw.JarFileUris) { + nw.JarFileUris = des.JarFileUris + } + nw.LoggingConfig = canonicalizeNewWorkflowTemplateJobsSparkSqlJobLoggingConfig(c, des.LoggingConfig, nw.LoggingConfig) + + return nw +} + +func canonicalizeNewWorkflowTemplateJobsSparkSqlJobSet(c *Client, des, nw []WorkflowTemplateJobsSparkSqlJob) []WorkflowTemplateJobsSparkSqlJob { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplateJobsSparkSqlJob + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplateJobsSparkSqlJobNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplateJobsSparkSqlJob(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) 
+ + return items +} + +func canonicalizeNewWorkflowTemplateJobsSparkSqlJobSlice(c *Client, des, nw []WorkflowTemplateJobsSparkSqlJob) []WorkflowTemplateJobsSparkSqlJob { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplateJobsSparkSqlJob + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplateJobsSparkSqlJob(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplateJobsSparkSqlJobQueryList(des, initial *WorkflowTemplateJobsSparkSqlJobQueryList, opts ...dcl.ApplyOption) *WorkflowTemplateJobsSparkSqlJobQueryList { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplateJobsSparkSqlJobQueryList{} + + if dcl.StringArrayCanonicalize(des.Queries, initial.Queries) { + cDes.Queries = initial.Queries + } else { + cDes.Queries = des.Queries + } + + return cDes +} + +func canonicalizeWorkflowTemplateJobsSparkSqlJobQueryListSlice(des, initial []WorkflowTemplateJobsSparkSqlJobQueryList, opts ...dcl.ApplyOption) []WorkflowTemplateJobsSparkSqlJobQueryList { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplateJobsSparkSqlJobQueryList, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplateJobsSparkSqlJobQueryList(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplateJobsSparkSqlJobQueryList, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplateJobsSparkSqlJobQueryList(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplateJobsSparkSqlJobQueryList(c *Client, des, nw *WorkflowTemplateJobsSparkSqlJobQueryList) *WorkflowTemplateJobsSparkSqlJobQueryList { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplateJobsSparkSqlJobQueryList while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringArrayCanonicalize(des.Queries, nw.Queries) { + nw.Queries = des.Queries + } + + return nw +} + +func canonicalizeNewWorkflowTemplateJobsSparkSqlJobQueryListSet(c *Client, des, nw []WorkflowTemplateJobsSparkSqlJobQueryList) []WorkflowTemplateJobsSparkSqlJobQueryList { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplateJobsSparkSqlJobQueryList + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplateJobsSparkSqlJobQueryListNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplateJobsSparkSqlJobQueryList(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplateJobsSparkSqlJobQueryListSlice(c *Client, des, nw []WorkflowTemplateJobsSparkSqlJobQueryList) []WorkflowTemplateJobsSparkSqlJobQueryList { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplateJobsSparkSqlJobQueryList + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplateJobsSparkSqlJobQueryList(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplateJobsSparkSqlJobLoggingConfig(des, initial *WorkflowTemplateJobsSparkSqlJobLoggingConfig, opts ...dcl.ApplyOption) *WorkflowTemplateJobsSparkSqlJobLoggingConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplateJobsSparkSqlJobLoggingConfig{} + + if dcl.IsZeroValue(des.DriverLogLevels) || (dcl.IsEmptyValueIndirect(des.DriverLogLevels) && dcl.IsEmptyValueIndirect(initial.DriverLogLevels)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.DriverLogLevels = initial.DriverLogLevels + } else { + cDes.DriverLogLevels = des.DriverLogLevels + } + + return cDes +} + +func canonicalizeWorkflowTemplateJobsSparkSqlJobLoggingConfigSlice(des, initial []WorkflowTemplateJobsSparkSqlJobLoggingConfig, opts ...dcl.ApplyOption) []WorkflowTemplateJobsSparkSqlJobLoggingConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplateJobsSparkSqlJobLoggingConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplateJobsSparkSqlJobLoggingConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplateJobsSparkSqlJobLoggingConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplateJobsSparkSqlJobLoggingConfig(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplateJobsSparkSqlJobLoggingConfig(c *Client, des, nw *WorkflowTemplateJobsSparkSqlJobLoggingConfig) *WorkflowTemplateJobsSparkSqlJobLoggingConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplateJobsSparkSqlJobLoggingConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewWorkflowTemplateJobsSparkSqlJobLoggingConfigSet(c *Client, des, nw []WorkflowTemplateJobsSparkSqlJobLoggingConfig) []WorkflowTemplateJobsSparkSqlJobLoggingConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplateJobsSparkSqlJobLoggingConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplateJobsSparkSqlJobLoggingConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplateJobsSparkSqlJobLoggingConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplateJobsSparkSqlJobLoggingConfigSlice(c *Client, des, nw []WorkflowTemplateJobsSparkSqlJobLoggingConfig) []WorkflowTemplateJobsSparkSqlJobLoggingConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplateJobsSparkSqlJobLoggingConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplateJobsSparkSqlJobLoggingConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplateJobsPrestoJob(des, initial *WorkflowTemplateJobsPrestoJob, opts ...dcl.ApplyOption) *WorkflowTemplateJobsPrestoJob { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplateJobsPrestoJob{} + + if dcl.StringCanonicalize(des.QueryFileUri, initial.QueryFileUri) || dcl.IsZeroValue(des.QueryFileUri) { + cDes.QueryFileUri = initial.QueryFileUri + } else { + cDes.QueryFileUri = des.QueryFileUri + } + cDes.QueryList = canonicalizeWorkflowTemplateJobsPrestoJobQueryList(des.QueryList, initial.QueryList, opts...) + if dcl.BoolCanonicalize(des.ContinueOnFailure, initial.ContinueOnFailure) || dcl.IsZeroValue(des.ContinueOnFailure) { + cDes.ContinueOnFailure = initial.ContinueOnFailure + } else { + cDes.ContinueOnFailure = des.ContinueOnFailure + } + if dcl.StringCanonicalize(des.OutputFormat, initial.OutputFormat) || dcl.IsZeroValue(des.OutputFormat) { + cDes.OutputFormat = initial.OutputFormat + } else { + cDes.OutputFormat = des.OutputFormat + } + if dcl.StringArrayCanonicalize(des.ClientTags, initial.ClientTags) { + cDes.ClientTags = initial.ClientTags + } else { + cDes.ClientTags = des.ClientTags + } + if dcl.IsZeroValue(des.Properties) || (dcl.IsEmptyValueIndirect(des.Properties) && dcl.IsEmptyValueIndirect(initial.Properties)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Properties = initial.Properties + } else { + cDes.Properties = des.Properties + } + cDes.LoggingConfig = canonicalizeWorkflowTemplateJobsPrestoJobLoggingConfig(des.LoggingConfig, initial.LoggingConfig, opts...) 
+ + return cDes +} + +func canonicalizeWorkflowTemplateJobsPrestoJobSlice(des, initial []WorkflowTemplateJobsPrestoJob, opts ...dcl.ApplyOption) []WorkflowTemplateJobsPrestoJob { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplateJobsPrestoJob, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplateJobsPrestoJob(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplateJobsPrestoJob, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplateJobsPrestoJob(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplateJobsPrestoJob(c *Client, des, nw *WorkflowTemplateJobsPrestoJob) *WorkflowTemplateJobsPrestoJob { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplateJobsPrestoJob while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.QueryFileUri, nw.QueryFileUri) { + nw.QueryFileUri = des.QueryFileUri + } + nw.QueryList = canonicalizeNewWorkflowTemplateJobsPrestoJobQueryList(c, des.QueryList, nw.QueryList) + if dcl.BoolCanonicalize(des.ContinueOnFailure, nw.ContinueOnFailure) { + nw.ContinueOnFailure = des.ContinueOnFailure + } + if dcl.StringCanonicalize(des.OutputFormat, nw.OutputFormat) { + nw.OutputFormat = des.OutputFormat + } + if dcl.StringArrayCanonicalize(des.ClientTags, nw.ClientTags) { + nw.ClientTags = des.ClientTags + } + nw.LoggingConfig = canonicalizeNewWorkflowTemplateJobsPrestoJobLoggingConfig(c, des.LoggingConfig, nw.LoggingConfig) + + return nw +} + +func canonicalizeNewWorkflowTemplateJobsPrestoJobSet(c *Client, des, nw []WorkflowTemplateJobsPrestoJob) []WorkflowTemplateJobsPrestoJob { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplateJobsPrestoJob + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplateJobsPrestoJobNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplateJobsPrestoJob(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplateJobsPrestoJobSlice(c *Client, des, nw []WorkflowTemplateJobsPrestoJob) []WorkflowTemplateJobsPrestoJob { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplateJobsPrestoJob + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplateJobsPrestoJob(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplateJobsPrestoJobQueryList(des, initial *WorkflowTemplateJobsPrestoJobQueryList, opts ...dcl.ApplyOption) *WorkflowTemplateJobsPrestoJobQueryList { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplateJobsPrestoJobQueryList{} + + if dcl.StringArrayCanonicalize(des.Queries, initial.Queries) { + cDes.Queries = initial.Queries + } else { + cDes.Queries = des.Queries + } + + return cDes +} + +func canonicalizeWorkflowTemplateJobsPrestoJobQueryListSlice(des, initial []WorkflowTemplateJobsPrestoJobQueryList, opts ...dcl.ApplyOption) []WorkflowTemplateJobsPrestoJobQueryList { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplateJobsPrestoJobQueryList, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplateJobsPrestoJobQueryList(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplateJobsPrestoJobQueryList, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplateJobsPrestoJobQueryList(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplateJobsPrestoJobQueryList(c *Client, des, nw *WorkflowTemplateJobsPrestoJobQueryList) *WorkflowTemplateJobsPrestoJobQueryList { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplateJobsPrestoJobQueryList while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringArrayCanonicalize(des.Queries, nw.Queries) { + nw.Queries = des.Queries + } + + return nw +} + +func canonicalizeNewWorkflowTemplateJobsPrestoJobQueryListSet(c *Client, des, nw []WorkflowTemplateJobsPrestoJobQueryList) []WorkflowTemplateJobsPrestoJobQueryList { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplateJobsPrestoJobQueryList + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplateJobsPrestoJobQueryListNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplateJobsPrestoJobQueryList(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplateJobsPrestoJobQueryListSlice(c *Client, des, nw []WorkflowTemplateJobsPrestoJobQueryList) []WorkflowTemplateJobsPrestoJobQueryList { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplateJobsPrestoJobQueryList + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplateJobsPrestoJobQueryList(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplateJobsPrestoJobLoggingConfig(des, initial *WorkflowTemplateJobsPrestoJobLoggingConfig, opts ...dcl.ApplyOption) *WorkflowTemplateJobsPrestoJobLoggingConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplateJobsPrestoJobLoggingConfig{} + + if dcl.IsZeroValue(des.DriverLogLevels) || (dcl.IsEmptyValueIndirect(des.DriverLogLevels) && dcl.IsEmptyValueIndirect(initial.DriverLogLevels)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.DriverLogLevels = initial.DriverLogLevels + } else { + cDes.DriverLogLevels = des.DriverLogLevels + } + + return cDes +} + +func canonicalizeWorkflowTemplateJobsPrestoJobLoggingConfigSlice(des, initial []WorkflowTemplateJobsPrestoJobLoggingConfig, opts ...dcl.ApplyOption) []WorkflowTemplateJobsPrestoJobLoggingConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplateJobsPrestoJobLoggingConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplateJobsPrestoJobLoggingConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplateJobsPrestoJobLoggingConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplateJobsPrestoJobLoggingConfig(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplateJobsPrestoJobLoggingConfig(c *Client, des, nw *WorkflowTemplateJobsPrestoJobLoggingConfig) *WorkflowTemplateJobsPrestoJobLoggingConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplateJobsPrestoJobLoggingConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewWorkflowTemplateJobsPrestoJobLoggingConfigSet(c *Client, des, nw []WorkflowTemplateJobsPrestoJobLoggingConfig) []WorkflowTemplateJobsPrestoJobLoggingConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplateJobsPrestoJobLoggingConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplateJobsPrestoJobLoggingConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplateJobsPrestoJobLoggingConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplateJobsPrestoJobLoggingConfigSlice(c *Client, des, nw []WorkflowTemplateJobsPrestoJobLoggingConfig) []WorkflowTemplateJobsPrestoJobLoggingConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplateJobsPrestoJobLoggingConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplateJobsPrestoJobLoggingConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplateJobsScheduling(des, initial *WorkflowTemplateJobsScheduling, opts ...dcl.ApplyOption) *WorkflowTemplateJobsScheduling { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplateJobsScheduling{} + + if dcl.IsZeroValue(des.MaxFailuresPerHour) || (dcl.IsEmptyValueIndirect(des.MaxFailuresPerHour) && dcl.IsEmptyValueIndirect(initial.MaxFailuresPerHour)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.MaxFailuresPerHour = initial.MaxFailuresPerHour + } else { + cDes.MaxFailuresPerHour = des.MaxFailuresPerHour + } + if dcl.IsZeroValue(des.MaxFailuresTotal) || (dcl.IsEmptyValueIndirect(des.MaxFailuresTotal) && dcl.IsEmptyValueIndirect(initial.MaxFailuresTotal)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.MaxFailuresTotal = initial.MaxFailuresTotal + } else { + cDes.MaxFailuresTotal = des.MaxFailuresTotal + } + + return cDes +} + +func canonicalizeWorkflowTemplateJobsSchedulingSlice(des, initial []WorkflowTemplateJobsScheduling, opts ...dcl.ApplyOption) []WorkflowTemplateJobsScheduling { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplateJobsScheduling, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplateJobsScheduling(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplateJobsScheduling, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplateJobsScheduling(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplateJobsScheduling(c *Client, des, nw *WorkflowTemplateJobsScheduling) *WorkflowTemplateJobsScheduling { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplateJobsScheduling while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewWorkflowTemplateJobsSchedulingSet(c *Client, des, nw []WorkflowTemplateJobsScheduling) []WorkflowTemplateJobsScheduling { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplateJobsScheduling + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplateJobsSchedulingNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplateJobsScheduling(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplateJobsSchedulingSlice(c *Client, des, nw []WorkflowTemplateJobsScheduling) []WorkflowTemplateJobsScheduling { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplateJobsScheduling + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplateJobsScheduling(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplateParameters(des, initial *WorkflowTemplateParameters, opts ...dcl.ApplyOption) *WorkflowTemplateParameters { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplateParameters{} + + if dcl.StringCanonicalize(des.Name, initial.Name) || dcl.IsZeroValue(des.Name) { + cDes.Name = initial.Name + } else { + cDes.Name = des.Name + } + if dcl.StringArrayCanonicalize(des.Fields, initial.Fields) { + cDes.Fields = initial.Fields + } else { + cDes.Fields = des.Fields + } + if dcl.StringCanonicalize(des.Description, initial.Description) || dcl.IsZeroValue(des.Description) { + cDes.Description = initial.Description + } else { + cDes.Description = des.Description + } + cDes.Validation = canonicalizeWorkflowTemplateParametersValidation(des.Validation, initial.Validation, opts...) + + return cDes +} + +func canonicalizeWorkflowTemplateParametersSlice(des, initial []WorkflowTemplateParameters, opts ...dcl.ApplyOption) []WorkflowTemplateParameters { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplateParameters, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplateParameters(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplateParameters, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplateParameters(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplateParameters(c *Client, des, nw *WorkflowTemplateParameters) *WorkflowTemplateParameters { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplateParameters while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Name, nw.Name) { + nw.Name = des.Name + } + if dcl.StringArrayCanonicalize(des.Fields, nw.Fields) { + nw.Fields = des.Fields + } + if dcl.StringCanonicalize(des.Description, nw.Description) { + nw.Description = des.Description + } + nw.Validation = canonicalizeNewWorkflowTemplateParametersValidation(c, des.Validation, nw.Validation) + + return nw +} + +func canonicalizeNewWorkflowTemplateParametersSet(c *Client, des, nw []WorkflowTemplateParameters) []WorkflowTemplateParameters { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplateParameters + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplateParametersNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplateParameters(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplateParametersSlice(c *Client, des, nw []WorkflowTemplateParameters) []WorkflowTemplateParameters { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplateParameters + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplateParameters(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplateParametersValidation(des, initial *WorkflowTemplateParametersValidation, opts ...dcl.ApplyOption) *WorkflowTemplateParametersValidation { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplateParametersValidation{} + + cDes.Regex = canonicalizeWorkflowTemplateParametersValidationRegex(des.Regex, initial.Regex, opts...) + cDes.Values = canonicalizeWorkflowTemplateParametersValidationValues(des.Values, initial.Values, opts...) + + return cDes +} + +func canonicalizeWorkflowTemplateParametersValidationSlice(des, initial []WorkflowTemplateParametersValidation, opts ...dcl.ApplyOption) []WorkflowTemplateParametersValidation { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplateParametersValidation, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplateParametersValidation(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplateParametersValidation, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplateParametersValidation(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplateParametersValidation(c *Client, des, nw *WorkflowTemplateParametersValidation) *WorkflowTemplateParametersValidation { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplateParametersValidation while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + nw.Regex = canonicalizeNewWorkflowTemplateParametersValidationRegex(c, des.Regex, nw.Regex) + nw.Values = canonicalizeNewWorkflowTemplateParametersValidationValues(c, des.Values, nw.Values) + + return nw +} + +func canonicalizeNewWorkflowTemplateParametersValidationSet(c *Client, des, nw []WorkflowTemplateParametersValidation) []WorkflowTemplateParametersValidation { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplateParametersValidation + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplateParametersValidationNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplateParametersValidation(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplateParametersValidationSlice(c *Client, des, nw []WorkflowTemplateParametersValidation) []WorkflowTemplateParametersValidation { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplateParametersValidation + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplateParametersValidation(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplateParametersValidationRegex(des, initial *WorkflowTemplateParametersValidationRegex, opts ...dcl.ApplyOption) *WorkflowTemplateParametersValidationRegex { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplateParametersValidationRegex{} + + if dcl.StringArrayCanonicalize(des.Regexes, initial.Regexes) { + cDes.Regexes = initial.Regexes + } else { + cDes.Regexes = des.Regexes + } + + return cDes +} + +func canonicalizeWorkflowTemplateParametersValidationRegexSlice(des, initial []WorkflowTemplateParametersValidationRegex, opts ...dcl.ApplyOption) []WorkflowTemplateParametersValidationRegex { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplateParametersValidationRegex, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplateParametersValidationRegex(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplateParametersValidationRegex, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplateParametersValidationRegex(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplateParametersValidationRegex(c *Client, des, nw *WorkflowTemplateParametersValidationRegex) *WorkflowTemplateParametersValidationRegex { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplateParametersValidationRegex while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringArrayCanonicalize(des.Regexes, nw.Regexes) { + nw.Regexes = des.Regexes + } + + return nw +} + +func canonicalizeNewWorkflowTemplateParametersValidationRegexSet(c *Client, des, nw []WorkflowTemplateParametersValidationRegex) []WorkflowTemplateParametersValidationRegex { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplateParametersValidationRegex + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplateParametersValidationRegexNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplateParametersValidationRegex(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplateParametersValidationRegexSlice(c *Client, des, nw []WorkflowTemplateParametersValidationRegex) []WorkflowTemplateParametersValidationRegex { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplateParametersValidationRegex + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplateParametersValidationRegex(c, &d, &n)) + } + + return items +} + +func canonicalizeWorkflowTemplateParametersValidationValues(des, initial *WorkflowTemplateParametersValidationValues, opts ...dcl.ApplyOption) *WorkflowTemplateParametersValidationValues { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &WorkflowTemplateParametersValidationValues{} + + if dcl.StringArrayCanonicalize(des.Values, initial.Values) { + cDes.Values = initial.Values + } else { + cDes.Values = des.Values + } + + return cDes +} + +func canonicalizeWorkflowTemplateParametersValidationValuesSlice(des, initial []WorkflowTemplateParametersValidationValues, opts ...dcl.ApplyOption) []WorkflowTemplateParametersValidationValues { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]WorkflowTemplateParametersValidationValues, 0, len(des)) + for _, d := range des { + cd := canonicalizeWorkflowTemplateParametersValidationValues(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]WorkflowTemplateParametersValidationValues, 0, len(des)) + for i, d := range des { + cd := canonicalizeWorkflowTemplateParametersValidationValues(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewWorkflowTemplateParametersValidationValues(c *Client, des, nw *WorkflowTemplateParametersValidationValues) *WorkflowTemplateParametersValidationValues { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for WorkflowTemplateParametersValidationValues while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringArrayCanonicalize(des.Values, nw.Values) { + nw.Values = des.Values + } + + return nw +} + +func canonicalizeNewWorkflowTemplateParametersValidationValuesSet(c *Client, des, nw []WorkflowTemplateParametersValidationValues) []WorkflowTemplateParametersValidationValues { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []WorkflowTemplateParametersValidationValues + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareWorkflowTemplateParametersValidationValuesNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewWorkflowTemplateParametersValidationValues(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewWorkflowTemplateParametersValidationValuesSlice(c *Client, des, nw []WorkflowTemplateParametersValidationValues) []WorkflowTemplateParametersValidationValues { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []WorkflowTemplateParametersValidationValues + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewWorkflowTemplateParametersValidationValues(c, &d, &n)) + } + + return items +} + +// The differ returns a list of diffs, along with a list of operations that should be taken +// to remedy them. Right now, it does not attempt to consolidate operations - if several +// fields can be fixed with a patch update, it will perform the patch several times. +// Diffs on some fields will be ignored if the `desired` state has an empty (nil) +// value. This empty value indicates that the user does not care about the state for +// the field. Empty fields on the actual object will cause diffs. +// TODO(magic-modules-eng): for efficiency in some resources, add batching. +func diffWorkflowTemplate(c *Client, desired, actual *WorkflowTemplate, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) { + if desired == nil || actual == nil { + return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual) + } + + c.Config.Logger.Infof("Diff function called with desired state: %v", desired) + c.Config.Logger.Infof("Diff function called with actual state: %v", actual) + + var fn dcl.FieldName + var newDiffs []*dcl.FieldDiff + // New style diffs. + if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Version, actual.Version, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Version")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.CreateTime, actual.CreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Labels, actual.Labels, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Labels")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.EncryptionConfig, actual.EncryptionConfig, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplateEncryptionConfigNewStyle, EmptyObject: EmptyWorkflowTemplateEncryptionConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("EncryptionConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Placement, actual.Placement, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplatePlacementNewStyle, EmptyObject: EmptyWorkflowTemplatePlacement, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Placement")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Jobs, actual.Jobs, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplateJobsNewStyle, EmptyObject: EmptyWorkflowTemplateJobs, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Jobs")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Parameters, actual.Parameters, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplateParametersNewStyle, EmptyObject: EmptyWorkflowTemplateParameters, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Parameters")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.DagTimeout, actual.DagTimeout, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DagTimeout")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Location, actual.Location, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Location")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if len(newDiffs) > 0 { + c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs) + } + return newDiffs, nil +} +func compareWorkflowTemplateEncryptionConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplateEncryptionConfig) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplateEncryptionConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateEncryptionConfig or *WorkflowTemplateEncryptionConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplateEncryptionConfig) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplateEncryptionConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateEncryptionConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.KmsKey, actual.KmsKey, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KmsKey")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplatePlacementNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacement) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacement) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacement or *WorkflowTemplatePlacement", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacement) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacement) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacement", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.ManagedCluster, actual.ManagedCluster, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplatePlacementManagedClusterNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedCluster, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ManagedCluster")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ClusterSelector, actual.ClusterSelector, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplatePlacementClusterSelectorNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementClusterSelector, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ClusterSelector")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedCluster) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedCluster) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedCluster or *WorkflowTemplatePlacementManagedCluster", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedCluster) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedCluster) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedCluster", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.ClusterName, actual.ClusterName, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ClusterName")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Config, actual.Config, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Config")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Labels, actual.Labels, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Labels")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfig) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfig or *WorkflowTemplatePlacementManagedClusterConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfig) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.StagingBucket, actual.StagingBucket, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ConfigBucket")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.TempBucket, actual.TempBucket, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("TempBucket")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.GceClusterConfig, actual.GceClusterConfig, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("GceClusterConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.MasterConfig, actual.MasterConfig, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigMasterConfigNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfigMasterConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MasterConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.WorkerConfig, actual.WorkerConfig, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigWorkerConfigNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfigWorkerConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("WorkerConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.SecondaryWorkerConfig, actual.SecondaryWorkerConfig, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SecondaryWorkerConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.SoftwareConfig, actual.SoftwareConfig, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SoftwareConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.InitializationActions, actual.InitializationActions, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigInitializationActionsNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfigInitializationActions, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InitializationActions")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.EncryptionConfig, actual.EncryptionConfig, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("EncryptionConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.AutoscalingConfig, actual.AutoscalingConfig, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AutoscalingConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.SecurityConfig, actual.SecurityConfig, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigSecurityConfigNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfigSecurityConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SecurityConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.LifecycleConfig, actual.LifecycleConfig, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("LifecycleConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.EndpointConfig, actual.EndpointConfig, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigEndpointConfigNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfigEndpointConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("EndpointConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } +{{- if ne $.TargetVersionName "ga" }} + + if ds, err := dcl.Diff(desired.GkeClusterConfig, actual.GkeClusterConfig, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("GkeClusterConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.MetastoreConfig, actual.MetastoreConfig, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MetastoreConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } +{{- end }} + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig or *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Zone, actual.Zone, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ZoneUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Network, actual.Network, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NetworkUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Subnetwork, actual.Subnetwork, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SubnetworkUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.InternalIPOnly, actual.InternalIPOnly, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InternalIpOnly")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.PrivateIPv6GoogleAccess, actual.PrivateIPv6GoogleAccess, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PrivateIpv6GoogleAccess")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ServiceAccount, actual.ServiceAccount, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ServiceAccount")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ServiceAccountScopes, actual.ServiceAccountScopes, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ServiceAccountScopes")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Tags, actual.Tags, dcl.DiffInfo{Type: "Set", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Tags")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Metadata, actual.Metadata, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Metadata")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.ReservationAffinity, actual.ReservationAffinity, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ReservationAffinity")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.NodeGroupAffinity, actual.NodeGroupAffinity, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NodeGroupAffinity")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ShieldedInstanceConfig, actual.ShieldedInstanceConfig, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfigNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ShieldedInstanceConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity or *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.ConsumeReservationType, actual.ConsumeReservationType, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ConsumeReservationType")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Key, actual.Key, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Key")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Values, actual.Values, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Values")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity or *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.NodeGroup, actual.NodeGroup, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NodeGroupUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig or *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.EnableSecureBoot, actual.EnableSecureBoot, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("EnableSecureBoot")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.EnableVtpm, actual.EnableVtpm, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("EnableVtpm")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.EnableIntegrityMonitoring, actual.EnableIntegrityMonitoring, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("EnableIntegrityMonitoring")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigMasterConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfigMasterConfig) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfigMasterConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigMasterConfig or *WorkflowTemplatePlacementManagedClusterConfigMasterConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfigMasterConfig) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfigMasterConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigMasterConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.NumInstances, actual.NumInstances, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NumInstances")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.InstanceNames, actual.InstanceNames, dcl.DiffInfo{OutputOnly: true, ServerDefault: true, Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstanceNames")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Image, actual.Image, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ImageUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.MachineType, actual.MachineType, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MachineTypeUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.DiskConfig, actual.DiskConfig, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DiskConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.IsPreemptible, actual.IsPreemptible, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("IsPreemptible")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Preemptibility, actual.Preemptibility, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Preemptibility")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ManagedGroupConfig, actual.ManagedGroupConfig, dcl.DiffInfo{OutputOnly: true, ServerDefault: true, ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ManagedGroupConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Accelerators, actual.Accelerators, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorsNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Accelerators")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.MinCpuPlatform, actual.MinCpuPlatform, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MinCpuPlatform")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig or *WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.BootDiskType, actual.BootDiskType, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("BootDiskType")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + 
diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.BootDiskSizeGb, actual.BootDiskSizeGb, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("BootDiskSizeGb")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.NumLocalSsds, actual.NumLocalSsds, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NumLocalSsds")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig or *WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.InstanceTemplateName, actual.InstanceTemplateName, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstanceTemplateName")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.InstanceGroupManagerName, actual.InstanceGroupManagerName, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstanceGroupManagerName")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators or *WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.AcceleratorType, actual.AcceleratorType, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AcceleratorTypeUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.AcceleratorCount, actual.AcceleratorCount, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AcceleratorCount")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigWorkerConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfigWorkerConfig) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfigWorkerConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigWorkerConfig or *WorkflowTemplatePlacementManagedClusterConfigWorkerConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfigWorkerConfig) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfigWorkerConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigWorkerConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.NumInstances, actual.NumInstances, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NumInstances")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.InstanceNames, actual.InstanceNames, dcl.DiffInfo{OutputOnly: true, ServerDefault: true, Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstanceNames")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Image, actual.Image, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ImageUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.MachineType, actual.MachineType, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MachineTypeUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.DiskConfig, actual.DiskConfig, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DiskConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.IsPreemptible, actual.IsPreemptible, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("IsPreemptible")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Preemptibility, actual.Preemptibility, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Preemptibility")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ManagedGroupConfig, actual.ManagedGroupConfig, dcl.DiffInfo{OutputOnly: true, ServerDefault: true, ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ManagedGroupConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Accelerators, actual.Accelerators, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorsNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Accelerators")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.MinCpuPlatform, actual.MinCpuPlatform, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MinCpuPlatform")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig or *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.BootDiskType, actual.BootDiskType, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("BootDiskType")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + 
diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.BootDiskSizeGb, actual.BootDiskSizeGb, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("BootDiskSizeGb")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.NumLocalSsds, actual.NumLocalSsds, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NumLocalSsds")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig or *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.InstanceTemplateName, actual.InstanceTemplateName, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstanceTemplateName")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.InstanceGroupManagerName, actual.InstanceGroupManagerName, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstanceGroupManagerName")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators or *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.AcceleratorType, actual.AcceleratorType, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AcceleratorTypeUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.AcceleratorCount, actual.AcceleratorCount, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AcceleratorCount")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig or *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.NumInstances, actual.NumInstances, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NumInstances")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.InstanceNames, actual.InstanceNames, dcl.DiffInfo{OutputOnly: true, ServerDefault: true, Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstanceNames")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Image, actual.Image, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ImageUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.MachineType, actual.MachineType, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MachineTypeUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.DiskConfig, actual.DiskConfig, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DiskConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.IsPreemptible, actual.IsPreemptible, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("IsPreemptible")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Preemptibility, actual.Preemptibility, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Preemptibility")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ManagedGroupConfig, actual.ManagedGroupConfig, dcl.DiffInfo{OutputOnly: true, ServerDefault: true, ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ManagedGroupConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Accelerators, actual.Accelerators, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorsNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Accelerators")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.MinCpuPlatform, actual.MinCpuPlatform, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MinCpuPlatform")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig or *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.BootDiskType, actual.BootDiskType, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, 
fn.AddNest("BootDiskType")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.BootDiskSizeGb, actual.BootDiskSizeGb, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("BootDiskSizeGb")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.NumLocalSsds, actual.NumLocalSsds, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NumLocalSsds")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig or *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.InstanceTemplateName, actual.InstanceTemplateName, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, 
fn.AddNest("InstanceTemplateName")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.InstanceGroupManagerName, actual.InstanceGroupManagerName, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstanceGroupManagerName")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators or *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.AcceleratorType, actual.AcceleratorType, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AcceleratorTypeUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.AcceleratorCount, actual.AcceleratorCount, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AcceleratorCount")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig or *WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.ImageVersion, actual.ImageVersion, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ImageVersion")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Properties, actual.Properties, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Properties")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.OptionalComponents, actual.OptionalComponents, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("OptionalComponents")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigInitializationActionsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfigInitializationActions) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfigInitializationActions) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigInitializationActions or *WorkflowTemplatePlacementManagedClusterConfigInitializationActions", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfigInitializationActions) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfigInitializationActions) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigInitializationActions", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.ExecutableFile, actual.ExecutableFile, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ExecutableFile")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ExecutionTimeout, actual.ExecutionTimeout, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ExecutionTimeout")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig or *WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.GcePdKmsKeyName, actual.GcePdKmsKeyName, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("GcePdKmsKeyName")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig or *WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Policy, actual.Policy, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PolicyUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigSecurityConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfigSecurityConfig) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfigSecurityConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigSecurityConfig or *WorkflowTemplatePlacementManagedClusterConfigSecurityConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfigSecurityConfig) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfigSecurityConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigSecurityConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.KerberosConfig, actual.KerberosConfig, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KerberosConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig or *WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.EnableKerberos, actual.EnableKerberos, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("EnableKerberos")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.RootPrincipalPassword, actual.RootPrincipalPassword, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("RootPrincipalPasswordUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.KmsKey, actual.KmsKey, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KmsKeyUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Keystore, actual.Keystore, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KeystoreUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Truststore, actual.Truststore, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("TruststoreUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.KeystorePassword, actual.KeystorePassword, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KeystorePasswordUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.KeyPassword, actual.KeyPassword, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KeyPasswordUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.TruststorePassword, actual.TruststorePassword, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("TruststorePasswordUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.CrossRealmTrustRealm, actual.CrossRealmTrustRealm, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CrossRealmTrustRealm")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.CrossRealmTrustKdc, actual.CrossRealmTrustKdc, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CrossRealmTrustKdc")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.CrossRealmTrustAdminServer, actual.CrossRealmTrustAdminServer, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CrossRealmTrustAdminServer")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.CrossRealmTrustSharedPassword, actual.CrossRealmTrustSharedPassword, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CrossRealmTrustSharedPasswordUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.KdcDbKey, actual.KdcDbKey, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KdcDbKeyUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.TgtLifetimeHours, actual.TgtLifetimeHours, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("TgtLifetimeHours")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Realm, actual.Realm, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Realm")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig or *WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.IdleDeleteTtl, actual.IdleDeleteTtl, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("IdleDeleteTtl")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.AutoDeleteTime, actual.AutoDeleteTime, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AutoDeleteTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.AutoDeleteTtl, actual.AutoDeleteTtl, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AutoDeleteTtl")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.IdleStartTime, actual.IdleStartTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("IdleStartTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigEndpointConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfigEndpointConfig) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfigEndpointConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigEndpointConfig or *WorkflowTemplatePlacementManagedClusterConfigEndpointConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfigEndpointConfig) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfigEndpointConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigEndpointConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.HttpPorts, actual.HttpPorts, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("HttpPorts")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.EnableHttpPortAccess, actual.EnableHttpPortAccess, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("EnableHttpPortAccess")); len(ds) != 0 || err != nil { +{{- if ne $.TargetVersionName "ga" }} + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig or *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.NamespacedGkeDeploymentTarget, actual.NamespacedGkeDeploymentTarget, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetNewStyle, EmptyObject: EmptyWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NamespacedGkeDeploymentTarget")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget or *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.TargetGkeCluster, actual.TargetGkeCluster, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("TargetGkeCluster")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ClusterNamespace, actual.ClusterNamespace, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ClusterNamespace")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig or *WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.DataprocMetastoreService, actual.DataprocMetastoreService, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DataprocMetastoreService")); len(ds) != 0 || err != nil { +{{- end }} + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplatePlacementClusterSelectorNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplatePlacementClusterSelector) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplatePlacementClusterSelector) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementClusterSelector or *WorkflowTemplatePlacementClusterSelector", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplatePlacementClusterSelector) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplatePlacementClusterSelector) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplatePlacementClusterSelector", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Zone, actual.Zone, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Zone")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ClusterLabels, actual.ClusterLabels, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ClusterLabels")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplateJobsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplateJobs) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplateJobs) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobs or *WorkflowTemplateJobs", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplateJobs) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplateJobs) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobs", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.StepId, actual.StepId, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("StepId")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.HadoopJob, actual.HadoopJob, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplateJobsHadoopJobNewStyle, EmptyObject: EmptyWorkflowTemplateJobsHadoopJob, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("HadoopJob")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.SparkJob, actual.SparkJob, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplateJobsSparkJobNewStyle, EmptyObject: EmptyWorkflowTemplateJobsSparkJob, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SparkJob")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.PysparkJob, actual.PysparkJob, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplateJobsPysparkJobNewStyle, EmptyObject: EmptyWorkflowTemplateJobsPysparkJob, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PysparkJob")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.HiveJob, actual.HiveJob, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplateJobsHiveJobNewStyle, EmptyObject: EmptyWorkflowTemplateJobsHiveJob, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("HiveJob")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.PigJob, actual.PigJob, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplateJobsPigJobNewStyle, EmptyObject: EmptyWorkflowTemplateJobsPigJob, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PigJob")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.SparkRJob, actual.SparkRJob, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplateJobsSparkRJobNewStyle, EmptyObject: EmptyWorkflowTemplateJobsSparkRJob, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SparkRJob")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.SparkSqlJob, actual.SparkSqlJob, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplateJobsSparkSqlJobNewStyle, EmptyObject: EmptyWorkflowTemplateJobsSparkSqlJob, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SparkSqlJob")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.PrestoJob, actual.PrestoJob, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplateJobsPrestoJobNewStyle, EmptyObject: EmptyWorkflowTemplateJobsPrestoJob, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PrestoJob")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Labels, actual.Labels, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Labels")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Scheduling, actual.Scheduling, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplateJobsSchedulingNewStyle, EmptyObject: EmptyWorkflowTemplateJobsScheduling, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Scheduling")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.PrerequisiteStepIds, actual.PrerequisiteStepIds, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PrerequisiteStepIds")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareWorkflowTemplateJobsHadoopJobNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplateJobsHadoopJob) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplateJobsHadoopJob) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsHadoopJob or *WorkflowTemplateJobsHadoopJob", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplateJobsHadoopJob) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplateJobsHadoopJob) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsHadoopJob", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.MainJarFileUri, actual.MainJarFileUri, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MainJarFileUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.MainClass, actual.MainClass, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MainClass")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Args, actual.Args, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Args")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.JarFileUris, actual.JarFileUris, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("JarFileUris")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.FileUris, actual.FileUris, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("FileUris")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ArchiveUris, actual.ArchiveUris, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ArchiveUris")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Properties, actual.Properties, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Properties")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.LoggingConfig, actual.LoggingConfig, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplateJobsHadoopJobLoggingConfigNewStyle, EmptyObject: EmptyWorkflowTemplateJobsHadoopJobLoggingConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("LoggingConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplateJobsHadoopJobLoggingConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplateJobsHadoopJobLoggingConfig) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplateJobsHadoopJobLoggingConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsHadoopJobLoggingConfig or *WorkflowTemplateJobsHadoopJobLoggingConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplateJobsHadoopJobLoggingConfig) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplateJobsHadoopJobLoggingConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsHadoopJobLoggingConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.DriverLogLevels, actual.DriverLogLevels, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DriverLogLevels")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplateJobsSparkJobNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplateJobsSparkJob) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplateJobsSparkJob) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsSparkJob or *WorkflowTemplateJobsSparkJob", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplateJobsSparkJob) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplateJobsSparkJob) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsSparkJob", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.MainJarFileUri, actual.MainJarFileUri, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MainJarFileUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.MainClass, actual.MainClass, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MainClass")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Args, actual.Args, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Args")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.JarFileUris, actual.JarFileUris, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("JarFileUris")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.FileUris, actual.FileUris, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("FileUris")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.ArchiveUris, actual.ArchiveUris, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ArchiveUris")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Properties, actual.Properties, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Properties")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.LoggingConfig, actual.LoggingConfig, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplateJobsSparkJobLoggingConfigNewStyle, EmptyObject: EmptyWorkflowTemplateJobsSparkJobLoggingConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("LoggingConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareWorkflowTemplateJobsSparkJobLoggingConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplateJobsSparkJobLoggingConfig) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplateJobsSparkJobLoggingConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsSparkJobLoggingConfig or *WorkflowTemplateJobsSparkJobLoggingConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplateJobsSparkJobLoggingConfig) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplateJobsSparkJobLoggingConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsSparkJobLoggingConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.DriverLogLevels, actual.DriverLogLevels, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DriverLogLevels")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareWorkflowTemplateJobsPysparkJobNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*WorkflowTemplateJobsPysparkJob) + if !ok { + desiredNotPointer, ok := d.(WorkflowTemplateJobsPysparkJob) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsPysparkJob or *WorkflowTemplateJobsPysparkJob", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*WorkflowTemplateJobsPysparkJob) + if !ok { + actualNotPointer, ok := a.(WorkflowTemplateJobsPysparkJob) + if !ok { + return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsPysparkJob", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.MainPythonFileUri, actual.MainPythonFileUri, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MainPythonFileUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Args, actual.Args, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Args")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.PythonFileUris, actual.PythonFileUris, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PythonFileUris")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.JarFileUris, actual.JarFileUris, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("JarFileUris")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.FileUris, actual.FileUris, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("FileUris")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
	}

	if ds, err := dcl.Diff(desired.ArchiveUris, actual.ArchiveUris, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ArchiveUris")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Properties, actual.Properties, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Properties")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.LoggingConfig, actual.LoggingConfig, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplateJobsPysparkJobLoggingConfigNewStyle, EmptyObject: EmptyWorkflowTemplateJobsPysparkJobLoggingConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("LoggingConfig")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareWorkflowTemplateJobsPysparkJobLoggingConfigNewStyle diffs the desired vs.
// actual PysparkJob logging config. Any field difference is classified as requiring
// recreation of the resource (OperationSelector: dcl.RequiresRecreate on every field).
func compareWorkflowTemplateJobsPysparkJobLoggingConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	// Each argument may arrive as either *T or T; normalize both to pointers.
	desired, ok := d.(*WorkflowTemplateJobsPysparkJobLoggingConfig)
	if !ok {
		desiredNotPointer, ok := d.(WorkflowTemplateJobsPysparkJobLoggingConfig)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsPysparkJobLoggingConfig or *WorkflowTemplateJobsPysparkJobLoggingConfig", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*WorkflowTemplateJobsPysparkJobLoggingConfig)
	if !ok {
		actualNotPointer, ok := a.(WorkflowTemplateJobsPysparkJobLoggingConfig)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsPysparkJobLoggingConfig", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.DriverLogLevels, actual.DriverLogLevels, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DriverLogLevels")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareWorkflowTemplateJobsHiveJobNewStyle diffs the desired vs. actual HiveJob,
// field by field; every field difference requires recreation of the resource.
func compareWorkflowTemplateJobsHiveJobNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*WorkflowTemplateJobsHiveJob)
	if !ok {
		desiredNotPointer, ok := d.(WorkflowTemplateJobsHiveJob)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsHiveJob or *WorkflowTemplateJobsHiveJob", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*WorkflowTemplateJobsHiveJob)
	if !ok {
		actualNotPointer, ok := a.(WorkflowTemplateJobsHiveJob)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsHiveJob", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.QueryFileUri, actual.QueryFileUri, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("QueryFileUri")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	// Nested object: delegated to the QueryList comparator.
	if ds, err := dcl.Diff(desired.QueryList, actual.QueryList, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplateJobsHiveJobQueryListNewStyle, EmptyObject: EmptyWorkflowTemplateJobsHiveJobQueryList, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("QueryList")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.ContinueOnFailure, actual.ContinueOnFailure, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ContinueOnFailure")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.ScriptVariables, actual.ScriptVariables, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ScriptVariables")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Properties, actual.Properties, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Properties")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.JarFileUris, actual.JarFileUris, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("JarFileUris")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareWorkflowTemplateJobsHiveJobQueryListNewStyle diffs the desired vs. actual
// HiveJob query list; a change to Queries requires recreation of the resource.
func compareWorkflowTemplateJobsHiveJobQueryListNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*WorkflowTemplateJobsHiveJobQueryList)
	if !ok {
		desiredNotPointer, ok := d.(WorkflowTemplateJobsHiveJobQueryList)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsHiveJobQueryList or *WorkflowTemplateJobsHiveJobQueryList", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*WorkflowTemplateJobsHiveJobQueryList)
	if !ok {
		actualNotPointer, ok := a.(WorkflowTemplateJobsHiveJobQueryList)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsHiveJobQueryList", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.Queries, actual.Queries, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Queries")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareWorkflowTemplateJobsPigJobNewStyle diffs the desired vs. actual PigJob,
// field by field; every field difference requires recreation of the resource.
func compareWorkflowTemplateJobsPigJobNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	// Each argument may arrive as either *T or T; normalize both to pointers.
	desired, ok := d.(*WorkflowTemplateJobsPigJob)
	if !ok {
		desiredNotPointer, ok := d.(WorkflowTemplateJobsPigJob)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsPigJob or *WorkflowTemplateJobsPigJob", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*WorkflowTemplateJobsPigJob)
	if !ok {
		actualNotPointer, ok := a.(WorkflowTemplateJobsPigJob)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsPigJob", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.QueryFileUri, actual.QueryFileUri, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("QueryFileUri")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.QueryList, actual.QueryList, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplateJobsPigJobQueryListNewStyle, EmptyObject: EmptyWorkflowTemplateJobsPigJobQueryList, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("QueryList")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.ContinueOnFailure, actual.ContinueOnFailure, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ContinueOnFailure")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.ScriptVariables, actual.ScriptVariables, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ScriptVariables")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Properties, actual.Properties, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Properties")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.JarFileUris, actual.JarFileUris, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("JarFileUris")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.LoggingConfig, actual.LoggingConfig, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplateJobsPigJobLoggingConfigNewStyle, EmptyObject: EmptyWorkflowTemplateJobsPigJobLoggingConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("LoggingConfig")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareWorkflowTemplateJobsPigJobQueryListNewStyle diffs the desired vs. actual
// PigJob query list; a change to Queries requires recreation of the resource.
func compareWorkflowTemplateJobsPigJobQueryListNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*WorkflowTemplateJobsPigJobQueryList)
	if !ok {
		desiredNotPointer, ok := d.(WorkflowTemplateJobsPigJobQueryList)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsPigJobQueryList or *WorkflowTemplateJobsPigJobQueryList", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*WorkflowTemplateJobsPigJobQueryList)
	if !ok {
		actualNotPointer, ok := a.(WorkflowTemplateJobsPigJobQueryList)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsPigJobQueryList", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.Queries, actual.Queries, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Queries")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareWorkflowTemplateJobsPigJobLoggingConfigNewStyle diffs the desired vs. actual
// PigJob logging config; a change to DriverLogLevels requires recreation.
func compareWorkflowTemplateJobsPigJobLoggingConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*WorkflowTemplateJobsPigJobLoggingConfig)
	if !ok {
		desiredNotPointer, ok := d.(WorkflowTemplateJobsPigJobLoggingConfig)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsPigJobLoggingConfig or *WorkflowTemplateJobsPigJobLoggingConfig", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*WorkflowTemplateJobsPigJobLoggingConfig)
	if !ok {
		actualNotPointer, ok := a.(WorkflowTemplateJobsPigJobLoggingConfig)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsPigJobLoggingConfig", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.DriverLogLevels, actual.DriverLogLevels, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DriverLogLevels")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareWorkflowTemplateJobsSparkRJobNewStyle diffs the desired vs. actual SparkRJob,
// field by field; every field difference requires recreation of the resource.
func compareWorkflowTemplateJobsSparkRJobNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	// Each argument may arrive as either *T or T; normalize both to pointers.
	desired, ok := d.(*WorkflowTemplateJobsSparkRJob)
	if !ok {
		desiredNotPointer, ok := d.(WorkflowTemplateJobsSparkRJob)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsSparkRJob or *WorkflowTemplateJobsSparkRJob", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*WorkflowTemplateJobsSparkRJob)
	if !ok {
		actualNotPointer, ok := a.(WorkflowTemplateJobsSparkRJob)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsSparkRJob", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.MainRFileUri, actual.MainRFileUri, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MainRFileUri")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Args, actual.Args, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Args")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.FileUris, actual.FileUris, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("FileUris")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.ArchiveUris, actual.ArchiveUris, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ArchiveUris")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Properties, actual.Properties, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Properties")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.LoggingConfig, actual.LoggingConfig, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplateJobsSparkRJobLoggingConfigNewStyle, EmptyObject: EmptyWorkflowTemplateJobsSparkRJobLoggingConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("LoggingConfig")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareWorkflowTemplateJobsSparkRJobLoggingConfigNewStyle diffs the desired vs.
// actual SparkRJob logging config; a change to DriverLogLevels requires recreation.
func compareWorkflowTemplateJobsSparkRJobLoggingConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*WorkflowTemplateJobsSparkRJobLoggingConfig)
	if !ok {
		desiredNotPointer, ok := d.(WorkflowTemplateJobsSparkRJobLoggingConfig)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsSparkRJobLoggingConfig or *WorkflowTemplateJobsSparkRJobLoggingConfig", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*WorkflowTemplateJobsSparkRJobLoggingConfig)
	if !ok {
		actualNotPointer, ok := a.(WorkflowTemplateJobsSparkRJobLoggingConfig)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsSparkRJobLoggingConfig", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.DriverLogLevels, actual.DriverLogLevels, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DriverLogLevels")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareWorkflowTemplateJobsSparkSqlJobNewStyle diffs the desired vs. actual
// SparkSqlJob, field by field; every field difference requires recreation.
func compareWorkflowTemplateJobsSparkSqlJobNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	// Each argument may arrive as either *T or T; normalize both to pointers.
	desired, ok := d.(*WorkflowTemplateJobsSparkSqlJob)
	if !ok {
		desiredNotPointer, ok := d.(WorkflowTemplateJobsSparkSqlJob)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsSparkSqlJob or *WorkflowTemplateJobsSparkSqlJob", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*WorkflowTemplateJobsSparkSqlJob)
	if !ok {
		actualNotPointer, ok := a.(WorkflowTemplateJobsSparkSqlJob)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsSparkSqlJob", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.QueryFileUri, actual.QueryFileUri, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("QueryFileUri")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.QueryList, actual.QueryList, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplateJobsSparkSqlJobQueryListNewStyle, EmptyObject: EmptyWorkflowTemplateJobsSparkSqlJobQueryList, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("QueryList")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.ScriptVariables, actual.ScriptVariables, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ScriptVariables")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Properties, actual.Properties, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Properties")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.JarFileUris, actual.JarFileUris, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("JarFileUris")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.LoggingConfig, actual.LoggingConfig, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplateJobsSparkSqlJobLoggingConfigNewStyle, EmptyObject: EmptyWorkflowTemplateJobsSparkSqlJobLoggingConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("LoggingConfig")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareWorkflowTemplateJobsSparkSqlJobQueryListNewStyle diffs the desired vs. actual
// SparkSqlJob query list; a change to Queries requires recreation of the resource.
func compareWorkflowTemplateJobsSparkSqlJobQueryListNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*WorkflowTemplateJobsSparkSqlJobQueryList)
	if !ok {
		desiredNotPointer, ok := d.(WorkflowTemplateJobsSparkSqlJobQueryList)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsSparkSqlJobQueryList or *WorkflowTemplateJobsSparkSqlJobQueryList", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*WorkflowTemplateJobsSparkSqlJobQueryList)
	if !ok {
		actualNotPointer, ok := a.(WorkflowTemplateJobsSparkSqlJobQueryList)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsSparkSqlJobQueryList", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.Queries, actual.Queries, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Queries")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareWorkflowTemplateJobsSparkSqlJobLoggingConfigNewStyle diffs the desired vs.
// actual SparkSqlJob logging config; a change to DriverLogLevels requires recreation.
func compareWorkflowTemplateJobsSparkSqlJobLoggingConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*WorkflowTemplateJobsSparkSqlJobLoggingConfig)
	if !ok {
		desiredNotPointer, ok := d.(WorkflowTemplateJobsSparkSqlJobLoggingConfig)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsSparkSqlJobLoggingConfig or *WorkflowTemplateJobsSparkSqlJobLoggingConfig", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*WorkflowTemplateJobsSparkSqlJobLoggingConfig)
	if !ok {
		actualNotPointer, ok := a.(WorkflowTemplateJobsSparkSqlJobLoggingConfig)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsSparkSqlJobLoggingConfig", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.DriverLogLevels, actual.DriverLogLevels, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DriverLogLevels")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareWorkflowTemplateJobsPrestoJobNewStyle diffs the desired vs. actual PrestoJob,
// field by field; every field difference requires recreation of the resource.
func compareWorkflowTemplateJobsPrestoJobNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	// Each argument may arrive as either *T or T; normalize both to pointers.
	desired, ok := d.(*WorkflowTemplateJobsPrestoJob)
	if !ok {
		desiredNotPointer, ok := d.(WorkflowTemplateJobsPrestoJob)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsPrestoJob or *WorkflowTemplateJobsPrestoJob", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*WorkflowTemplateJobsPrestoJob)
	if !ok {
		actualNotPointer, ok := a.(WorkflowTemplateJobsPrestoJob)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsPrestoJob", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.QueryFileUri, actual.QueryFileUri, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("QueryFileUri")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.QueryList, actual.QueryList, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplateJobsPrestoJobQueryListNewStyle, EmptyObject: EmptyWorkflowTemplateJobsPrestoJobQueryList, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("QueryList")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.ContinueOnFailure, actual.ContinueOnFailure, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ContinueOnFailure")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.OutputFormat, actual.OutputFormat, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("OutputFormat")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.ClientTags, actual.ClientTags, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ClientTags")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Properties, actual.Properties, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Properties")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.LoggingConfig, actual.LoggingConfig, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplateJobsPrestoJobLoggingConfigNewStyle, EmptyObject: EmptyWorkflowTemplateJobsPrestoJobLoggingConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("LoggingConfig")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareWorkflowTemplateJobsPrestoJobQueryListNewStyle diffs the desired vs. actual
// PrestoJob query list; a change to Queries requires recreation of the resource.
func compareWorkflowTemplateJobsPrestoJobQueryListNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*WorkflowTemplateJobsPrestoJobQueryList)
	if !ok {
		desiredNotPointer, ok := d.(WorkflowTemplateJobsPrestoJobQueryList)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsPrestoJobQueryList or *WorkflowTemplateJobsPrestoJobQueryList", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*WorkflowTemplateJobsPrestoJobQueryList)
	if !ok {
		actualNotPointer, ok := a.(WorkflowTemplateJobsPrestoJobQueryList)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsPrestoJobQueryList", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.Queries, actual.Queries, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Queries")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareWorkflowTemplateJobsPrestoJobLoggingConfigNewStyle diffs the desired vs.
// actual PrestoJob logging config; a change to DriverLogLevels requires recreation.
func compareWorkflowTemplateJobsPrestoJobLoggingConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*WorkflowTemplateJobsPrestoJobLoggingConfig)
	if !ok {
		desiredNotPointer, ok := d.(WorkflowTemplateJobsPrestoJobLoggingConfig)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsPrestoJobLoggingConfig or *WorkflowTemplateJobsPrestoJobLoggingConfig", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*WorkflowTemplateJobsPrestoJobLoggingConfig)
	if !ok {
		actualNotPointer, ok := a.(WorkflowTemplateJobsPrestoJobLoggingConfig)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsPrestoJobLoggingConfig", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.DriverLogLevels, actual.DriverLogLevels, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DriverLogLevels")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareWorkflowTemplateJobsSchedulingNewStyle diffs the desired vs. actual job
// scheduling config; any field difference requires recreation of the resource.
func compareWorkflowTemplateJobsSchedulingNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	// Each argument may arrive as either *T or T; normalize both to pointers.
	desired, ok := d.(*WorkflowTemplateJobsScheduling)
	if !ok {
		desiredNotPointer, ok := d.(WorkflowTemplateJobsScheduling)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsScheduling or *WorkflowTemplateJobsScheduling", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*WorkflowTemplateJobsScheduling)
	if !ok {
		actualNotPointer, ok := a.(WorkflowTemplateJobsScheduling)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a WorkflowTemplateJobsScheduling", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.MaxFailuresPerHour, actual.MaxFailuresPerHour, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MaxFailuresPerHour")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.MaxFailuresTotal, actual.MaxFailuresTotal, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MaxFailuresTotal")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareWorkflowTemplateParametersNewStyle diffs a desired vs. actual template
// parameter; any field difference requires recreation of the resource.
func compareWorkflowTemplateParametersNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*WorkflowTemplateParameters)
	if !ok {
		desiredNotPointer, ok := d.(WorkflowTemplateParameters)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a WorkflowTemplateParameters or *WorkflowTemplateParameters", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*WorkflowTemplateParameters)
	if !ok {
		actualNotPointer, ok := a.(WorkflowTemplateParameters)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a WorkflowTemplateParameters", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Fields, actual.Fields, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Fields")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Description, actual.Description, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Description")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Validation, actual.Validation, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplateParametersValidationNewStyle, EmptyObject: EmptyWorkflowTemplateParametersValidation, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Validation")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareWorkflowTemplateParametersValidationNewStyle diffs a desired vs. actual
// parameter validation block (Regex / Values are mutually nested sub-objects);
// any difference requires recreation of the resource.
func compareWorkflowTemplateParametersValidationNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	// Each argument may arrive as either *T or T; normalize both to pointers.
	desired, ok := d.(*WorkflowTemplateParametersValidation)
	if !ok {
		desiredNotPointer, ok := d.(WorkflowTemplateParametersValidation)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a WorkflowTemplateParametersValidation or *WorkflowTemplateParametersValidation", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*WorkflowTemplateParametersValidation)
	if !ok {
		actualNotPointer, ok := a.(WorkflowTemplateParametersValidation)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a WorkflowTemplateParametersValidation", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.Regex, actual.Regex, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplateParametersValidationRegexNewStyle, EmptyObject: EmptyWorkflowTemplateParametersValidationRegex, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Regex")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Values, actual.Values, dcl.DiffInfo{ObjectFunction: compareWorkflowTemplateParametersValidationValuesNewStyle, EmptyObject: EmptyWorkflowTemplateParametersValidationValues, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Values")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareWorkflowTemplateParametersValidationRegexNewStyle diffs the regex-based
// parameter validation; a change to Regexes requires recreation of the resource.
func compareWorkflowTemplateParametersValidationRegexNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*WorkflowTemplateParametersValidationRegex)
	if !ok {
		desiredNotPointer, ok := d.(WorkflowTemplateParametersValidationRegex)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a WorkflowTemplateParametersValidationRegex or *WorkflowTemplateParametersValidationRegex", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*WorkflowTemplateParametersValidationRegex)
	if !ok {
		actualNotPointer, ok := a.(WorkflowTemplateParametersValidationRegex)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a WorkflowTemplateParametersValidationRegex", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.Regexes, actual.Regexes, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Regexes")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareWorkflowTemplateParametersValidationValuesNewStyle diffs the allowed-values
// parameter validation; a change to Values requires recreation of the resource.
func compareWorkflowTemplateParametersValidationValuesNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*WorkflowTemplateParametersValidationValues)
	if !ok {
		desiredNotPointer, ok := d.(WorkflowTemplateParametersValidationValues)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a WorkflowTemplateParametersValidationValues or *WorkflowTemplateParametersValidationValues", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*WorkflowTemplateParametersValidationValues)
	if !ok {
		actualNotPointer, ok := a.(WorkflowTemplateParametersValidationValues)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a WorkflowTemplateParametersValidationValues", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.Values, actual.Values, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Values")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// urlNormalized returns a copy of the resource struct with values normalized
// for URL substitutions. For instance, it converts long-form self-links to
// short-form so they can be substituted in.
func (r *WorkflowTemplate) urlNormalized() *WorkflowTemplate {
	normalized := dcl.Copy(*r).(WorkflowTemplate)
	normalized.Name = dcl.SelfLinkToName(r.Name)
	normalized.DagTimeout = dcl.SelfLinkToName(r.DagTimeout)
	normalized.Project = dcl.SelfLinkToName(r.Project)
	normalized.Location = dcl.SelfLinkToName(r.Location)
	return &normalized
}

// updateURL always returns an error: WorkflowTemplate defines no named update
// operations, so any updateName passed in is unknown.
func (r *WorkflowTemplate) updateURL(userBasePath, updateName string) (string, error) {
	return "", fmt.Errorf("unknown update name: %s", updateName)
}

// marshal encodes the WorkflowTemplate resource into JSON for a Create request, and
// performs transformations from the resource schema to the API schema if
// necessary.
func (r *WorkflowTemplate) marshal(c *Client) ([]byte, error) {
	m, err := expandWorkflowTemplate(c, r)
	if err != nil {
		return nil, fmt.Errorf("error marshalling WorkflowTemplate: %w", err)
	}

	return json.Marshal(m)
}

// unmarshalWorkflowTemplate decodes JSON responses into the WorkflowTemplate resource schema.
func unmarshalWorkflowTemplate(b []byte, c *Client, res *WorkflowTemplate) (*WorkflowTemplate, error) {
	var m map[string]interface{}
	if err := json.Unmarshal(b, &m); err != nil {
		return nil, err
	}
	return unmarshalMapWorkflowTemplate(m, c, res)
}

// unmarshalMapWorkflowTemplate flattens an already-decoded JSON map into the
// WorkflowTemplate resource schema, rejecting an empty object.
func unmarshalMapWorkflowTemplate(m map[string]interface{}, c *Client, res *WorkflowTemplate) (*WorkflowTemplate, error) {

	flattened := flattenWorkflowTemplate(c, m, res)
	if flattened == nil {
		return nil, fmt.Errorf("attempted to flatten empty json object")
	}
	return flattened, nil
}

// expandWorkflowTemplate expands WorkflowTemplate into a JSON request object.
// NOTE(review): Name, Project, and Location expand via dcl.EmptyValue() — they
// appear to be identity fields resolved through the request URL rather than the
// body; confirm against the DCL URL templates for this resource.
func expandWorkflowTemplate(c *Client, f *WorkflowTemplate) (map[string]interface{}, error) {
	m := make(map[string]interface{})
	res := f
	_ = res
	if v, err := dcl.EmptyValue(); err != nil {
		return nil, fmt.Errorf("error expanding Name into name: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["name"] = v
	}
	if v := f.Labels; dcl.ValueShouldBeSent(v) {
		m["labels"] = v
	}
	if v, err := expandWorkflowTemplateEncryptionConfig(c, f.EncryptionConfig, res); err != nil {
		return nil, fmt.Errorf("error expanding EncryptionConfig into encryptionConfig: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["encryptionConfig"] = v
	}
	if v, err := expandWorkflowTemplatePlacement(c, f.Placement, res); err != nil {
		return nil, fmt.Errorf("error expanding Placement into placement: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["placement"] = v
	}
	// Jobs and Parameters are slices: sent whenever non-nil (an empty but
	// non-nil slice is still sent), unlike the IsEmptyValueIndirect fields.
	if v, err := expandWorkflowTemplateJobsSlice(c, f.Jobs, res); err != nil {
		return nil, fmt.Errorf("error expanding Jobs into jobs: %w", err)
	} else if v != nil {
		m["jobs"] = v
	}
	if v, err := expandWorkflowTemplateParametersSlice(c, f.Parameters, res); err != nil {
		return nil, fmt.Errorf("error expanding Parameters into parameters: %w", err)
	} else if v != nil {
		m["parameters"] = v
	}
	if v := f.DagTimeout; dcl.ValueShouldBeSent(v) {
		m["dagTimeout"] = v
	}
	if v, err := dcl.EmptyValue(); err != nil {
		return nil, fmt.Errorf("error expanding Project into project: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["project"] = v
	}
	if v, err := dcl.EmptyValue(); err != nil {
		return nil, fmt.Errorf("error expanding Location into location: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["location"] = v
	}

	return m, nil
}

// flattenWorkflowTemplate flattens WorkflowTemplate from a JSON request object into the
// WorkflowTemplate type. Returns nil for a non-map or empty input.
func flattenWorkflowTemplate(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplate {
	m, ok := i.(map[string]interface{})
	if !ok {
		return nil
	}
	if len(m) == 0 {
		return nil
	}

	resultRes := &WorkflowTemplate{}
	resultRes.Name = dcl.FlattenString(m["name"])
	resultRes.Version = dcl.FlattenInteger(m["version"])
	resultRes.CreateTime = dcl.FlattenString(m["createTime"])
	resultRes.UpdateTime = dcl.FlattenString(m["updateTime"])
	resultRes.Labels = dcl.FlattenKeyValuePairs(m["labels"])
	resultRes.EncryptionConfig = flattenWorkflowTemplateEncryptionConfig(c, m["encryptionConfig"], res)
	resultRes.Placement = flattenWorkflowTemplatePlacement(c, m["placement"], res)
	resultRes.Jobs = flattenWorkflowTemplateJobsSlice(c, m["jobs"], res)
	resultRes.Parameters = flattenWorkflowTemplateParametersSlice(c, m["parameters"], res)
	resultRes.DagTimeout = dcl.FlattenString(m["dagTimeout"])
	resultRes.Project = dcl.FlattenString(m["project"])
	resultRes.Location = dcl.FlattenString(m["location"])

	return resultRes
}

// expandWorkflowTemplateEncryptionConfigMap expands the contents of WorkflowTemplateEncryptionConfig into a JSON
// request object.
func expandWorkflowTemplateEncryptionConfigMap(c *Client, f map[string]WorkflowTemplateEncryptionConfig, res *WorkflowTemplate) (map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := make(map[string]interface{})
	for k, item := range f {
		i, err := expandWorkflowTemplateEncryptionConfig(c, &item, res)
		if err != nil {
			return nil, err
		}
		if i != nil {
			items[k] = i
		}
	}

	return items, nil
}

// expandWorkflowTemplateEncryptionConfigSlice expands the contents of WorkflowTemplateEncryptionConfig into a JSON
// request object.
func expandWorkflowTemplateEncryptionConfigSlice(c *Client, f []WorkflowTemplateEncryptionConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := []map[string]interface{}{}
	for _, item := range f {
		i, err := expandWorkflowTemplateEncryptionConfig(c, &item, res)
		if err != nil {
			return nil, err
		}

		items = append(items, i)
	}

	return items, nil
}

// flattenWorkflowTemplateEncryptionConfigMap flattens the contents of WorkflowTemplateEncryptionConfig from a JSON
// response object.
func flattenWorkflowTemplateEncryptionConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplateEncryptionConfig {
	a, ok := i.(map[string]interface{})
	if !ok {
		return map[string]WorkflowTemplateEncryptionConfig{}
	}

	if len(a) == 0 {
		return map[string]WorkflowTemplateEncryptionConfig{}
	}

	items := make(map[string]WorkflowTemplateEncryptionConfig)
	for k, item := range a {
		items[k] = *flattenWorkflowTemplateEncryptionConfig(c, item.(map[string]interface{}), res)
	}

	return items
}

// flattenWorkflowTemplateEncryptionConfigSlice flattens the contents of WorkflowTemplateEncryptionConfig from a JSON
// response object.
func flattenWorkflowTemplateEncryptionConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplateEncryptionConfig {
	a, ok := i.([]interface{})
	if !ok {
		return []WorkflowTemplateEncryptionConfig{}
	}

	if len(a) == 0 {
		return []WorkflowTemplateEncryptionConfig{}
	}

	items := make([]WorkflowTemplateEncryptionConfig, 0, len(a))
	for _, item := range a {
		items = append(items, *flattenWorkflowTemplateEncryptionConfig(c, item.(map[string]interface{}), res))
	}

	return items
}

// expandWorkflowTemplateEncryptionConfig expands an instance of WorkflowTemplateEncryptionConfig into a JSON
// request object.
func expandWorkflowTemplateEncryptionConfig(c *Client, f *WorkflowTemplateEncryptionConfig, res *WorkflowTemplate) (map[string]interface{}, error) {
	if dcl.IsEmptyValueIndirect(f) {
		return nil, nil
	}

	m := make(map[string]interface{})
	if v := f.KmsKey; !dcl.IsEmptyValueIndirect(v) {
		m["kmsKey"] = v
	}

	return m, nil
}

// flattenWorkflowTemplateEncryptionConfig flattens an instance of WorkflowTemplateEncryptionConfig from a JSON
// response object. An empty input flattens to the shared
// EmptyWorkflowTemplateEncryptionConfig sentinel rather than a fresh struct.
func flattenWorkflowTemplateEncryptionConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplateEncryptionConfig {
	m, ok := i.(map[string]interface{})
	if !ok {
		return nil
	}

	r := &WorkflowTemplateEncryptionConfig{}

	if dcl.IsEmptyValueIndirect(i) {
		return EmptyWorkflowTemplateEncryptionConfig
	}
	r.KmsKey = dcl.FlattenString(m["kmsKey"])

	return r
}

// expandWorkflowTemplatePlacementMap expands the contents of WorkflowTemplatePlacement into a JSON
// request object.
func expandWorkflowTemplatePlacementMap(c *Client, f map[string]WorkflowTemplatePlacement, res *WorkflowTemplate) (map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := make(map[string]interface{})
	for k, item := range f {
		i, err := expandWorkflowTemplatePlacement(c, &item, res)
		if err != nil {
			return nil, err
		}
		if i != nil {
			items[k] = i
		}
	}

	return items, nil
}

// expandWorkflowTemplatePlacementSlice expands the contents of WorkflowTemplatePlacement into a JSON
// request object.
func expandWorkflowTemplatePlacementSlice(c *Client, f []WorkflowTemplatePlacement, res *WorkflowTemplate) ([]map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := []map[string]interface{}{}
	for _, item := range f {
		i, err := expandWorkflowTemplatePlacement(c, &item, res)
		if err != nil {
			return nil, err
		}

		items = append(items, i)
	}

	return items, nil
}

// flattenWorkflowTemplatePlacementMap flattens the contents of WorkflowTemplatePlacement from a JSON
// response object.
func flattenWorkflowTemplatePlacementMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacement {
	a, ok := i.(map[string]interface{})
	if !ok {
		return map[string]WorkflowTemplatePlacement{}
	}

	if len(a) == 0 {
		return map[string]WorkflowTemplatePlacement{}
	}

	items := make(map[string]WorkflowTemplatePlacement)
	for k, item := range a {
		items[k] = *flattenWorkflowTemplatePlacement(c, item.(map[string]interface{}), res)
	}

	return items
}

// flattenWorkflowTemplatePlacementSlice flattens the contents of WorkflowTemplatePlacement from a JSON
// response object.
+func flattenWorkflowTemplatePlacementSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacement { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplatePlacement{} + } + + if len(a) == 0 { + return []WorkflowTemplatePlacement{} + } + + items := make([]WorkflowTemplatePlacement, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplatePlacement(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplatePlacement expands an instance of WorkflowTemplatePlacement into a JSON +// request object. +func expandWorkflowTemplatePlacement(c *Client, f *WorkflowTemplatePlacement, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v, err := expandWorkflowTemplatePlacementManagedCluster(c, f.ManagedCluster, res); err != nil { + return nil, fmt.Errorf("error expanding ManagedCluster into managedCluster: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["managedCluster"] = v + } + if v, err := expandWorkflowTemplatePlacementClusterSelector(c, f.ClusterSelector, res); err != nil { + return nil, fmt.Errorf("error expanding ClusterSelector into clusterSelector: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["clusterSelector"] = v + } + + return m, nil +} + +// flattenWorkflowTemplatePlacement flattens an instance of WorkflowTemplatePlacement from a JSON +// response object. 
+func flattenWorkflowTemplatePlacement(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacement { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplatePlacement{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplatePlacement + } + r.ManagedCluster = flattenWorkflowTemplatePlacementManagedCluster(c, m["managedCluster"], res) + r.ClusterSelector = flattenWorkflowTemplatePlacementClusterSelector(c, m["clusterSelector"], res) + + return r +} + +// expandWorkflowTemplatePlacementManagedClusterMap expands the contents of WorkflowTemplatePlacementManagedCluster into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterMap(c *Client, f map[string]WorkflowTemplatePlacementManagedCluster, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplatePlacementManagedCluster(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplatePlacementManagedClusterSlice expands the contents of WorkflowTemplatePlacementManagedCluster into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterSlice(c *Client, f []WorkflowTemplatePlacementManagedCluster, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplatePlacementManagedCluster(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterMap flattens the contents of WorkflowTemplatePlacementManagedCluster from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedCluster { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplatePlacementManagedCluster{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplatePlacementManagedCluster{} + } + + items := make(map[string]WorkflowTemplatePlacementManagedCluster) + for k, item := range a { + items[k] = *flattenWorkflowTemplatePlacementManagedCluster(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplatePlacementManagedClusterSlice flattens the contents of WorkflowTemplatePlacementManagedCluster from a JSON +// response object. +func flattenWorkflowTemplatePlacementManagedClusterSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedCluster { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplatePlacementManagedCluster{} + } + + if len(a) == 0 { + return []WorkflowTemplatePlacementManagedCluster{} + } + + items := make([]WorkflowTemplatePlacementManagedCluster, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplatePlacementManagedCluster(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplatePlacementManagedCluster expands an instance of WorkflowTemplatePlacementManagedCluster into a JSON +// request object. 
+func expandWorkflowTemplatePlacementManagedCluster(c *Client, f *WorkflowTemplatePlacementManagedCluster, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.ClusterName; !dcl.IsEmptyValueIndirect(v) { + m["clusterName"] = v + } + if v, err := expandWorkflowTemplatePlacementManagedClusterConfig(c, f.Config, res); err != nil { + return nil, fmt.Errorf("error expanding Config into config: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["config"] = v + } + if v := f.Labels; !dcl.IsEmptyValueIndirect(v) { + m["labels"] = v + } + + return m, nil +} + +// flattenWorkflowTemplatePlacementManagedCluster flattens an instance of WorkflowTemplatePlacementManagedCluster from a JSON +// response object. +func flattenWorkflowTemplatePlacementManagedCluster(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedCluster { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplatePlacementManagedCluster{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplatePlacementManagedCluster + } + r.ClusterName = dcl.FlattenString(m["clusterName"]) + r.Config = flattenWorkflowTemplatePlacementManagedClusterConfig(c, m["config"], res) + r.Labels = dcl.FlattenKeyValuePairs(m["labels"]) + + return r +} + +// expandWorkflowTemplatePlacementManagedClusterConfigMap expands the contents of WorkflowTemplatePlacementManagedClusterConfig into a JSON +// request object. 
+func expandWorkflowTemplatePlacementManagedClusterConfigMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplatePlacementManagedClusterConfigSlice expands the contents of WorkflowTemplatePlacementManagedClusterConfig into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigSlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfig from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplatePlacementManagedClusterConfig{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplatePlacementManagedClusterConfig{} + } + + items := make(map[string]WorkflowTemplatePlacementManagedClusterConfig) + for k, item := range a { + items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfig from a JSON +// response object. +func flattenWorkflowTemplatePlacementManagedClusterConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfig { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplatePlacementManagedClusterConfig{} + } + + if len(a) == 0 { + return []WorkflowTemplatePlacementManagedClusterConfig{} + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplatePlacementManagedClusterConfig expands an instance of WorkflowTemplatePlacementManagedClusterConfig into a JSON +// request object. 
+func expandWorkflowTemplatePlacementManagedClusterConfig(c *Client, f *WorkflowTemplatePlacementManagedClusterConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.StagingBucket; !dcl.IsEmptyValueIndirect(v) { + m["configBucket"] = v + } + if v := f.TempBucket; !dcl.IsEmptyValueIndirect(v) { + m["tempBucket"] = v + } + if v, err := expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig(c, f.GceClusterConfig, res); err != nil { + return nil, fmt.Errorf("error expanding GceClusterConfig into gceClusterConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["gceClusterConfig"] = v + } + if v, err := expandWorkflowTemplatePlacementManagedClusterConfigMasterConfig(c, f.MasterConfig, res); err != nil { + return nil, fmt.Errorf("error expanding MasterConfig into masterConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["masterConfig"] = v + } + if v, err := expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfig(c, f.WorkerConfig, res); err != nil { + return nil, fmt.Errorf("error expanding WorkerConfig into workerConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["workerConfig"] = v + } + if v, err := expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig(c, f.SecondaryWorkerConfig, res); err != nil { + return nil, fmt.Errorf("error expanding SecondaryWorkerConfig into secondaryWorkerConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["secondaryWorkerConfig"] = v + } + if v, err := expandWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig(c, f.SoftwareConfig, res); err != nil { + return nil, fmt.Errorf("error expanding SoftwareConfig into softwareConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["softwareConfig"] = v + } + if v, err := expandWorkflowTemplatePlacementManagedClusterConfigInitializationActionsSlice(c, f.InitializationActions, res); 
err != nil { + return nil, fmt.Errorf("error expanding InitializationActions into initializationActions: %w", err) + } else if v != nil { + m["initializationActions"] = v + } + if v, err := expandWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig(c, f.EncryptionConfig, res); err != nil { + return nil, fmt.Errorf("error expanding EncryptionConfig into encryptionConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["encryptionConfig"] = v + } + if v, err := expandWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig(c, f.AutoscalingConfig, res); err != nil { + return nil, fmt.Errorf("error expanding AutoscalingConfig into autoscalingConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["autoscalingConfig"] = v + } + if v, err := expandWorkflowTemplatePlacementManagedClusterConfigSecurityConfig(c, f.SecurityConfig, res); err != nil { + return nil, fmt.Errorf("error expanding SecurityConfig into securityConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["securityConfig"] = v + } + if v, err := expandWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig(c, f.LifecycleConfig, res); err != nil { + return nil, fmt.Errorf("error expanding LifecycleConfig into lifecycleConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["lifecycleConfig"] = v + } + if v, err := expandWorkflowTemplatePlacementManagedClusterConfigEndpointConfig(c, f.EndpointConfig, res); err != nil { + return nil, fmt.Errorf("error expanding EndpointConfig into endpointConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["endpointConfig"] = v + } +{{- if ne $.TargetVersionName "ga" }} + if v, err := expandWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig(c, f.GkeClusterConfig, res); err != nil { + return nil, fmt.Errorf("error expanding GkeClusterConfig into gkeClusterConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["gkeClusterConfig"] = v + } + if v, err := 
expandWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig(c, f.MetastoreConfig, res); err != nil { + return nil, fmt.Errorf("error expanding MetastoreConfig into metastoreConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["metastoreConfig"] = v + } +{{- end }} + + return m, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfig flattens an instance of WorkflowTemplatePlacementManagedClusterConfig from a JSON +// response object. +func flattenWorkflowTemplatePlacementManagedClusterConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplatePlacementManagedClusterConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplatePlacementManagedClusterConfig + } + r.StagingBucket = dcl.FlattenString(m["configBucket"]) + r.TempBucket = dcl.FlattenString(m["tempBucket"]) + r.GceClusterConfig = flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig(c, m["gceClusterConfig"], res) + r.MasterConfig = flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfig(c, m["masterConfig"], res) + r.WorkerConfig = flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfig(c, m["workerConfig"], res) + r.SecondaryWorkerConfig = flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig(c, m["secondaryWorkerConfig"], res) + r.SoftwareConfig = flattenWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig(c, m["softwareConfig"], res) + r.InitializationActions = flattenWorkflowTemplatePlacementManagedClusterConfigInitializationActionsSlice(c, m["initializationActions"], res) + r.EncryptionConfig = flattenWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig(c, m["encryptionConfig"], res) + r.AutoscalingConfig = flattenWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig(c, m["autoscalingConfig"], res) + r.SecurityConfig = 
flattenWorkflowTemplatePlacementManagedClusterConfigSecurityConfig(c, m["securityConfig"], res) + r.LifecycleConfig = flattenWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig(c, m["lifecycleConfig"], res) + r.EndpointConfig = flattenWorkflowTemplatePlacementManagedClusterConfigEndpointConfig(c, m["endpointConfig"], res) +{{- if ne $.TargetVersionName "ga" }} + r.GkeClusterConfig = flattenWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig(c, m["gkeClusterConfig"], res) + r.MetastoreConfig = flattenWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig(c, m["metastoreConfig"], res) +{{- end }} + + return r +} + +// expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigMap expands the contents of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigSlice expands the contents of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig into a JSON +// request object. 
+func expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigSlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig from a JSON +// response object. +func flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig{} + } + + items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig) + for k, item := range a { + items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig{} + } + + if len(a) == 0 { + return []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig{} + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig expands an instance of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig(c *Client, f *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Zone; !dcl.IsEmptyValueIndirect(v) { + m["zoneUri"] = v + } + if v := f.Network; !dcl.IsEmptyValueIndirect(v) { + m["networkUri"] = v + } + if v := f.Subnetwork; !dcl.IsEmptyValueIndirect(v) { + m["subnetworkUri"] = v + } + if v := f.InternalIPOnly; !dcl.IsEmptyValueIndirect(v) { + m["internalIpOnly"] = v + } + if v := f.PrivateIPv6GoogleAccess; !dcl.IsEmptyValueIndirect(v) { + m["privateIpv6GoogleAccess"] = v + } + if v := f.ServiceAccount; !dcl.IsEmptyValueIndirect(v) { + m["serviceAccount"] = v + } + if v := f.ServiceAccountScopes; v != nil { + m["serviceAccountScopes"] = v + } + if v := f.Tags; v != nil { + m["tags"] = v + } + if v := f.Metadata; !dcl.IsEmptyValueIndirect(v) { + m["metadata"] = v + } + if v, err := 
expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity(c, f.ReservationAffinity, res); err != nil { + return nil, fmt.Errorf("error expanding ReservationAffinity into reservationAffinity: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["reservationAffinity"] = v + } + if v, err := expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity(c, f.NodeGroupAffinity, res); err != nil { + return nil, fmt.Errorf("error expanding NodeGroupAffinity into nodeGroupAffinity: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["nodeGroupAffinity"] = v + } + if v, err := expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig(c, f.ShieldedInstanceConfig, res); err != nil { + return nil, fmt.Errorf("error expanding ShieldedInstanceConfig into shieldedInstanceConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["shieldedInstanceConfig"] = v + } + + return m, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig flattens an instance of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig + } + r.Zone = dcl.FlattenString(m["zoneUri"]) + r.Network = dcl.FlattenString(m["networkUri"]) + r.Subnetwork = dcl.FlattenString(m["subnetworkUri"]) + r.InternalIPOnly = dcl.FlattenBool(m["internalIpOnly"]) + r.PrivateIPv6GoogleAccess = flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum(m["privateIpv6GoogleAccess"]) + r.ServiceAccount = dcl.FlattenString(m["serviceAccount"]) + r.ServiceAccountScopes = dcl.FlattenStringSlice(m["serviceAccountScopes"]) + r.Tags = dcl.FlattenStringSlice(m["tags"]) + r.Metadata = dcl.FlattenKeyValuePairs(m["metadata"]) + r.ReservationAffinity = flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity(c, m["reservationAffinity"], res) + r.NodeGroupAffinity = flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity(c, m["nodeGroupAffinity"], res) + r.ShieldedInstanceConfig = flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig(c, m["shieldedInstanceConfig"], res) + + return r +} + +// expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityMap expands the contents of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity into a JSON +// request object. 
+func expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinitySlice expands the contents of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinitySlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity{} + } + + items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity) + for k, item := range a { + items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinitySlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinitySlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity{} + } + + if len(a) == 0 { + return []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity{} + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity expands an instance of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity(c *Client, f *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.ConsumeReservationType; !dcl.IsEmptyValueIndirect(v) { + m["consumeReservationType"] = v + } + if v := f.Key; !dcl.IsEmptyValueIndirect(v) { + m["key"] = v + } + if v := f.Values; v != nil { + m["values"] = v + } + + return m, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity flattens an instance of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity + } + r.ConsumeReservationType = flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum(m["consumeReservationType"]) + r.Key = dcl.FlattenString(m["key"]) + r.Values = dcl.FlattenStringSlice(m["values"]) + + return r +} + +// expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityMap expands the contents of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinitySlice expands the contents of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity into a JSON +// request object. 
+func expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinitySlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity from a JSON +// response object. +func flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity{} + } + + items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity) + for k, item := range a { + items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinitySlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinitySlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity{} + } + + if len(a) == 0 { + return []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity{} + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity expands an instance of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity(c *Client, f *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.NodeGroup; !dcl.IsEmptyValueIndirect(v) { + m["nodeGroupUri"] = v + } + + return m, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity flattens an instance of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity + } + r.NodeGroup = dcl.FlattenString(m["nodeGroupUri"]) + + return r +} + +// expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfigMap expands the contents of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfigMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfigSlice expands the contents of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig into a JSON +// request object. 
+func expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfigSlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfigMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig from a JSON +// response object. +func flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig{} + } + + items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig) + for k, item := range a { + items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfigSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig{} + } + + if len(a) == 0 { + return []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig{} + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig expands an instance of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig(c *Client, f *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.EnableSecureBoot; !dcl.IsEmptyValueIndirect(v) { + m["enableSecureBoot"] = v + } + if v := f.EnableVtpm; !dcl.IsEmptyValueIndirect(v) { + m["enableVtpm"] = v + } + if v := f.EnableIntegrityMonitoring; !dcl.IsEmptyValueIndirect(v) { + m["enableIntegrityMonitoring"] = v + } + + return m, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig flattens an instance of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig + } + r.EnableSecureBoot = dcl.FlattenBool(m["enableSecureBoot"]) + r.EnableVtpm = dcl.FlattenBool(m["enableVtpm"]) + r.EnableIntegrityMonitoring = dcl.FlattenBool(m["enableIntegrityMonitoring"]) + + return r +} + +// expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigMap expands the contents of WorkflowTemplatePlacementManagedClusterConfigMasterConfig into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfigMasterConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigMasterConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigSlice expands the contents of WorkflowTemplatePlacementManagedClusterConfigMasterConfig into a JSON +// request object. 
+func expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigSlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfigMasterConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigMasterConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigMasterConfig from a JSON +// response object. +func flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigMasterConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplatePlacementManagedClusterConfigMasterConfig{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplatePlacementManagedClusterConfigMasterConfig{} + } + + items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigMasterConfig) + for k, item := range a { + items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigMasterConfig from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigMasterConfig { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplatePlacementManagedClusterConfigMasterConfig{} + } + + if len(a) == 0 { + return []WorkflowTemplatePlacementManagedClusterConfigMasterConfig{} + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigMasterConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplatePlacementManagedClusterConfigMasterConfig expands an instance of WorkflowTemplatePlacementManagedClusterConfigMasterConfig into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigMasterConfig(c *Client, f *WorkflowTemplatePlacementManagedClusterConfigMasterConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.NumInstances; !dcl.IsEmptyValueIndirect(v) { + m["numInstances"] = v + } + if v := f.Image; !dcl.IsEmptyValueIndirect(v) { + m["imageUri"] = v + } + if v := f.MachineType; !dcl.IsEmptyValueIndirect(v) { + m["machineTypeUri"] = v + } + if v, err := expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig(c, f.DiskConfig, res); err != nil { + return nil, fmt.Errorf("error expanding DiskConfig into diskConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["diskConfig"] = v + } + if v := f.Preemptibility; !dcl.IsEmptyValueIndirect(v) { + m["preemptibility"] = v + } + if v, err := expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorsSlice(c, f.Accelerators, res); err != nil { + return nil, fmt.Errorf("error expanding Accelerators into accelerators: %w", err) + } else if v != nil { + 
m["accelerators"] = v + } + if v := f.MinCpuPlatform; !dcl.IsEmptyValueIndirect(v) { + m["minCpuPlatform"] = v + } + + return m, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfig flattens an instance of WorkflowTemplatePlacementManagedClusterConfigMasterConfig from a JSON +// response object. +func flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfigMasterConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplatePlacementManagedClusterConfigMasterConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplatePlacementManagedClusterConfigMasterConfig + } + r.NumInstances = dcl.FlattenInteger(m["numInstances"]) + r.InstanceNames = dcl.FlattenStringSlice(m["instanceNames"]) + r.Image = dcl.FlattenString(m["imageUri"]) + r.MachineType = dcl.FlattenString(m["machineTypeUri"]) + r.DiskConfig = flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig(c, m["diskConfig"], res) + r.IsPreemptible = dcl.FlattenBool(m["isPreemptible"]) + r.Preemptibility = flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum(m["preemptibility"]) + r.ManagedGroupConfig = flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig(c, m["managedGroupConfig"], res) + r.Accelerators = flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorsSlice(c, m["accelerators"], res) + r.MinCpuPlatform = dcl.FlattenString(m["minCpuPlatform"]) + + return r +} + +// expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigMap expands the contents of WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig into a JSON +// request object. 
+func expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigSlice expands the contents of WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigSlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig{} + } + + items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig) + for k, item := range a { + items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig from a JSON +// response object. +func flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig{} + } + + if len(a) == 0 { + return []WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig{} + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig expands an instance of WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig into a JSON +// request object. 
+func expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig(c *Client, f *WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.BootDiskType; !dcl.IsEmptyValueIndirect(v) { + m["bootDiskType"] = v + } + if v := f.BootDiskSizeGb; !dcl.IsEmptyValueIndirect(v) { + m["bootDiskSizeGb"] = v + } + if v := f.NumLocalSsds; !dcl.IsEmptyValueIndirect(v) { + m["numLocalSsds"] = v + } + + return m, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig flattens an instance of WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig from a JSON +// response object. +func flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig + } + r.BootDiskType = dcl.FlattenString(m["bootDiskType"]) + r.BootDiskSizeGb = dcl.FlattenInteger(m["bootDiskSizeGb"]) + r.NumLocalSsds = dcl.FlattenInteger(m["numLocalSsds"]) + + return r +} + +// expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigMap expands the contents of WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig into a JSON +// request object. 
+func expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigSlice expands the contents of WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigSlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig{} + } + + items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig) + for k, item := range a { + items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig{} + } + + if len(a) == 0 { + return []WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig{} + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig expands an instance of WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig(c *Client, f *WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + + return m, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig flattens an instance of WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig + } + r.InstanceTemplateName = dcl.FlattenString(m["instanceTemplateName"]) + r.InstanceGroupManagerName = dcl.FlattenString(m["instanceGroupManagerName"]) + + return r +} + +// expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorsMap expands the contents of WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorsMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorsSlice expands the contents of WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators into a JSON +// request object. 
+func expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorsSlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorsMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators from a JSON +// response object. +func flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorsMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators{} + } + + items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators) + for k, item := range a { + items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorsSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorsSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators{} + } + + if len(a) == 0 { + return []WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators{} + } + + items := make([]WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators expands an instance of WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators(c *Client, f *WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.AcceleratorType; !dcl.IsEmptyValueIndirect(v) { + m["acceleratorTypeUri"] = v + } + if v := f.AcceleratorCount; !dcl.IsEmptyValueIndirect(v) { + m["acceleratorCount"] = v + } + + return m, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators flattens an instance of WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators from a JSON +// response object. 
+// NOTE(review): generated DCL marshalling code (do not hand-edit; regenerate).
+// The DCL field AcceleratorType maps to the API wire field "acceleratorTypeUri".
+func flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators{}
+
+	// A present-but-empty object flattens to the canonical Empty* sentinel,
+	// not nil, so callers can distinguish "empty" from "absent".
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyWorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators
+	}
+	r.AcceleratorType = dcl.FlattenString(m["acceleratorTypeUri"])
+	r.AcceleratorCount = dcl.FlattenInteger(m["acceleratorCount"])
+
+	return r
+}
+
+// expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigMap expands the contents of WorkflowTemplatePlacementManagedClusterConfigWorkerConfig into a JSON
+// request object.
+func expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfigWorkerConfig, res *WorkflowTemplate) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigSlice expands the contents of WorkflowTemplatePlacementManagedClusterConfigWorkerConfig into a JSON
+// request object.
+func expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigSlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfigWorkerConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigWorkerConfig from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigWorkerConfig {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]WorkflowTemplatePlacementManagedClusterConfigWorkerConfig{}
+	}
+
+	if len(a) == 0 {
+		return map[string]WorkflowTemplatePlacementManagedClusterConfigWorkerConfig{}
+	}
+
+	items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigWorkerConfig)
+	for k, item := range a {
+		items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfig(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigWorkerConfig from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigWorkerConfig {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []WorkflowTemplatePlacementManagedClusterConfigWorkerConfig{}
+	}
+
+	if len(a) == 0 {
+		return []WorkflowTemplatePlacementManagedClusterConfigWorkerConfig{}
+	}
+
+	items := make([]WorkflowTemplatePlacementManagedClusterConfigWorkerConfig, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfig(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfig expands an instance of WorkflowTemplatePlacementManagedClusterConfigWorkerConfig into a JSON
+// request object.
+// NOTE(review): DCL names diverge from the wire names here: Image -> "imageUri",
+// MachineType -> "machineTypeUri".
+func expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfig(c *Client, f *WorkflowTemplatePlacementManagedClusterConfigWorkerConfig, res *WorkflowTemplate) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.NumInstances; !dcl.IsEmptyValueIndirect(v) {
+		m["numInstances"] = v
+	}
+	if v := f.Image; !dcl.IsEmptyValueIndirect(v) {
+		m["imageUri"] = v
+	}
+	if v := f.MachineType; !dcl.IsEmptyValueIndirect(v) {
+		m["machineTypeUri"] = v
+	}
+	if v, err := expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig(c, f.DiskConfig, res); err != nil {
+		return nil, fmt.Errorf("error expanding DiskConfig into diskConfig: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["diskConfig"] = v
+	}
+	if v := f.Preemptibility; !dcl.IsEmptyValueIndirect(v) {
+		m["preemptibility"] = v
+	}
+	if v, err := expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorsSlice(c, f.Accelerators, res); err != nil {
+		return nil, fmt.Errorf("error expanding Accelerators into accelerators: %w", err)
+	} else if v != nil {
+		m["accelerators"] = v
+	}
+	if v := f.MinCpuPlatform; !dcl.IsEmptyValueIndirect(v) {
+		m["minCpuPlatform"] = v
+	}
+
+	return m, nil
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfig flattens an instance of WorkflowTemplatePlacementManagedClusterConfigWorkerConfig from a JSON
+// response object.
+// NOTE(review): InstanceNames, IsPreemptible and ManagedGroupConfig are read from the
+// response but never written by the expander above — presumably output-only fields;
+// confirm against the Dataproc InstanceGroupConfig API reference.
+func flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfigWorkerConfig {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &WorkflowTemplatePlacementManagedClusterConfigWorkerConfig{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyWorkflowTemplatePlacementManagedClusterConfigWorkerConfig
+	}
+	r.NumInstances = dcl.FlattenInteger(m["numInstances"])
+	r.InstanceNames = dcl.FlattenStringSlice(m["instanceNames"])
+	r.Image = dcl.FlattenString(m["imageUri"])
+	r.MachineType = dcl.FlattenString(m["machineTypeUri"])
+	r.DiskConfig = flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig(c, m["diskConfig"], res)
+	r.IsPreemptible = dcl.FlattenBool(m["isPreemptible"])
+	r.Preemptibility = flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum(m["preemptibility"])
+	r.ManagedGroupConfig = flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig(c, m["managedGroupConfig"], res)
+	r.Accelerators = flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorsSlice(c, m["accelerators"], res)
+	r.MinCpuPlatform = dcl.FlattenString(m["minCpuPlatform"])
+
+	return r
+}
+
+// expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigMap expands the contents of WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig into a JSON
+// request object.
+// NOTE(review): generated DCL marshalling helpers for the worker disk config;
+// field names here match the API wire names one-to-one.
+func expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig, res *WorkflowTemplate) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigSlice expands the contents of WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig into a JSON
+// request object.
+func expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigSlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig{}
+	}
+
+	if len(a) == 0 {
+		return map[string]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig{}
+	}
+
+	items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig)
+	for k, item := range a {
+		items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig{}
+	}
+
+	if len(a) == 0 {
+		return []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig{}
+	}
+
+	items := make([]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig expands an instance of WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig into a JSON
+// request object.
+func expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig(c *Client, f *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig, res *WorkflowTemplate) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.BootDiskType; !dcl.IsEmptyValueIndirect(v) {
+		m["bootDiskType"] = v
+	}
+	if v := f.BootDiskSizeGb; !dcl.IsEmptyValueIndirect(v) {
+		m["bootDiskSizeGb"] = v
+	}
+	if v := f.NumLocalSsds; !dcl.IsEmptyValueIndirect(v) {
+		m["numLocalSsds"] = v
+	}
+
+	return m, nil
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig flattens an instance of WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig{}
+
+	// Present-but-empty flattens to the Empty* sentinel (see note above).
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig
+	}
+	r.BootDiskType = dcl.FlattenString(m["bootDiskType"])
+	r.BootDiskSizeGb = dcl.FlattenInteger(m["bootDiskSizeGb"])
+	r.NumLocalSsds = dcl.FlattenInteger(m["numLocalSsds"])
+
+	return r
+}
+
+// expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigMap expands the contents of WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig into a JSON
+// request object.
+// NOTE(review): generated DCL marshalling helpers for the worker managed-group
+// config, which the expander serializes as an empty object (all fields below
+// are only ever flattened — presumably output-only; confirm against the API).
+func expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig, res *WorkflowTemplate) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigSlice expands the contents of WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig into a JSON
+// request object.
+func expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigSlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig{}
+	}
+
+	if len(a) == 0 {
+		return map[string]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig{}
+	}
+
+	items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig)
+	for k, item := range a {
+		items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig{}
+	}
+
+	if len(a) == 0 {
+		return []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig{}
+	}
+
+	items := make([]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig expands an instance of WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig into a JSON
+// request object.
+func expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig(c *Client, f *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig, res *WorkflowTemplate) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	// Intentionally empty: no writable fields in this object.
+	m := make(map[string]interface{})
+
+	return m, nil
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig flattens an instance of WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig
+	}
+	r.InstanceTemplateName = dcl.FlattenString(m["instanceTemplateName"])
+	r.InstanceGroupManagerName = dcl.FlattenString(m["instanceGroupManagerName"])
+
+	return r
+}
+
+// expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorsMap expands the contents of WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators into a JSON
+// request object.
+func expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorsMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators, res *WorkflowTemplate) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorsSlice expands the contents of WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators into a JSON
+// request object.
+// NOTE(review): generated DCL marshalling helpers for worker accelerators and
+// the secondary-worker group's map/slice wrappers.
+func expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorsSlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators, res *WorkflowTemplate) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorsMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorsMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators{}
+	}
+
+	if len(a) == 0 {
+		return map[string]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators{}
+	}
+
+	items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators)
+	for k, item := range a {
+		items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorsSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorsSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators{}
+	}
+
+	if len(a) == 0 {
+		return []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators{}
+	}
+
+	items := make([]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators expands an instance of WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators into a JSON
+// request object.
+// NOTE(review): guard is `f == nil` (not IsEmptyValueIndirect) — slice elements
+// are serialized even when empty; this matches the generator's convention.
+func expandWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators(c *Client, f *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators, res *WorkflowTemplate) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.AcceleratorType; !dcl.IsEmptyValueIndirect(v) {
+		m["acceleratorTypeUri"] = v
+	}
+	if v := f.AcceleratorCount; !dcl.IsEmptyValueIndirect(v) {
+		m["acceleratorCount"] = v
+	}
+
+	return m, nil
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators flattens an instance of WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators
+	}
+	r.AcceleratorType = dcl.FlattenString(m["acceleratorTypeUri"])
+	r.AcceleratorCount = dcl.FlattenInteger(m["acceleratorCount"])
+
+	return r
+}
+
+// expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigMap expands the contents of WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig into a JSON
+// request object.
+func expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig, res *WorkflowTemplate) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigSlice expands the contents of WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig into a JSON
+// request object.
+func expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigSlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig{}
+	}
+
+	if len(a) == 0 {
+		return map[string]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig{}
+	}
+
+	items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig)
+	for k, item := range a {
+		items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig from a JSON
+// response object.
+// NOTE(review): secondary-worker variants — structurally identical to the
+// primary WorkerConfig helpers above; generated DCL code, do not hand-edit.
+func flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig{}
+	}
+
+	if len(a) == 0 {
+		return []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig{}
+	}
+
+	items := make([]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig expands an instance of WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig into a JSON
+// request object.
+// NOTE(review): Image -> "imageUri", MachineType -> "machineTypeUri" on the wire.
+func expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig(c *Client, f *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig, res *WorkflowTemplate) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.NumInstances; !dcl.IsEmptyValueIndirect(v) {
+		m["numInstances"] = v
+	}
+	if v := f.Image; !dcl.IsEmptyValueIndirect(v) {
+		m["imageUri"] = v
+	}
+	if v := f.MachineType; !dcl.IsEmptyValueIndirect(v) {
+		m["machineTypeUri"] = v
+	}
+	if v, err := expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig(c, f.DiskConfig, res); err != nil {
+		return nil, fmt.Errorf("error expanding DiskConfig into diskConfig: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["diskConfig"] = v
+	}
+	if v := f.Preemptibility; !dcl.IsEmptyValueIndirect(v) {
+		m["preemptibility"] = v
+	}
+	if v, err := expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorsSlice(c, f.Accelerators, res); err != nil {
+		return nil, fmt.Errorf("error expanding Accelerators into accelerators: %w", err)
+	} else if v != nil {
+		m["accelerators"] = v
+	}
+	if v := f.MinCpuPlatform; !dcl.IsEmptyValueIndirect(v) {
+		m["minCpuPlatform"] = v
+	}
+
+	return m, nil
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig flattens an instance of WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig from a JSON
+// response object.
+// NOTE(review): InstanceNames, IsPreemptible and ManagedGroupConfig are never
+// expanded above — presumably output-only; confirm against the Dataproc API.
+func flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig
+	}
+	r.NumInstances = dcl.FlattenInteger(m["numInstances"])
+	r.InstanceNames = dcl.FlattenStringSlice(m["instanceNames"])
+	r.Image = dcl.FlattenString(m["imageUri"])
+	r.MachineType = dcl.FlattenString(m["machineTypeUri"])
+	r.DiskConfig = flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig(c, m["diskConfig"], res)
+	r.IsPreemptible = dcl.FlattenBool(m["isPreemptible"])
+	r.Preemptibility = flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum(m["preemptibility"])
+	r.ManagedGroupConfig = flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig(c, m["managedGroupConfig"], res)
+	r.Accelerators = flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorsSlice(c, m["accelerators"], res)
+	r.MinCpuPlatform = dcl.FlattenString(m["minCpuPlatform"])
+
+	return r
+}
+
+// expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigMap expands the contents of WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig into a JSON
+// request object.
+func expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig, res *WorkflowTemplate) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigSlice expands the contents of WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig into a JSON
+// request object.
+func expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigSlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig{}
+	}
+
+	if len(a) == 0 {
+		return map[string]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig{}
+	}
+
+	items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig)
+	for k, item := range a {
+		items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig{}
+	}
+
+	if len(a) == 0 {
+		return []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig{}
+	}
+
+	items := make([]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig expands an instance of WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig into a JSON
+// request object.
+func expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig(c *Client, f *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig, res *WorkflowTemplate) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.BootDiskType; !dcl.IsEmptyValueIndirect(v) {
+		m["bootDiskType"] = v
+	}
+	if v := f.BootDiskSizeGb; !dcl.IsEmptyValueIndirect(v) {
+		m["bootDiskSizeGb"] = v
+	}
+	if v := f.NumLocalSsds; !dcl.IsEmptyValueIndirect(v) {
+		m["numLocalSsds"] = v
+	}
+
+	return m, nil
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig flattens an instance of WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig + } + r.BootDiskType = dcl.FlattenString(m["bootDiskType"]) + r.BootDiskSizeGb = dcl.FlattenInteger(m["bootDiskSizeGb"]) + r.NumLocalSsds = dcl.FlattenInteger(m["numLocalSsds"]) + + return r +} + +// expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigMap expands the contents of WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig into a JSON +// request object. +func expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigSlice expands the contents of WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig into a JSON +// request object. 
+func expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigSlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig from a JSON +// response object. +func flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig{} + } + + items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig) + for k, item := range a { + items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig {
+ a, ok := i.([]interface{})
+ if !ok {
+ return []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig{}
+ }
+
+ if len(a) == 0 {
+ return []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig{}
+ }
+
+ items := make([]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig, 0, len(a))
+ for _, item := range a {
+ items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig(c, item.(map[string]interface{}), res)) // assumes each element is a JSON object — panics otherwise
+ }
+
+ return items
+}
+
+// expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig expands an instance of WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig into a JSON
+// request object.
+func expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig(c *Client, f *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig, res *WorkflowTemplate) (map[string]interface{}, error) {
+ if dcl.IsEmptyValueIndirect(f) {
+ return nil, nil
+ }
+
+ m := make(map[string]interface{}) // no fields are sent: both fields of this type are output-only in the API
+
+ return m, nil
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig flattens an instance of WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig {
+ m, ok := i.(map[string]interface{})
+ if !ok { // non-object JSON input: treat the field as absent
+ return nil
+ }
+
+ r := &WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig{}
+
+ if dcl.IsEmptyValueIndirect(i) {
+ return EmptyWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig // present-but-empty object: shared Empty sentinel, not nil
+ }
+ r.InstanceTemplateName = dcl.FlattenString(m["instanceTemplateName"])
+ r.InstanceGroupManagerName = dcl.FlattenString(m["instanceGroupManagerName"])
+
+ return r
+}
+
+// expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorsMap expands the contents of WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators into a JSON
+// request object.
+func expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorsMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators, res *WorkflowTemplate) (map[string]interface{}, error) {
+ if f == nil {
+ return nil, nil
+ }
+
+ items := make(map[string]interface{})
+ for k, item := range f {
+ i, err := expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators(c, &item, res)
+ if err != nil {
+ return nil, err
+ }
+ if i != nil { // entries that expand to nil are dropped from the map
+ items[k] = i
+ }
+ }
+
+ return items, nil
+}
+
+// expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorsSlice expands the contents of WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators into a JSON
+// request object.
+func expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorsSlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators, res *WorkflowTemplate) ([]map[string]interface{}, error) {
+ if f == nil {
+ return nil, nil
+ }
+
+ items := []map[string]interface{}{}
+ for _, item := range f {
+ i, err := expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators(c, &item, res)
+ if err != nil {
+ return nil, err
+ }
+
+ items = append(items, i) // NOTE: nil expansions are appended as-is, unlike the Map variant which filters them
+ }
+
+ return items, nil
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorsMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorsMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators {
+ a, ok := i.(map[string]interface{})
+ if !ok {
+ return map[string]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators{}
+ }
+
+ if len(a) == 0 {
+ return map[string]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators{}
+ }
+
+ items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators)
+ for k, item := range a {
+ items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators(c, item.(map[string]interface{}), res) // assumes each element is a JSON object — panics otherwise
+ }
+
+ return items
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorsSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorsSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators {
+ a, ok := i.([]interface{})
+ if !ok {
+ return []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators{}
+ }
+
+ if len(a) == 0 {
+ return []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators{}
+ }
+
+ items := make([]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators, 0, len(a))
+ for _, item := range a {
+ items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators(c, item.(map[string]interface{}), res)) // assumes each element is a JSON object — panics otherwise
+ }
+
+ return items
+}
+
+// expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators expands an instance of WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators into a JSON
+// request object.
+func expandWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators(c *Client, f *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators, res *WorkflowTemplate) (map[string]interface{}, error) {
+ if f == nil { // only nil is skipped (not empty values): empty structs from slice elements are still emitted, unlike sibling expanders that use dcl.IsEmptyValueIndirect
+ return nil, nil
+ }
+
+ m := make(map[string]interface{})
+ if v := f.AcceleratorType; !dcl.IsEmptyValueIndirect(v) {
+ m["acceleratorTypeUri"] = v // field is renamed for the wire format: AcceleratorType -> acceleratorTypeUri
+ }
+ if v := f.AcceleratorCount; !dcl.IsEmptyValueIndirect(v) {
+ m["acceleratorCount"] = v
+ }
+
+ return m, nil
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators flattens an instance of WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators {
+ m, ok := i.(map[string]interface{})
+ if !ok { // non-object JSON input: treat the field as absent
+ return nil
+ }
+
+ r := &WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators{}
+
+ if dcl.IsEmptyValueIndirect(i) {
+ return EmptyWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators // present-but-empty object: shared Empty sentinel, not nil
+ }
+ r.AcceleratorType = dcl.FlattenString(m["acceleratorTypeUri"]) // wire name acceleratorTypeUri maps back to AcceleratorType
+ r.AcceleratorCount = dcl.FlattenInteger(m["acceleratorCount"])
+
+ return r
+}
+
+// expandWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigMap expands the contents of WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig into a JSON
+// request object.
+func expandWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig, res *WorkflowTemplate) (map[string]interface{}, error) {
+ if f == nil {
+ return nil, nil
+ }
+
+ items := make(map[string]interface{})
+ for k, item := range f {
+ i, err := expandWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig(c, &item, res)
+ if err != nil {
+ return nil, err
+ }
+ if i != nil { // entries that expand to nil are dropped from the map
+ items[k] = i
+ }
+ }
+
+ return items, nil
+}
+
+// expandWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigSlice expands the contents of WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig into a JSON
+// request object.
+func expandWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigSlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) {
+ if f == nil {
+ return nil, nil
+ }
+
+ items := []map[string]interface{}{}
+ for _, item := range f {
+ i, err := expandWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig(c, &item, res)
+ if err != nil {
+ return nil, err
+ }
+
+ items = append(items, i) // NOTE: nil expansions are appended as-is, unlike the Map variant which filters them
+ }
+
+ return items, nil
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig {
+ a, ok := i.(map[string]interface{})
+ if !ok {
+ return map[string]WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig{}
+ }
+
+ if len(a) == 0 {
+ return map[string]WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig{}
+ }
+
+ items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig)
+ for k, item := range a {
+ items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig(c, item.(map[string]interface{}), res) // assumes each element is a JSON object — panics otherwise
+ }
+
+ return items
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig {
+ a, ok := i.([]interface{})
+ if !ok {
+ return []WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig{}
+ }
+
+ if len(a) == 0 {
+ return []WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig{}
+ }
+
+ items := make([]WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig, 0, len(a))
+ for _, item := range a {
+ items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig(c, item.(map[string]interface{}), res)) // assumes each element is a JSON object — panics otherwise
+ }
+
+ return items
+}
+
+// expandWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig expands an instance of WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig into a JSON
+// request object.
+func expandWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig(c *Client, f *WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig, res *WorkflowTemplate) (map[string]interface{}, error) {
+ if dcl.IsEmptyValueIndirect(f) {
+ return nil, nil
+ }
+
+ m := make(map[string]interface{})
+ if v := f.ImageVersion; !dcl.IsEmptyValueIndirect(v) {
+ m["imageVersion"] = v
+ }
+ if v := f.Properties; !dcl.IsEmptyValueIndirect(v) {
+ m["properties"] = v
+ }
+ if v := f.OptionalComponents; v != nil { // slice field: emitted whenever non-nil, even when empty
+ m["optionalComponents"] = v
+ }
+
+ return m, nil
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig flattens an instance of WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig {
+ m, ok := i.(map[string]interface{})
+ if !ok { // non-object JSON input: treat the field as absent
+ return nil
+ }
+
+ r := &WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig{}
+
+ if dcl.IsEmptyValueIndirect(i) {
+ return EmptyWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig // present-but-empty object: shared Empty sentinel, not nil
+ }
+ r.ImageVersion = dcl.FlattenString(m["imageVersion"])
+ r.Properties = dcl.FlattenKeyValuePairs(m["properties"])
+ r.OptionalComponents = flattenWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnumSlice(c, m["optionalComponents"], res)
+
+ return r
+}
+
+// expandWorkflowTemplatePlacementManagedClusterConfigInitializationActionsMap expands the contents of WorkflowTemplatePlacementManagedClusterConfigInitializationActions into a JSON
+// request object.
+func expandWorkflowTemplatePlacementManagedClusterConfigInitializationActionsMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfigInitializationActions, res *WorkflowTemplate) (map[string]interface{}, error) {
+ if f == nil {
+ return nil, nil
+ }
+
+ items := make(map[string]interface{})
+ for k, item := range f {
+ i, err := expandWorkflowTemplatePlacementManagedClusterConfigInitializationActions(c, &item, res)
+ if err != nil {
+ return nil, err
+ }
+ if i != nil { // entries that expand to nil are dropped from the map
+ items[k] = i
+ }
+ }
+
+ return items, nil
+}
+
+// expandWorkflowTemplatePlacementManagedClusterConfigInitializationActionsSlice expands the contents of WorkflowTemplatePlacementManagedClusterConfigInitializationActions into a JSON
+// request object.
+func expandWorkflowTemplatePlacementManagedClusterConfigInitializationActionsSlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfigInitializationActions, res *WorkflowTemplate) ([]map[string]interface{}, error) {
+ if f == nil {
+ return nil, nil
+ }
+
+ items := []map[string]interface{}{}
+ for _, item := range f {
+ i, err := expandWorkflowTemplatePlacementManagedClusterConfigInitializationActions(c, &item, res)
+ if err != nil {
+ return nil, err
+ }
+
+ items = append(items, i) // NOTE: nil expansions are appended as-is, unlike the Map variant which filters them
+ }
+
+ return items, nil
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigInitializationActionsMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigInitializationActions from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigInitializationActionsMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigInitializationActions {
+ a, ok := i.(map[string]interface{})
+ if !ok {
+ return map[string]WorkflowTemplatePlacementManagedClusterConfigInitializationActions{}
+ }
+
+ if len(a) == 0 {
+ return map[string]WorkflowTemplatePlacementManagedClusterConfigInitializationActions{}
+ }
+
+ items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigInitializationActions)
+ for k, item := range a {
+ items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigInitializationActions(c, item.(map[string]interface{}), res) // assumes each element is a JSON object — panics otherwise
+ }
+
+ return items
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigInitializationActionsSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigInitializationActions from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigInitializationActionsSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigInitializationActions {
+ a, ok := i.([]interface{})
+ if !ok {
+ return []WorkflowTemplatePlacementManagedClusterConfigInitializationActions{}
+ }
+
+ if len(a) == 0 {
+ return []WorkflowTemplatePlacementManagedClusterConfigInitializationActions{}
+ }
+
+ items := make([]WorkflowTemplatePlacementManagedClusterConfigInitializationActions, 0, len(a))
+ for _, item := range a {
+ items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigInitializationActions(c, item.(map[string]interface{}), res)) // assumes each element is a JSON object — panics otherwise
+ }
+
+ return items
+}
+
+// expandWorkflowTemplatePlacementManagedClusterConfigInitializationActions expands an instance of WorkflowTemplatePlacementManagedClusterConfigInitializationActions into a JSON
+// request object.
+func expandWorkflowTemplatePlacementManagedClusterConfigInitializationActions(c *Client, f *WorkflowTemplatePlacementManagedClusterConfigInitializationActions, res *WorkflowTemplate) (map[string]interface{}, error) {
+ if f == nil { // only nil is skipped (not empty values): empty structs from slice elements are still emitted, unlike sibling expanders that use dcl.IsEmptyValueIndirect
+ return nil, nil
+ }
+
+ m := make(map[string]interface{})
+ if v := f.ExecutableFile; !dcl.IsEmptyValueIndirect(v) {
+ m["executableFile"] = v
+ }
+ if v := f.ExecutionTimeout; !dcl.IsEmptyValueIndirect(v) {
+ m["executionTimeout"] = v
+ }
+
+ return m, nil
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigInitializationActions flattens an instance of WorkflowTemplatePlacementManagedClusterConfigInitializationActions from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigInitializationActions(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfigInitializationActions {
+ m, ok := i.(map[string]interface{})
+ if !ok { // non-object JSON input: treat the field as absent
+ return nil
+ }
+
+ r := &WorkflowTemplatePlacementManagedClusterConfigInitializationActions{}
+
+ if dcl.IsEmptyValueIndirect(i) {
+ return EmptyWorkflowTemplatePlacementManagedClusterConfigInitializationActions // present-but-empty object: shared Empty sentinel, not nil
+ }
+ r.ExecutableFile = dcl.FlattenString(m["executableFile"])
+ r.ExecutionTimeout = dcl.FlattenString(m["executionTimeout"])
+
+ return r
+}
+
+// expandWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigMap expands the contents of WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig into a JSON
+// request object.
+func expandWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig, res *WorkflowTemplate) (map[string]interface{}, error) {
+ if f == nil {
+ return nil, nil
+ }
+
+ items := make(map[string]interface{})
+ for k, item := range f {
+ i, err := expandWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig(c, &item, res)
+ if err != nil {
+ return nil, err
+ }
+ if i != nil { // entries that expand to nil are dropped from the map
+ items[k] = i
+ }
+ }
+
+ return items, nil
+}
+
+// expandWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigSlice expands the contents of WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig into a JSON
+// request object.
+func expandWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigSlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) {
+ if f == nil {
+ return nil, nil
+ }
+
+ items := []map[string]interface{}{}
+ for _, item := range f {
+ i, err := expandWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig(c, &item, res)
+ if err != nil {
+ return nil, err
+ }
+
+ items = append(items, i) // NOTE: nil expansions are appended as-is, unlike the Map variant which filters them
+ }
+
+ return items, nil
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig {
+ a, ok := i.(map[string]interface{})
+ if !ok {
+ return map[string]WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig{}
+ }
+
+ if len(a) == 0 {
+ return map[string]WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig{}
+ }
+
+ items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig)
+ for k, item := range a {
+ items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig(c, item.(map[string]interface{}), res) // assumes each element is a JSON object — panics otherwise
+ }
+
+ return items
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig {
+ a, ok := i.([]interface{})
+ if !ok {
+ return []WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig{}
+ }
+
+ if len(a) == 0 {
+ return []WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig{}
+ }
+
+ items := make([]WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig, 0, len(a))
+ for _, item := range a {
+ items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig(c, item.(map[string]interface{}), res)) // assumes each element is a JSON object — panics otherwise
+ }
+
+ return items
+}
+
+// expandWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig expands an instance of WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig into a JSON
+// request object.
+func expandWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig(c *Client, f *WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig, res *WorkflowTemplate) (map[string]interface{}, error) {
+ if dcl.IsEmptyValueIndirect(f) {
+ return nil, nil
+ }
+
+ m := make(map[string]interface{})
+ if v := f.GcePdKmsKeyName; !dcl.IsEmptyValueIndirect(v) {
+ m["gcePdKmsKeyName"] = v
+ }
+
+ return m, nil
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig flattens an instance of WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig {
+ m, ok := i.(map[string]interface{})
+ if !ok { // non-object JSON input: treat the field as absent
+ return nil
+ }
+
+ r := &WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig{}
+
+ if dcl.IsEmptyValueIndirect(i) {
+ return EmptyWorkflowTemplatePlacementManagedClusterConfigEncryptionConfig // present-but-empty object: shared Empty sentinel, not nil
+ }
+ r.GcePdKmsKeyName = dcl.FlattenString(m["gcePdKmsKeyName"])
+
+ return r
+}
+
+// expandWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigMap expands the contents of WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig into a JSON
+// request object.
+func expandWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig, res *WorkflowTemplate) (map[string]interface{}, error) {
+ if f == nil {
+ return nil, nil
+ }
+
+ items := make(map[string]interface{})
+ for k, item := range f {
+ i, err := expandWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig(c, &item, res)
+ if err != nil {
+ return nil, err
+ }
+ if i != nil { // entries that expand to nil are dropped from the map
+ items[k] = i
+ }
+ }
+
+ return items, nil
+}
+
+// expandWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigSlice expands the contents of WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig into a JSON
+// request object.
+func expandWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigSlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) {
+ if f == nil {
+ return nil, nil
+ }
+
+ items := []map[string]interface{}{}
+ for _, item := range f {
+ i, err := expandWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig(c, &item, res)
+ if err != nil {
+ return nil, err
+ }
+
+ items = append(items, i) // NOTE: nil expansions are appended as-is, unlike the Map variant which filters them
+ }
+
+ return items, nil
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig {
+ a, ok := i.(map[string]interface{})
+ if !ok {
+ return map[string]WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig{}
+ }
+
+ if len(a) == 0 {
+ return map[string]WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig{}
+ }
+
+ items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig)
+ for k, item := range a {
+ items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig(c, item.(map[string]interface{}), res) // assumes each element is a JSON object — panics otherwise
+ }
+
+ return items
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig {
+ a, ok := i.([]interface{})
+ if !ok {
+ return []WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig{}
+ }
+
+ if len(a) == 0 {
+ return []WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig{}
+ }
+
+ items := make([]WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig, 0, len(a))
+ for _, item := range a {
+ items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig(c, item.(map[string]interface{}), res)) // assumes each element is a JSON object — panics otherwise
+ }
+
+ return items
+}
+
+// expandWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig expands an instance of WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig into a JSON
+// request object.
+func expandWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig(c *Client, f *WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig, res *WorkflowTemplate) (map[string]interface{}, error) {
+ if dcl.IsEmptyValueIndirect(f) {
+ return nil, nil
+ }
+
+ m := make(map[string]interface{})
+ if v := f.Policy; !dcl.IsEmptyValueIndirect(v) {
+ m["policyUri"] = v // field is renamed for the wire format: Policy -> policyUri
+ }
+
+ return m, nil
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig flattens an instance of WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig {
+ m, ok := i.(map[string]interface{})
+ if !ok { // non-object JSON input: treat the field as absent
+ return nil
+ }
+
+ r := &WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig{}
+
+ if dcl.IsEmptyValueIndirect(i) {
+ return EmptyWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig // present-but-empty object: shared Empty sentinel, not nil
+ }
+ r.Policy = dcl.FlattenString(m["policyUri"]) // wire name policyUri maps back to Policy
+
+ return r
+}
+
+// expandWorkflowTemplatePlacementManagedClusterConfigSecurityConfigMap expands the contents of WorkflowTemplatePlacementManagedClusterConfigSecurityConfig into a JSON
+// request object.
+func expandWorkflowTemplatePlacementManagedClusterConfigSecurityConfigMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfigSecurityConfig, res *WorkflowTemplate) (map[string]interface{}, error) {
+ if f == nil {
+ return nil, nil
+ }
+
+ items := make(map[string]interface{})
+ for k, item := range f {
+ i, err := expandWorkflowTemplatePlacementManagedClusterConfigSecurityConfig(c, &item, res)
+ if err != nil {
+ return nil, err
+ }
+ if i != nil { // entries that expand to nil are dropped from the map
+ items[k] = i
+ }
+ }
+
+ return items, nil
+}
+
+// expandWorkflowTemplatePlacementManagedClusterConfigSecurityConfigSlice expands the contents of WorkflowTemplatePlacementManagedClusterConfigSecurityConfig into a JSON
+// request object.
+func expandWorkflowTemplatePlacementManagedClusterConfigSecurityConfigSlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfigSecurityConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) {
+ if f == nil {
+ return nil, nil
+ }
+
+ items := []map[string]interface{}{}
+ for _, item := range f {
+ i, err := expandWorkflowTemplatePlacementManagedClusterConfigSecurityConfig(c, &item, res)
+ if err != nil {
+ return nil, err
+ }
+
+ items = append(items, i) // NOTE: nil expansions are appended as-is, unlike the Map variant which filters them
+ }
+
+ return items, nil
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigSecurityConfigMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigSecurityConfig from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigSecurityConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigSecurityConfig {
+ a, ok := i.(map[string]interface{})
+ if !ok {
+ return map[string]WorkflowTemplatePlacementManagedClusterConfigSecurityConfig{}
+ }
+
+ if len(a) == 0 {
+ return map[string]WorkflowTemplatePlacementManagedClusterConfigSecurityConfig{}
+ }
+
+ items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigSecurityConfig)
+ for k, item := range a {
+ items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigSecurityConfig(c, item.(map[string]interface{}), res) // assumes each element is a JSON object — panics otherwise
+ }
+
+ return items
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigSecurityConfigSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigSecurityConfig from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigSecurityConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigSecurityConfig {
+ a, ok := i.([]interface{})
+ if !ok {
+ return []WorkflowTemplatePlacementManagedClusterConfigSecurityConfig{}
+ }
+
+ if len(a) == 0 {
+ return []WorkflowTemplatePlacementManagedClusterConfigSecurityConfig{}
+ }
+
+ items := make([]WorkflowTemplatePlacementManagedClusterConfigSecurityConfig, 0, len(a))
+ for _, item := range a {
+ items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigSecurityConfig(c, item.(map[string]interface{}), res)) // assumes each element is a JSON object — panics otherwise
+ }
+
+ return items
+}
+
+// expandWorkflowTemplatePlacementManagedClusterConfigSecurityConfig expands an instance of WorkflowTemplatePlacementManagedClusterConfigSecurityConfig into a JSON
+// request object.
+func expandWorkflowTemplatePlacementManagedClusterConfigSecurityConfig(c *Client, f *WorkflowTemplatePlacementManagedClusterConfigSecurityConfig, res *WorkflowTemplate) (map[string]interface{}, error) {
+ if dcl.IsEmptyValueIndirect(f) {
+ return nil, nil
+ }
+
+ m := make(map[string]interface{})
+ if v, err := expandWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig(c, f.KerberosConfig, res); err != nil {
+ return nil, fmt.Errorf("error expanding KerberosConfig into kerberosConfig: %w", err)
+ } else if !dcl.IsEmptyValueIndirect(v) { // nested object is only attached when it expanded to something non-empty
+ m["kerberosConfig"] = v
+ }
+
+ return m, nil
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigSecurityConfig flattens an instance of WorkflowTemplatePlacementManagedClusterConfigSecurityConfig from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigSecurityConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfigSecurityConfig {
+ m, ok := i.(map[string]interface{})
+ if !ok { // non-object JSON input: treat the field as absent
+ return nil
+ }
+
+ r := &WorkflowTemplatePlacementManagedClusterConfigSecurityConfig{}
+
+ if dcl.IsEmptyValueIndirect(i) {
+ return EmptyWorkflowTemplatePlacementManagedClusterConfigSecurityConfig // present-but-empty object: shared Empty sentinel, not nil
+ }
+ r.KerberosConfig = flattenWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig(c, m["kerberosConfig"], res)
+
+ return r
+}
+
+// expandWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigMap expands the contents of WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig into a JSON
+// request object.
+func expandWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig, res *WorkflowTemplate) (map[string]interface{}, error) {
+ if f == nil {
+ return nil, nil
+ }
+
+ items := make(map[string]interface{})
+ for k, item := range f {
+ i, err := expandWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig(c, &item, res)
+ if err != nil {
+ return nil, err
+ }
+ if i != nil { // entries that expand to nil are dropped from the map
+ items[k] = i
+ }
+ }
+
+ return items, nil
+}
+
+// expandWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigSlice expands the contents of WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig into a JSON
+// request object.
+func expandWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigSlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig from a JSON +// response object. +func flattenWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig{} + } + + items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig) + for k, item := range a { + items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig from a JSON +// response object. 
+func flattenWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig{}
+	}
+
+	if len(a) == 0 {
+		return []WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig{}
+	}
+
+	items := make([]WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig expands an instance of WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig into a JSON
+// request object. Note the DCL field names drop the API's "Uri" suffix: e.g. KmsKey maps to kmsKeyUri.
+func expandWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig(c *Client, f *WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig, res *WorkflowTemplate) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.EnableKerberos; !dcl.IsEmptyValueIndirect(v) {
+		m["enableKerberos"] = v
+	}
+	if v := f.RootPrincipalPassword; !dcl.IsEmptyValueIndirect(v) {
+		m["rootPrincipalPasswordUri"] = v
+	}
+	if v := f.KmsKey; !dcl.IsEmptyValueIndirect(v) {
+		m["kmsKeyUri"] = v
+	}
+	if v := f.Keystore; !dcl.IsEmptyValueIndirect(v) {
+		m["keystoreUri"] = v
+	}
+	if v := f.Truststore; !dcl.IsEmptyValueIndirect(v) {
+		m["truststoreUri"] = v
+	}
+	if v := f.KeystorePassword; !dcl.IsEmptyValueIndirect(v) {
+		m["keystorePasswordUri"] = v
+	}
+	if v := f.KeyPassword; !dcl.IsEmptyValueIndirect(v) {
+		m["keyPasswordUri"] = v
+	}
+	if v := f.TruststorePassword; !dcl.IsEmptyValueIndirect(v) {
+		m["truststorePasswordUri"] = v
+	}
+	if v := f.CrossRealmTrustRealm; !dcl.IsEmptyValueIndirect(v) {
+		m["crossRealmTrustRealm"] = v
+	}
+	if v := f.CrossRealmTrustKdc; !dcl.IsEmptyValueIndirect(v) {
+		m["crossRealmTrustKdc"] = v
+	}
+	if v := f.CrossRealmTrustAdminServer; !dcl.IsEmptyValueIndirect(v) {
+		m["crossRealmTrustAdminServer"] = v
+	}
+	if v := f.CrossRealmTrustSharedPassword; !dcl.IsEmptyValueIndirect(v) {
+		m["crossRealmTrustSharedPasswordUri"] = v
+	}
+	if v := f.KdcDbKey; !dcl.IsEmptyValueIndirect(v) {
+		m["kdcDbKeyUri"] = v
+	}
+	if v := f.TgtLifetimeHours; !dcl.IsEmptyValueIndirect(v) {
+		m["tgtLifetimeHours"] = v
+	}
+	if v := f.Realm; !dcl.IsEmptyValueIndirect(v) {
+		m["realm"] = v
+	}
+
+	return m, nil
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig flattens an instance of WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig from a JSON
+// response object, reversing the field-to-"...Uri" key mapping used by the expander above.
+func flattenWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig
+	}
+	r.EnableKerberos = dcl.FlattenBool(m["enableKerberos"])
+	r.RootPrincipalPassword = dcl.FlattenString(m["rootPrincipalPasswordUri"])
+	r.KmsKey = dcl.FlattenString(m["kmsKeyUri"])
+	r.Keystore = dcl.FlattenString(m["keystoreUri"])
+	r.Truststore = dcl.FlattenString(m["truststoreUri"])
+	r.KeystorePassword = dcl.FlattenString(m["keystorePasswordUri"])
+	r.KeyPassword = dcl.FlattenString(m["keyPasswordUri"])
+	r.TruststorePassword = dcl.FlattenString(m["truststorePasswordUri"])
+	r.CrossRealmTrustRealm = dcl.FlattenString(m["crossRealmTrustRealm"])
+	r.CrossRealmTrustKdc = dcl.FlattenString(m["crossRealmTrustKdc"])
+	r.CrossRealmTrustAdminServer = dcl.FlattenString(m["crossRealmTrustAdminServer"])
+	r.CrossRealmTrustSharedPassword = dcl.FlattenString(m["crossRealmTrustSharedPasswordUri"])
+	r.KdcDbKey = dcl.FlattenString(m["kdcDbKeyUri"])
+	r.TgtLifetimeHours = dcl.FlattenInteger(m["tgtLifetimeHours"])
+	r.Realm = dcl.FlattenString(m["realm"])
+
+	return r
+}
+
+// expandWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigMap expands the contents of WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig into a JSON
+// request object.
+func expandWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig, res *WorkflowTemplate) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigSlice expands the contents of WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig into a JSON
+// request object.
+func expandWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigSlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig{}
+	}
+
+	if len(a) == 0 {
+		return map[string]WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig{}
+	}
+
+	items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig)
+	for k, item := range a {
+		items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig{}
+	}
+
+	if len(a) == 0 {
+		return []WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig{}
+	}
+
+	items := make([]WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig expands an instance of WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig into a JSON
+// request object. Empty (unset) values are omitted from the request.
+func expandWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig(c *Client, f *WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig, res *WorkflowTemplate) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.IdleDeleteTtl; !dcl.IsEmptyValueIndirect(v) {
+		m["idleDeleteTtl"] = v
+	}
+	if v := f.AutoDeleteTime; !dcl.IsEmptyValueIndirect(v) {
+		m["autoDeleteTime"] = v
+	}
+	if v := f.AutoDeleteTtl; !dcl.IsEmptyValueIndirect(v) {
+		m["autoDeleteTtl"] = v
+	}
+
+	return m, nil
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig flattens an instance of WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyWorkflowTemplatePlacementManagedClusterConfigLifecycleConfig
+	}
+	r.IdleDeleteTtl = dcl.FlattenString(m["idleDeleteTtl"])
+	r.AutoDeleteTime = dcl.FlattenString(m["autoDeleteTime"])
+	r.AutoDeleteTtl = dcl.FlattenString(m["autoDeleteTtl"])
+	// IdleStartTime is read from responses but never written by the expander (output-only field).
+	r.IdleStartTime = dcl.FlattenString(m["idleStartTime"])
+
+	return r
+}
+
+// expandWorkflowTemplatePlacementManagedClusterConfigEndpointConfigMap expands the contents of WorkflowTemplatePlacementManagedClusterConfigEndpointConfig into a JSON
+// request object.
+func expandWorkflowTemplatePlacementManagedClusterConfigEndpointConfigMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfigEndpointConfig, res *WorkflowTemplate) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandWorkflowTemplatePlacementManagedClusterConfigEndpointConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandWorkflowTemplatePlacementManagedClusterConfigEndpointConfigSlice expands the contents of WorkflowTemplatePlacementManagedClusterConfigEndpointConfig into a JSON
+// request object.
+func expandWorkflowTemplatePlacementManagedClusterConfigEndpointConfigSlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfigEndpointConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandWorkflowTemplatePlacementManagedClusterConfigEndpointConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigEndpointConfigMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigEndpointConfig from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigEndpointConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigEndpointConfig {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]WorkflowTemplatePlacementManagedClusterConfigEndpointConfig{}
+	}
+
+	if len(a) == 0 {
+		return map[string]WorkflowTemplatePlacementManagedClusterConfigEndpointConfig{}
+	}
+
+	items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigEndpointConfig)
+	for k, item := range a {
+		items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigEndpointConfig(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigEndpointConfigSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigEndpointConfig from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigEndpointConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigEndpointConfig {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []WorkflowTemplatePlacementManagedClusterConfigEndpointConfig{}
+	}
+
+	if len(a) == 0 {
+		return []WorkflowTemplatePlacementManagedClusterConfigEndpointConfig{}
+	}
+
+	items := make([]WorkflowTemplatePlacementManagedClusterConfigEndpointConfig, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigEndpointConfig(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandWorkflowTemplatePlacementManagedClusterConfigEndpointConfig expands an instance of WorkflowTemplatePlacementManagedClusterConfigEndpointConfig into a JSON
+// request object. Only enableHttpPortAccess is sent; httpPorts is never expanded (see the flattener below).
+func expandWorkflowTemplatePlacementManagedClusterConfigEndpointConfig(c *Client, f *WorkflowTemplatePlacementManagedClusterConfigEndpointConfig, res *WorkflowTemplate) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.EnableHttpPortAccess; !dcl.IsEmptyValueIndirect(v) {
+		m["enableHttpPortAccess"] = v
+	}
+
+	return m, nil
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigEndpointConfig flattens an instance of WorkflowTemplatePlacementManagedClusterConfigEndpointConfig from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigEndpointConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfigEndpointConfig {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &WorkflowTemplatePlacementManagedClusterConfigEndpointConfig{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyWorkflowTemplatePlacementManagedClusterConfigEndpointConfig
+	}
+	// HttpPorts is read from responses but never written by the expander (output-only field).
+	r.HttpPorts = dcl.FlattenKeyValuePairs(m["httpPorts"])
+	r.EnableHttpPortAccess = dcl.FlattenBool(m["enableHttpPortAccess"])
+
+	return r
+}
+
+{{- if ne $.TargetVersionName "ga" }}
+// expandWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigMap expands the contents of WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig into a JSON
+// request object.
+func expandWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig, res *WorkflowTemplate) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigSlice expands the contents of WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig into a JSON
+// request object.
+func expandWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigSlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig{}
+	}
+
+	if len(a) == 0 {
+		return map[string]WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig{}
+	}
+
+	items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig)
+	for k, item := range a {
+		items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig{}
+	}
+
+	if len(a) == 0 {
+		return []WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig{}
+	}
+
+	items := make([]WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig expands an instance of WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig into a JSON
+// request object. Empty (unset) values are omitted from the request.
+func expandWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig(c *Client, f *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig, res *WorkflowTemplate) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v, err := expandWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(c, f.NamespacedGkeDeploymentTarget, res); err != nil {
+		return nil, fmt.Errorf("error expanding NamespacedGkeDeploymentTarget into namespacedGkeDeploymentTarget: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["namespacedGkeDeploymentTarget"] = v
+	}
+
+	return m, nil
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig flattens an instance of WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig
+	}
+	r.NamespacedGkeDeploymentTarget = flattenWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(c, m["namespacedGkeDeploymentTarget"], res)
+
+	return r
+}
+
+// expandWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetMap expands the contents of WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget into a JSON
+// request object.
+func expandWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget, res *WorkflowTemplate) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetSlice expands the contents of WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget into a JSON
+// request object.
+func expandWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetSlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget, res *WorkflowTemplate) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget{}
+	}
+
+	if len(a) == 0 {
+		return map[string]WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget{}
+	}
+
+	items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget)
+	for k, item := range a {
+		items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget{}
+	}
+
+	if len(a) == 0 {
+		return []WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget{}
+	}
+
+	items := make([]WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget expands an instance of WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget into a JSON
+// request object. Empty (unset) values are omitted from the request.
+func expandWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(c *Client, f *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget, res *WorkflowTemplate) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.TargetGkeCluster; !dcl.IsEmptyValueIndirect(v) {
+		m["targetGkeCluster"] = v
+	}
+	if v := f.ClusterNamespace; !dcl.IsEmptyValueIndirect(v) {
+		m["clusterNamespace"] = v
+	}
+
+	return m, nil
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget flattens an instance of WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget
+	}
+	r.TargetGkeCluster = dcl.FlattenString(m["targetGkeCluster"])
+	r.ClusterNamespace = dcl.FlattenString(m["clusterNamespace"])
+
+	return r
+}
+
+// expandWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigMap expands the contents of WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig into a JSON
+// request object.
+func expandWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigMap(c *Client, f map[string]WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig, res *WorkflowTemplate) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigSlice expands the contents of WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig into a JSON
+// request object.
+func expandWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigSlice(c *Client, f []WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig{}
+	}
+
+	if len(a) == 0 {
+		return map[string]WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig{}
+	}
+
+	items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig)
+	for k, item := range a {
+		items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig{}
+	}
+
+	if len(a) == 0 {
+		return []WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig{}
+	}
+
+	items := make([]WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig expands an instance of WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig into a JSON
+// request object. Empty (unset) values are omitted from the request.
+func expandWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig(c *Client, f *WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig, res *WorkflowTemplate) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.DataprocMetastoreService; !dcl.IsEmptyValueIndirect(v) {
+		m["dataprocMetastoreService"] = v
+	}
+
+	return m, nil
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig flattens an instance of WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyWorkflowTemplatePlacementManagedClusterConfigMetastoreConfig
+	}
+	r.DataprocMetastoreService = dcl.FlattenString(m["dataprocMetastoreService"])
+
+	return r
+}
+
+{{- end }}
+// expandWorkflowTemplatePlacementClusterSelectorMap expands the contents of WorkflowTemplatePlacementClusterSelector into a JSON
+// request object.
+func expandWorkflowTemplatePlacementClusterSelectorMap(c *Client, f map[string]WorkflowTemplatePlacementClusterSelector, res *WorkflowTemplate) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandWorkflowTemplatePlacementClusterSelector(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandWorkflowTemplatePlacementClusterSelectorSlice expands the contents of WorkflowTemplatePlacementClusterSelector into a JSON
+// request object.
+func expandWorkflowTemplatePlacementClusterSelectorSlice(c *Client, f []WorkflowTemplatePlacementClusterSelector, res *WorkflowTemplate) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandWorkflowTemplatePlacementClusterSelector(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenWorkflowTemplatePlacementClusterSelectorMap flattens the contents of WorkflowTemplatePlacementClusterSelector from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementClusterSelectorMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementClusterSelector { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplatePlacementClusterSelector{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplatePlacementClusterSelector{} + } + + items := make(map[string]WorkflowTemplatePlacementClusterSelector) + for k, item := range a { + items[k] = *flattenWorkflowTemplatePlacementClusterSelector(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplatePlacementClusterSelectorSlice flattens the contents of WorkflowTemplatePlacementClusterSelector from a JSON +// response object. +func flattenWorkflowTemplatePlacementClusterSelectorSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementClusterSelector { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplatePlacementClusterSelector{} + } + + if len(a) == 0 { + return []WorkflowTemplatePlacementClusterSelector{} + } + + items := make([]WorkflowTemplatePlacementClusterSelector, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplatePlacementClusterSelector(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplatePlacementClusterSelector expands an instance of WorkflowTemplatePlacementClusterSelector into a JSON +// request object. 
+func expandWorkflowTemplatePlacementClusterSelector(c *Client, f *WorkflowTemplatePlacementClusterSelector, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Zone; !dcl.IsEmptyValueIndirect(v) { + m["zone"] = v + } + if v := f.ClusterLabels; !dcl.IsEmptyValueIndirect(v) { + m["clusterLabels"] = v + } + + return m, nil +} + +// flattenWorkflowTemplatePlacementClusterSelector flattens an instance of WorkflowTemplatePlacementClusterSelector from a JSON +// response object. +func flattenWorkflowTemplatePlacementClusterSelector(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplatePlacementClusterSelector { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplatePlacementClusterSelector{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplatePlacementClusterSelector + } + r.Zone = dcl.FlattenString(m["zone"]) + r.ClusterLabels = dcl.FlattenKeyValuePairs(m["clusterLabels"]) + + return r +} + +// expandWorkflowTemplateJobsMap expands the contents of WorkflowTemplateJobs into a JSON +// request object. +func expandWorkflowTemplateJobsMap(c *Client, f map[string]WorkflowTemplateJobs, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplateJobs(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplateJobsSlice expands the contents of WorkflowTemplateJobs into a JSON +// request object. 
+func expandWorkflowTemplateJobsSlice(c *Client, f []WorkflowTemplateJobs, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplateJobs(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplateJobsMap flattens the contents of WorkflowTemplateJobs from a JSON +// response object. +func flattenWorkflowTemplateJobsMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplateJobs { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplateJobs{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplateJobs{} + } + + items := make(map[string]WorkflowTemplateJobs) + for k, item := range a { + items[k] = *flattenWorkflowTemplateJobs(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplateJobsSlice flattens the contents of WorkflowTemplateJobs from a JSON +// response object. +func flattenWorkflowTemplateJobsSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplateJobs { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplateJobs{} + } + + if len(a) == 0 { + return []WorkflowTemplateJobs{} + } + + items := make([]WorkflowTemplateJobs, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplateJobs(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplateJobs expands an instance of WorkflowTemplateJobs into a JSON +// request object. 
+func expandWorkflowTemplateJobs(c *Client, f *WorkflowTemplateJobs, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.StepId; !dcl.IsEmptyValueIndirect(v) { + m["stepId"] = v + } + if v, err := expandWorkflowTemplateJobsHadoopJob(c, f.HadoopJob, res); err != nil { + return nil, fmt.Errorf("error expanding HadoopJob into hadoopJob: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["hadoopJob"] = v + } + if v, err := expandWorkflowTemplateJobsSparkJob(c, f.SparkJob, res); err != nil { + return nil, fmt.Errorf("error expanding SparkJob into sparkJob: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["sparkJob"] = v + } + if v, err := expandWorkflowTemplateJobsPysparkJob(c, f.PysparkJob, res); err != nil { + return nil, fmt.Errorf("error expanding PysparkJob into pysparkJob: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["pysparkJob"] = v + } + if v, err := expandWorkflowTemplateJobsHiveJob(c, f.HiveJob, res); err != nil { + return nil, fmt.Errorf("error expanding HiveJob into hiveJob: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["hiveJob"] = v + } + if v, err := expandWorkflowTemplateJobsPigJob(c, f.PigJob, res); err != nil { + return nil, fmt.Errorf("error expanding PigJob into pigJob: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["pigJob"] = v + } + if v, err := expandWorkflowTemplateJobsSparkRJob(c, f.SparkRJob, res); err != nil { + return nil, fmt.Errorf("error expanding SparkRJob into sparkRJob: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["sparkRJob"] = v + } + if v, err := expandWorkflowTemplateJobsSparkSqlJob(c, f.SparkSqlJob, res); err != nil { + return nil, fmt.Errorf("error expanding SparkSqlJob into sparkSqlJob: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["sparkSqlJob"] = v + } + if v, err := expandWorkflowTemplateJobsPrestoJob(c, f.PrestoJob, res); err != nil { + return nil, 
fmt.Errorf("error expanding PrestoJob into prestoJob: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["prestoJob"] = v + } + if v := f.Labels; !dcl.IsEmptyValueIndirect(v) { + m["labels"] = v + } + if v, err := expandWorkflowTemplateJobsScheduling(c, f.Scheduling, res); err != nil { + return nil, fmt.Errorf("error expanding Scheduling into scheduling: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["scheduling"] = v + } + if v := f.PrerequisiteStepIds; v != nil { + m["prerequisiteStepIds"] = v + } + + return m, nil +} + +// flattenWorkflowTemplateJobs flattens an instance of WorkflowTemplateJobs from a JSON +// response object. +func flattenWorkflowTemplateJobs(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplateJobs { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplateJobs{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplateJobs + } + r.StepId = dcl.FlattenString(m["stepId"]) + r.HadoopJob = flattenWorkflowTemplateJobsHadoopJob(c, m["hadoopJob"], res) + r.SparkJob = flattenWorkflowTemplateJobsSparkJob(c, m["sparkJob"], res) + r.PysparkJob = flattenWorkflowTemplateJobsPysparkJob(c, m["pysparkJob"], res) + r.HiveJob = flattenWorkflowTemplateJobsHiveJob(c, m["hiveJob"], res) + r.PigJob = flattenWorkflowTemplateJobsPigJob(c, m["pigJob"], res) + r.SparkRJob = flattenWorkflowTemplateJobsSparkRJob(c, m["sparkRJob"], res) + r.SparkSqlJob = flattenWorkflowTemplateJobsSparkSqlJob(c, m["sparkSqlJob"], res) + r.PrestoJob = flattenWorkflowTemplateJobsPrestoJob(c, m["prestoJob"], res) + r.Labels = dcl.FlattenKeyValuePairs(m["labels"]) + r.Scheduling = flattenWorkflowTemplateJobsScheduling(c, m["scheduling"], res) + r.PrerequisiteStepIds = dcl.FlattenStringSlice(m["prerequisiteStepIds"]) + + return r +} + +// expandWorkflowTemplateJobsHadoopJobMap expands the contents of WorkflowTemplateJobsHadoopJob into a JSON +// request object. 
+func expandWorkflowTemplateJobsHadoopJobMap(c *Client, f map[string]WorkflowTemplateJobsHadoopJob, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplateJobsHadoopJob(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplateJobsHadoopJobSlice expands the contents of WorkflowTemplateJobsHadoopJob into a JSON +// request object. +func expandWorkflowTemplateJobsHadoopJobSlice(c *Client, f []WorkflowTemplateJobsHadoopJob, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplateJobsHadoopJob(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplateJobsHadoopJobMap flattens the contents of WorkflowTemplateJobsHadoopJob from a JSON +// response object. +func flattenWorkflowTemplateJobsHadoopJobMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplateJobsHadoopJob { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplateJobsHadoopJob{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplateJobsHadoopJob{} + } + + items := make(map[string]WorkflowTemplateJobsHadoopJob) + for k, item := range a { + items[k] = *flattenWorkflowTemplateJobsHadoopJob(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplateJobsHadoopJobSlice flattens the contents of WorkflowTemplateJobsHadoopJob from a JSON +// response object. 
+func flattenWorkflowTemplateJobsHadoopJobSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplateJobsHadoopJob { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplateJobsHadoopJob{} + } + + if len(a) == 0 { + return []WorkflowTemplateJobsHadoopJob{} + } + + items := make([]WorkflowTemplateJobsHadoopJob, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplateJobsHadoopJob(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplateJobsHadoopJob expands an instance of WorkflowTemplateJobsHadoopJob into a JSON +// request object. +func expandWorkflowTemplateJobsHadoopJob(c *Client, f *WorkflowTemplateJobsHadoopJob, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.MainJarFileUri; !dcl.IsEmptyValueIndirect(v) { + m["mainJarFileUri"] = v + } + if v := f.MainClass; !dcl.IsEmptyValueIndirect(v) { + m["mainClass"] = v + } + if v := f.Args; v != nil { + m["args"] = v + } + if v := f.JarFileUris; v != nil { + m["jarFileUris"] = v + } + if v := f.FileUris; v != nil { + m["fileUris"] = v + } + if v := f.ArchiveUris; v != nil { + m["archiveUris"] = v + } + if v := f.Properties; !dcl.IsEmptyValueIndirect(v) { + m["properties"] = v + } + if v, err := expandWorkflowTemplateJobsHadoopJobLoggingConfig(c, f.LoggingConfig, res); err != nil { + return nil, fmt.Errorf("error expanding LoggingConfig into loggingConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["loggingConfig"] = v + } + + return m, nil +} + +// flattenWorkflowTemplateJobsHadoopJob flattens an instance of WorkflowTemplateJobsHadoopJob from a JSON +// response object. 
+func flattenWorkflowTemplateJobsHadoopJob(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplateJobsHadoopJob { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplateJobsHadoopJob{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplateJobsHadoopJob + } + r.MainJarFileUri = dcl.FlattenString(m["mainJarFileUri"]) + r.MainClass = dcl.FlattenString(m["mainClass"]) + r.Args = dcl.FlattenStringSlice(m["args"]) + r.JarFileUris = dcl.FlattenStringSlice(m["jarFileUris"]) + r.FileUris = dcl.FlattenStringSlice(m["fileUris"]) + r.ArchiveUris = dcl.FlattenStringSlice(m["archiveUris"]) + r.Properties = dcl.FlattenKeyValuePairs(m["properties"]) + r.LoggingConfig = flattenWorkflowTemplateJobsHadoopJobLoggingConfig(c, m["loggingConfig"], res) + + return r +} + +// expandWorkflowTemplateJobsHadoopJobLoggingConfigMap expands the contents of WorkflowTemplateJobsHadoopJobLoggingConfig into a JSON +// request object. +func expandWorkflowTemplateJobsHadoopJobLoggingConfigMap(c *Client, f map[string]WorkflowTemplateJobsHadoopJobLoggingConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplateJobsHadoopJobLoggingConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplateJobsHadoopJobLoggingConfigSlice expands the contents of WorkflowTemplateJobsHadoopJobLoggingConfig into a JSON +// request object. 
+func expandWorkflowTemplateJobsHadoopJobLoggingConfigSlice(c *Client, f []WorkflowTemplateJobsHadoopJobLoggingConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplateJobsHadoopJobLoggingConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplateJobsHadoopJobLoggingConfigMap flattens the contents of WorkflowTemplateJobsHadoopJobLoggingConfig from a JSON +// response object. +func flattenWorkflowTemplateJobsHadoopJobLoggingConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplateJobsHadoopJobLoggingConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplateJobsHadoopJobLoggingConfig{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplateJobsHadoopJobLoggingConfig{} + } + + items := make(map[string]WorkflowTemplateJobsHadoopJobLoggingConfig) + for k, item := range a { + items[k] = *flattenWorkflowTemplateJobsHadoopJobLoggingConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplateJobsHadoopJobLoggingConfigSlice flattens the contents of WorkflowTemplateJobsHadoopJobLoggingConfig from a JSON +// response object. 
+func flattenWorkflowTemplateJobsHadoopJobLoggingConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplateJobsHadoopJobLoggingConfig { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplateJobsHadoopJobLoggingConfig{} + } + + if len(a) == 0 { + return []WorkflowTemplateJobsHadoopJobLoggingConfig{} + } + + items := make([]WorkflowTemplateJobsHadoopJobLoggingConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplateJobsHadoopJobLoggingConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplateJobsHadoopJobLoggingConfig expands an instance of WorkflowTemplateJobsHadoopJobLoggingConfig into a JSON +// request object. +func expandWorkflowTemplateJobsHadoopJobLoggingConfig(c *Client, f *WorkflowTemplateJobsHadoopJobLoggingConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.DriverLogLevels; !dcl.IsEmptyValueIndirect(v) { + m["driverLogLevels"] = v + } + + return m, nil +} + +// flattenWorkflowTemplateJobsHadoopJobLoggingConfig flattens an instance of WorkflowTemplateJobsHadoopJobLoggingConfig from a JSON +// response object. +func flattenWorkflowTemplateJobsHadoopJobLoggingConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplateJobsHadoopJobLoggingConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplateJobsHadoopJobLoggingConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplateJobsHadoopJobLoggingConfig + } + r.DriverLogLevels = dcl.FlattenKeyValuePairs(m["driverLogLevels"]) + + return r +} + +// expandWorkflowTemplateJobsSparkJobMap expands the contents of WorkflowTemplateJobsSparkJob into a JSON +// request object. 
+func expandWorkflowTemplateJobsSparkJobMap(c *Client, f map[string]WorkflowTemplateJobsSparkJob, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplateJobsSparkJob(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplateJobsSparkJobSlice expands the contents of WorkflowTemplateJobsSparkJob into a JSON +// request object. +func expandWorkflowTemplateJobsSparkJobSlice(c *Client, f []WorkflowTemplateJobsSparkJob, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplateJobsSparkJob(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplateJobsSparkJobMap flattens the contents of WorkflowTemplateJobsSparkJob from a JSON +// response object. +func flattenWorkflowTemplateJobsSparkJobMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplateJobsSparkJob { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplateJobsSparkJob{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplateJobsSparkJob{} + } + + items := make(map[string]WorkflowTemplateJobsSparkJob) + for k, item := range a { + items[k] = *flattenWorkflowTemplateJobsSparkJob(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplateJobsSparkJobSlice flattens the contents of WorkflowTemplateJobsSparkJob from a JSON +// response object. 
+func flattenWorkflowTemplateJobsSparkJobSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplateJobsSparkJob { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplateJobsSparkJob{} + } + + if len(a) == 0 { + return []WorkflowTemplateJobsSparkJob{} + } + + items := make([]WorkflowTemplateJobsSparkJob, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplateJobsSparkJob(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplateJobsSparkJob expands an instance of WorkflowTemplateJobsSparkJob into a JSON +// request object. +func expandWorkflowTemplateJobsSparkJob(c *Client, f *WorkflowTemplateJobsSparkJob, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.MainJarFileUri; !dcl.IsEmptyValueIndirect(v) { + m["mainJarFileUri"] = v + } + if v := f.MainClass; !dcl.IsEmptyValueIndirect(v) { + m["mainClass"] = v + } + if v := f.Args; v != nil { + m["args"] = v + } + if v := f.JarFileUris; v != nil { + m["jarFileUris"] = v + } + if v := f.FileUris; v != nil { + m["fileUris"] = v + } + if v := f.ArchiveUris; v != nil { + m["archiveUris"] = v + } + if v := f.Properties; !dcl.IsEmptyValueIndirect(v) { + m["properties"] = v + } + if v, err := expandWorkflowTemplateJobsSparkJobLoggingConfig(c, f.LoggingConfig, res); err != nil { + return nil, fmt.Errorf("error expanding LoggingConfig into loggingConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["loggingConfig"] = v + } + + return m, nil +} + +// flattenWorkflowTemplateJobsSparkJob flattens an instance of WorkflowTemplateJobsSparkJob from a JSON +// response object. 
+func flattenWorkflowTemplateJobsSparkJob(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplateJobsSparkJob { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplateJobsSparkJob{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplateJobsSparkJob + } + r.MainJarFileUri = dcl.FlattenString(m["mainJarFileUri"]) + r.MainClass = dcl.FlattenString(m["mainClass"]) + r.Args = dcl.FlattenStringSlice(m["args"]) + r.JarFileUris = dcl.FlattenStringSlice(m["jarFileUris"]) + r.FileUris = dcl.FlattenStringSlice(m["fileUris"]) + r.ArchiveUris = dcl.FlattenStringSlice(m["archiveUris"]) + r.Properties = dcl.FlattenKeyValuePairs(m["properties"]) + r.LoggingConfig = flattenWorkflowTemplateJobsSparkJobLoggingConfig(c, m["loggingConfig"], res) + + return r +} + +// expandWorkflowTemplateJobsSparkJobLoggingConfigMap expands the contents of WorkflowTemplateJobsSparkJobLoggingConfig into a JSON +// request object. +func expandWorkflowTemplateJobsSparkJobLoggingConfigMap(c *Client, f map[string]WorkflowTemplateJobsSparkJobLoggingConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplateJobsSparkJobLoggingConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplateJobsSparkJobLoggingConfigSlice expands the contents of WorkflowTemplateJobsSparkJobLoggingConfig into a JSON +// request object. 
+func expandWorkflowTemplateJobsSparkJobLoggingConfigSlice(c *Client, f []WorkflowTemplateJobsSparkJobLoggingConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplateJobsSparkJobLoggingConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplateJobsSparkJobLoggingConfigMap flattens the contents of WorkflowTemplateJobsSparkJobLoggingConfig from a JSON +// response object. +func flattenWorkflowTemplateJobsSparkJobLoggingConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplateJobsSparkJobLoggingConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplateJobsSparkJobLoggingConfig{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplateJobsSparkJobLoggingConfig{} + } + + items := make(map[string]WorkflowTemplateJobsSparkJobLoggingConfig) + for k, item := range a { + items[k] = *flattenWorkflowTemplateJobsSparkJobLoggingConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplateJobsSparkJobLoggingConfigSlice flattens the contents of WorkflowTemplateJobsSparkJobLoggingConfig from a JSON +// response object. 
+func flattenWorkflowTemplateJobsSparkJobLoggingConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplateJobsSparkJobLoggingConfig { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplateJobsSparkJobLoggingConfig{} + } + + if len(a) == 0 { + return []WorkflowTemplateJobsSparkJobLoggingConfig{} + } + + items := make([]WorkflowTemplateJobsSparkJobLoggingConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplateJobsSparkJobLoggingConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplateJobsSparkJobLoggingConfig expands an instance of WorkflowTemplateJobsSparkJobLoggingConfig into a JSON +// request object. +func expandWorkflowTemplateJobsSparkJobLoggingConfig(c *Client, f *WorkflowTemplateJobsSparkJobLoggingConfig, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.DriverLogLevels; !dcl.IsEmptyValueIndirect(v) { + m["driverLogLevels"] = v + } + + return m, nil +} + +// flattenWorkflowTemplateJobsSparkJobLoggingConfig flattens an instance of WorkflowTemplateJobsSparkJobLoggingConfig from a JSON +// response object. +func flattenWorkflowTemplateJobsSparkJobLoggingConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplateJobsSparkJobLoggingConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplateJobsSparkJobLoggingConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplateJobsSparkJobLoggingConfig + } + r.DriverLogLevels = dcl.FlattenKeyValuePairs(m["driverLogLevels"]) + + return r +} + +// expandWorkflowTemplateJobsPysparkJobMap expands the contents of WorkflowTemplateJobsPysparkJob into a JSON +// request object. 
+func expandWorkflowTemplateJobsPysparkJobMap(c *Client, f map[string]WorkflowTemplateJobsPysparkJob, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplateJobsPysparkJob(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplateJobsPysparkJobSlice expands the contents of WorkflowTemplateJobsPysparkJob into a JSON +// request object. +func expandWorkflowTemplateJobsPysparkJobSlice(c *Client, f []WorkflowTemplateJobsPysparkJob, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplateJobsPysparkJob(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplateJobsPysparkJobMap flattens the contents of WorkflowTemplateJobsPysparkJob from a JSON +// response object. +func flattenWorkflowTemplateJobsPysparkJobMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplateJobsPysparkJob { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplateJobsPysparkJob{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplateJobsPysparkJob{} + } + + items := make(map[string]WorkflowTemplateJobsPysparkJob) + for k, item := range a { + items[k] = *flattenWorkflowTemplateJobsPysparkJob(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplateJobsPysparkJobSlice flattens the contents of WorkflowTemplateJobsPysparkJob from a JSON +// response object. 
+func flattenWorkflowTemplateJobsPysparkJobSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplateJobsPysparkJob { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplateJobsPysparkJob{} + } + + if len(a) == 0 { + return []WorkflowTemplateJobsPysparkJob{} + } + + items := make([]WorkflowTemplateJobsPysparkJob, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplateJobsPysparkJob(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplateJobsPysparkJob expands an instance of WorkflowTemplateJobsPysparkJob into a JSON +// request object. +func expandWorkflowTemplateJobsPysparkJob(c *Client, f *WorkflowTemplateJobsPysparkJob, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.MainPythonFileUri; !dcl.IsEmptyValueIndirect(v) { + m["mainPythonFileUri"] = v + } + if v := f.Args; v != nil { + m["args"] = v + } + if v := f.PythonFileUris; v != nil { + m["pythonFileUris"] = v + } + if v := f.JarFileUris; v != nil { + m["jarFileUris"] = v + } + if v := f.FileUris; v != nil { + m["fileUris"] = v + } + if v := f.ArchiveUris; v != nil { + m["archiveUris"] = v + } + if v := f.Properties; !dcl.IsEmptyValueIndirect(v) { + m["properties"] = v + } + if v, err := expandWorkflowTemplateJobsPysparkJobLoggingConfig(c, f.LoggingConfig, res); err != nil { + return nil, fmt.Errorf("error expanding LoggingConfig into loggingConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["loggingConfig"] = v + } + + return m, nil +} + +// flattenWorkflowTemplateJobsPysparkJob flattens an instance of WorkflowTemplateJobsPysparkJob from a JSON +// response object. 
func flattenWorkflowTemplateJobsPysparkJob(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplateJobsPysparkJob {
	m, ok := i.(map[string]interface{})
	if !ok {
		// Non-map input (including a nil interface) flattens to nil, not the Empty sentinel.
		return nil
	}

	r := &WorkflowTemplateJobsPysparkJob{}

	if dcl.IsEmptyValueIndirect(i) {
		// A present-but-empty object maps to the shared package-level sentinel value.
		return EmptyWorkflowTemplateJobsPysparkJob
	}
	r.MainPythonFileUri = dcl.FlattenString(m["mainPythonFileUri"])
	r.Args = dcl.FlattenStringSlice(m["args"])
	r.PythonFileUris = dcl.FlattenStringSlice(m["pythonFileUris"])
	r.JarFileUris = dcl.FlattenStringSlice(m["jarFileUris"])
	r.FileUris = dcl.FlattenStringSlice(m["fileUris"])
	r.ArchiveUris = dcl.FlattenStringSlice(m["archiveUris"])
	r.Properties = dcl.FlattenKeyValuePairs(m["properties"])
	r.LoggingConfig = flattenWorkflowTemplateJobsPysparkJobLoggingConfig(c, m["loggingConfig"], res)

	return r
}

// expandWorkflowTemplateJobsPysparkJobLoggingConfigMap expands the contents of WorkflowTemplateJobsPysparkJobLoggingConfig into a JSON
// request object.
func expandWorkflowTemplateJobsPysparkJobLoggingConfigMap(c *Client, f map[string]WorkflowTemplateJobsPysparkJobLoggingConfig, res *WorkflowTemplate) (map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := make(map[string]interface{})
	for k, item := range f {
		i, err := expandWorkflowTemplateJobsPysparkJobLoggingConfig(c, &item, res)
		if err != nil {
			return nil, err
		}
		// Values that expand to nil (empty objects) are dropped from the request map.
		if i != nil {
			items[k] = i
		}
	}

	return items, nil
}

// expandWorkflowTemplateJobsPysparkJobLoggingConfigSlice expands the contents of WorkflowTemplateJobsPysparkJobLoggingConfig into a JSON
// request object.
func expandWorkflowTemplateJobsPysparkJobLoggingConfigSlice(c *Client, f []WorkflowTemplateJobsPysparkJobLoggingConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := []map[string]interface{}{}
	for _, item := range f {
		i, err := expandWorkflowTemplateJobsPysparkJobLoggingConfig(c, &item, res)
		if err != nil {
			return nil, err
		}

		// Unlike the Map variant, nil (empty) elements are kept so slice
		// positions are preserved.
		items = append(items, i)
	}

	return items, nil
}

// flattenWorkflowTemplateJobsPysparkJobLoggingConfigMap flattens the contents of WorkflowTemplateJobsPysparkJobLoggingConfig from a JSON
// response object.
func flattenWorkflowTemplateJobsPysparkJobLoggingConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplateJobsPysparkJobLoggingConfig {
	a, ok := i.(map[string]interface{})
	if !ok {
		// Malformed or missing input flattens to an empty (non-nil) map.
		return map[string]WorkflowTemplateJobsPysparkJobLoggingConfig{}
	}

	if len(a) == 0 {
		return map[string]WorkflowTemplateJobsPysparkJobLoggingConfig{}
	}

	items := make(map[string]WorkflowTemplateJobsPysparkJobLoggingConfig)
	for k, item := range a {
		// NOTE(review): unchecked assertion — a non-map element would panic;
		// generated code assumes well-formed API responses.
		items[k] = *flattenWorkflowTemplateJobsPysparkJobLoggingConfig(c, item.(map[string]interface{}), res)
	}

	return items
}

// flattenWorkflowTemplateJobsPysparkJobLoggingConfigSlice flattens the contents of WorkflowTemplateJobsPysparkJobLoggingConfig from a JSON
// response object.
func flattenWorkflowTemplateJobsPysparkJobLoggingConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplateJobsPysparkJobLoggingConfig {
	a, ok := i.([]interface{})
	if !ok {
		// Malformed or missing input flattens to an empty (non-nil) slice.
		return []WorkflowTemplateJobsPysparkJobLoggingConfig{}
	}

	if len(a) == 0 {
		return []WorkflowTemplateJobsPysparkJobLoggingConfig{}
	}

	items := make([]WorkflowTemplateJobsPysparkJobLoggingConfig, 0, len(a))
	for _, item := range a {
		items = append(items, *flattenWorkflowTemplateJobsPysparkJobLoggingConfig(c, item.(map[string]interface{}), res))
	}

	return items
}

// expandWorkflowTemplateJobsPysparkJobLoggingConfig expands an instance of WorkflowTemplateJobsPysparkJobLoggingConfig into a JSON
// request object.
func expandWorkflowTemplateJobsPysparkJobLoggingConfig(c *Client, f *WorkflowTemplateJobsPysparkJobLoggingConfig, res *WorkflowTemplate) (map[string]interface{}, error) {
	if dcl.IsEmptyValueIndirect(f) {
		// Empty input expands to (nil, nil) so callers omit the field entirely.
		return nil, nil
	}

	m := make(map[string]interface{})
	if v := f.DriverLogLevels; !dcl.IsEmptyValueIndirect(v) {
		m["driverLogLevels"] = v
	}

	return m, nil
}

// flattenWorkflowTemplateJobsPysparkJobLoggingConfig flattens an instance of WorkflowTemplateJobsPysparkJobLoggingConfig from a JSON
// response object.
func flattenWorkflowTemplateJobsPysparkJobLoggingConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplateJobsPysparkJobLoggingConfig {
	m, ok := i.(map[string]interface{})
	if !ok {
		return nil
	}

	r := &WorkflowTemplateJobsPysparkJobLoggingConfig{}

	if dcl.IsEmptyValueIndirect(i) {
		return EmptyWorkflowTemplateJobsPysparkJobLoggingConfig
	}
	r.DriverLogLevels = dcl.FlattenKeyValuePairs(m["driverLogLevels"])

	return r
}

// expandWorkflowTemplateJobsHiveJobMap expands the contents of WorkflowTemplateJobsHiveJob into a JSON
// request object.
func expandWorkflowTemplateJobsHiveJobMap(c *Client, f map[string]WorkflowTemplateJobsHiveJob, res *WorkflowTemplate) (map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := make(map[string]interface{})
	for k, item := range f {
		i, err := expandWorkflowTemplateJobsHiveJob(c, &item, res)
		if err != nil {
			return nil, err
		}
		// Values that expand to nil (empty objects) are dropped from the request map.
		if i != nil {
			items[k] = i
		}
	}

	return items, nil
}

// expandWorkflowTemplateJobsHiveJobSlice expands the contents of WorkflowTemplateJobsHiveJob into a JSON
// request object.
func expandWorkflowTemplateJobsHiveJobSlice(c *Client, f []WorkflowTemplateJobsHiveJob, res *WorkflowTemplate) ([]map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := []map[string]interface{}{}
	for _, item := range f {
		i, err := expandWorkflowTemplateJobsHiveJob(c, &item, res)
		if err != nil {
			return nil, err
		}

		// Unlike the Map variant, nil (empty) elements are kept so slice
		// positions are preserved.
		items = append(items, i)
	}

	return items, nil
}

// flattenWorkflowTemplateJobsHiveJobMap flattens the contents of WorkflowTemplateJobsHiveJob from a JSON
// response object.
func flattenWorkflowTemplateJobsHiveJobMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplateJobsHiveJob {
	a, ok := i.(map[string]interface{})
	if !ok {
		// Malformed or missing input flattens to an empty (non-nil) map.
		return map[string]WorkflowTemplateJobsHiveJob{}
	}

	if len(a) == 0 {
		return map[string]WorkflowTemplateJobsHiveJob{}
	}

	items := make(map[string]WorkflowTemplateJobsHiveJob)
	for k, item := range a {
		// NOTE(review): unchecked assertion — a non-map element would panic;
		// generated code assumes well-formed API responses.
		items[k] = *flattenWorkflowTemplateJobsHiveJob(c, item.(map[string]interface{}), res)
	}

	return items
}

// flattenWorkflowTemplateJobsHiveJobSlice flattens the contents of WorkflowTemplateJobsHiveJob from a JSON
// response object.
func flattenWorkflowTemplateJobsHiveJobSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplateJobsHiveJob {
	a, ok := i.([]interface{})
	if !ok {
		// Malformed or missing input flattens to an empty (non-nil) slice.
		return []WorkflowTemplateJobsHiveJob{}
	}

	if len(a) == 0 {
		return []WorkflowTemplateJobsHiveJob{}
	}

	items := make([]WorkflowTemplateJobsHiveJob, 0, len(a))
	for _, item := range a {
		items = append(items, *flattenWorkflowTemplateJobsHiveJob(c, item.(map[string]interface{}), res))
	}

	return items
}

// expandWorkflowTemplateJobsHiveJob expands an instance of WorkflowTemplateJobsHiveJob into a JSON
// request object.
func expandWorkflowTemplateJobsHiveJob(c *Client, f *WorkflowTemplateJobsHiveJob, res *WorkflowTemplate) (map[string]interface{}, error) {
	if dcl.IsEmptyValueIndirect(f) {
		// Empty input expands to (nil, nil) so callers omit the field entirely.
		return nil, nil
	}

	m := make(map[string]interface{})
	if v := f.QueryFileUri; !dcl.IsEmptyValueIndirect(v) {
		m["queryFileUri"] = v
	}
	if v, err := expandWorkflowTemplateJobsHiveJobQueryList(c, f.QueryList, res); err != nil {
		return nil, fmt.Errorf("error expanding QueryList into queryList: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["queryList"] = v
	}
	if v := f.ContinueOnFailure; !dcl.IsEmptyValueIndirect(v) {
		m["continueOnFailure"] = v
	}
	if v := f.ScriptVariables; !dcl.IsEmptyValueIndirect(v) {
		m["scriptVariables"] = v
	}
	if v := f.Properties; !dcl.IsEmptyValueIndirect(v) {
		m["properties"] = v
	}
	// Slice fields use a plain nil check, so an explicitly empty list is still serialized.
	if v := f.JarFileUris; v != nil {
		m["jarFileUris"] = v
	}

	return m, nil
}

// flattenWorkflowTemplateJobsHiveJob flattens an instance of WorkflowTemplateJobsHiveJob from a JSON
// response object.
func flattenWorkflowTemplateJobsHiveJob(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplateJobsHiveJob {
	m, ok := i.(map[string]interface{})
	if !ok {
		// Non-map input (including a nil interface) flattens to nil, not the Empty sentinel.
		return nil
	}

	r := &WorkflowTemplateJobsHiveJob{}

	if dcl.IsEmptyValueIndirect(i) {
		// A present-but-empty object maps to the shared package-level sentinel value.
		return EmptyWorkflowTemplateJobsHiveJob
	}
	r.QueryFileUri = dcl.FlattenString(m["queryFileUri"])
	r.QueryList = flattenWorkflowTemplateJobsHiveJobQueryList(c, m["queryList"], res)
	r.ContinueOnFailure = dcl.FlattenBool(m["continueOnFailure"])
	r.ScriptVariables = dcl.FlattenKeyValuePairs(m["scriptVariables"])
	r.Properties = dcl.FlattenKeyValuePairs(m["properties"])
	r.JarFileUris = dcl.FlattenStringSlice(m["jarFileUris"])

	return r
}

// expandWorkflowTemplateJobsHiveJobQueryListMap expands the contents of WorkflowTemplateJobsHiveJobQueryList into a JSON
// request object.
func expandWorkflowTemplateJobsHiveJobQueryListMap(c *Client, f map[string]WorkflowTemplateJobsHiveJobQueryList, res *WorkflowTemplate) (map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := make(map[string]interface{})
	for k, item := range f {
		i, err := expandWorkflowTemplateJobsHiveJobQueryList(c, &item, res)
		if err != nil {
			return nil, err
		}
		if i != nil {
			items[k] = i
		}
	}

	return items, nil
}

// expandWorkflowTemplateJobsHiveJobQueryListSlice expands the contents of WorkflowTemplateJobsHiveJobQueryList into a JSON
// request object.
func expandWorkflowTemplateJobsHiveJobQueryListSlice(c *Client, f []WorkflowTemplateJobsHiveJobQueryList, res *WorkflowTemplate) ([]map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := []map[string]interface{}{}
	for _, item := range f {
		i, err := expandWorkflowTemplateJobsHiveJobQueryList(c, &item, res)
		if err != nil {
			return nil, err
		}

		items = append(items, i)
	}

	return items, nil
}

// flattenWorkflowTemplateJobsHiveJobQueryListMap flattens the contents of WorkflowTemplateJobsHiveJobQueryList from a JSON
// response object.
func flattenWorkflowTemplateJobsHiveJobQueryListMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplateJobsHiveJobQueryList {
	a, ok := i.(map[string]interface{})
	if !ok {
		return map[string]WorkflowTemplateJobsHiveJobQueryList{}
	}

	if len(a) == 0 {
		return map[string]WorkflowTemplateJobsHiveJobQueryList{}
	}

	items := make(map[string]WorkflowTemplateJobsHiveJobQueryList)
	for k, item := range a {
		items[k] = *flattenWorkflowTemplateJobsHiveJobQueryList(c, item.(map[string]interface{}), res)
	}

	return items
}

// flattenWorkflowTemplateJobsHiveJobQueryListSlice flattens the contents of WorkflowTemplateJobsHiveJobQueryList from a JSON
// response object.
func flattenWorkflowTemplateJobsHiveJobQueryListSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplateJobsHiveJobQueryList {
	a, ok := i.([]interface{})
	if !ok {
		return []WorkflowTemplateJobsHiveJobQueryList{}
	}

	if len(a) == 0 {
		return []WorkflowTemplateJobsHiveJobQueryList{}
	}

	items := make([]WorkflowTemplateJobsHiveJobQueryList, 0, len(a))
	for _, item := range a {
		items = append(items, *flattenWorkflowTemplateJobsHiveJobQueryList(c, item.(map[string]interface{}), res))
	}

	return items
}

// expandWorkflowTemplateJobsHiveJobQueryList expands an instance of WorkflowTemplateJobsHiveJobQueryList into a JSON
// request object.
func expandWorkflowTemplateJobsHiveJobQueryList(c *Client, f *WorkflowTemplateJobsHiveJobQueryList, res *WorkflowTemplate) (map[string]interface{}, error) {
	if dcl.IsEmptyValueIndirect(f) {
		return nil, nil
	}

	m := make(map[string]interface{})
	// Plain nil check so an explicitly empty query list is still serialized.
	if v := f.Queries; v != nil {
		m["queries"] = v
	}

	return m, nil
}

// flattenWorkflowTemplateJobsHiveJobQueryList flattens an instance of WorkflowTemplateJobsHiveJobQueryList from a JSON
// response object.
func flattenWorkflowTemplateJobsHiveJobQueryList(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplateJobsHiveJobQueryList {
	m, ok := i.(map[string]interface{})
	if !ok {
		return nil
	}

	r := &WorkflowTemplateJobsHiveJobQueryList{}

	if dcl.IsEmptyValueIndirect(i) {
		return EmptyWorkflowTemplateJobsHiveJobQueryList
	}
	r.Queries = dcl.FlattenStringSlice(m["queries"])

	return r
}

// expandWorkflowTemplateJobsPigJobMap expands the contents of WorkflowTemplateJobsPigJob into a JSON
// request object.
func expandWorkflowTemplateJobsPigJobMap(c *Client, f map[string]WorkflowTemplateJobsPigJob, res *WorkflowTemplate) (map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := make(map[string]interface{})
	for k, item := range f {
		i, err := expandWorkflowTemplateJobsPigJob(c, &item, res)
		if err != nil {
			return nil, err
		}
		if i != nil {
			items[k] = i
		}
	}

	return items, nil
}

// expandWorkflowTemplateJobsPigJobSlice expands the contents of WorkflowTemplateJobsPigJob into a JSON
// request object.
func expandWorkflowTemplateJobsPigJobSlice(c *Client, f []WorkflowTemplateJobsPigJob, res *WorkflowTemplate) ([]map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := []map[string]interface{}{}
	for _, item := range f {
		i, err := expandWorkflowTemplateJobsPigJob(c, &item, res)
		if err != nil {
			return nil, err
		}

		// Unlike the Map variant, nil (empty) elements are kept so slice
		// positions are preserved.
		items = append(items, i)
	}

	return items, nil
}

// flattenWorkflowTemplateJobsPigJobMap flattens the contents of WorkflowTemplateJobsPigJob from a JSON
// response object.
func flattenWorkflowTemplateJobsPigJobMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplateJobsPigJob {
	a, ok := i.(map[string]interface{})
	if !ok {
		// Malformed or missing input flattens to an empty (non-nil) map.
		return map[string]WorkflowTemplateJobsPigJob{}
	}

	if len(a) == 0 {
		return map[string]WorkflowTemplateJobsPigJob{}
	}

	items := make(map[string]WorkflowTemplateJobsPigJob)
	for k, item := range a {
		// NOTE(review): unchecked assertion — a non-map element would panic;
		// generated code assumes well-formed API responses.
		items[k] = *flattenWorkflowTemplateJobsPigJob(c, item.(map[string]interface{}), res)
	}

	return items
}

// flattenWorkflowTemplateJobsPigJobSlice flattens the contents of WorkflowTemplateJobsPigJob from a JSON
// response object.
func flattenWorkflowTemplateJobsPigJobSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplateJobsPigJob {
	a, ok := i.([]interface{})
	if !ok {
		// Malformed or missing input flattens to an empty (non-nil) slice.
		return []WorkflowTemplateJobsPigJob{}
	}

	if len(a) == 0 {
		return []WorkflowTemplateJobsPigJob{}
	}

	items := make([]WorkflowTemplateJobsPigJob, 0, len(a))
	for _, item := range a {
		items = append(items, *flattenWorkflowTemplateJobsPigJob(c, item.(map[string]interface{}), res))
	}

	return items
}

// expandWorkflowTemplateJobsPigJob expands an instance of WorkflowTemplateJobsPigJob into a JSON
// request object.
func expandWorkflowTemplateJobsPigJob(c *Client, f *WorkflowTemplateJobsPigJob, res *WorkflowTemplate) (map[string]interface{}, error) {
	if dcl.IsEmptyValueIndirect(f) {
		// Empty input expands to (nil, nil) so callers omit the field entirely.
		return nil, nil
	}

	m := make(map[string]interface{})
	if v := f.QueryFileUri; !dcl.IsEmptyValueIndirect(v) {
		m["queryFileUri"] = v
	}
	if v, err := expandWorkflowTemplateJobsPigJobQueryList(c, f.QueryList, res); err != nil {
		return nil, fmt.Errorf("error expanding QueryList into queryList: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["queryList"] = v
	}
	if v := f.ContinueOnFailure; !dcl.IsEmptyValueIndirect(v) {
		m["continueOnFailure"] = v
	}
	if v := f.ScriptVariables; !dcl.IsEmptyValueIndirect(v) {
		m["scriptVariables"] = v
	}
	if v := f.Properties; !dcl.IsEmptyValueIndirect(v) {
		m["properties"] = v
	}
	// Slice fields use a plain nil check, so an explicitly empty list is still serialized.
	if v := f.JarFileUris; v != nil {
		m["jarFileUris"] = v
	}
	if v, err := expandWorkflowTemplateJobsPigJobLoggingConfig(c, f.LoggingConfig, res); err != nil {
		return nil, fmt.Errorf("error expanding LoggingConfig into loggingConfig: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["loggingConfig"] = v
	}

	return m, nil
}

// flattenWorkflowTemplateJobsPigJob flattens an instance of WorkflowTemplateJobsPigJob from a JSON
// response object.
func flattenWorkflowTemplateJobsPigJob(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplateJobsPigJob {
	m, ok := i.(map[string]interface{})
	if !ok {
		// Non-map input (including a nil interface) flattens to nil, not the Empty sentinel.
		return nil
	}

	r := &WorkflowTemplateJobsPigJob{}

	if dcl.IsEmptyValueIndirect(i) {
		// A present-but-empty object maps to the shared package-level sentinel value.
		return EmptyWorkflowTemplateJobsPigJob
	}
	r.QueryFileUri = dcl.FlattenString(m["queryFileUri"])
	r.QueryList = flattenWorkflowTemplateJobsPigJobQueryList(c, m["queryList"], res)
	r.ContinueOnFailure = dcl.FlattenBool(m["continueOnFailure"])
	r.ScriptVariables = dcl.FlattenKeyValuePairs(m["scriptVariables"])
	r.Properties = dcl.FlattenKeyValuePairs(m["properties"])
	r.JarFileUris = dcl.FlattenStringSlice(m["jarFileUris"])
	r.LoggingConfig = flattenWorkflowTemplateJobsPigJobLoggingConfig(c, m["loggingConfig"], res)

	return r
}

// expandWorkflowTemplateJobsPigJobQueryListMap expands the contents of WorkflowTemplateJobsPigJobQueryList into a JSON
// request object.
func expandWorkflowTemplateJobsPigJobQueryListMap(c *Client, f map[string]WorkflowTemplateJobsPigJobQueryList, res *WorkflowTemplate) (map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := make(map[string]interface{})
	for k, item := range f {
		i, err := expandWorkflowTemplateJobsPigJobQueryList(c, &item, res)
		if err != nil {
			return nil, err
		}
		if i != nil {
			items[k] = i
		}
	}

	return items, nil
}

// expandWorkflowTemplateJobsPigJobQueryListSlice expands the contents of WorkflowTemplateJobsPigJobQueryList into a JSON
// request object.
func expandWorkflowTemplateJobsPigJobQueryListSlice(c *Client, f []WorkflowTemplateJobsPigJobQueryList, res *WorkflowTemplate) ([]map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := []map[string]interface{}{}
	for _, item := range f {
		i, err := expandWorkflowTemplateJobsPigJobQueryList(c, &item, res)
		if err != nil {
			return nil, err
		}

		items = append(items, i)
	}

	return items, nil
}

// flattenWorkflowTemplateJobsPigJobQueryListMap flattens the contents of WorkflowTemplateJobsPigJobQueryList from a JSON
// response object.
func flattenWorkflowTemplateJobsPigJobQueryListMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplateJobsPigJobQueryList {
	a, ok := i.(map[string]interface{})
	if !ok {
		return map[string]WorkflowTemplateJobsPigJobQueryList{}
	}

	if len(a) == 0 {
		return map[string]WorkflowTemplateJobsPigJobQueryList{}
	}

	items := make(map[string]WorkflowTemplateJobsPigJobQueryList)
	for k, item := range a {
		items[k] = *flattenWorkflowTemplateJobsPigJobQueryList(c, item.(map[string]interface{}), res)
	}

	return items
}

// flattenWorkflowTemplateJobsPigJobQueryListSlice flattens the contents of WorkflowTemplateJobsPigJobQueryList from a JSON
// response object.
func flattenWorkflowTemplateJobsPigJobQueryListSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplateJobsPigJobQueryList {
	a, ok := i.([]interface{})
	if !ok {
		return []WorkflowTemplateJobsPigJobQueryList{}
	}

	if len(a) == 0 {
		return []WorkflowTemplateJobsPigJobQueryList{}
	}

	items := make([]WorkflowTemplateJobsPigJobQueryList, 0, len(a))
	for _, item := range a {
		items = append(items, *flattenWorkflowTemplateJobsPigJobQueryList(c, item.(map[string]interface{}), res))
	}

	return items
}

// expandWorkflowTemplateJobsPigJobQueryList expands an instance of WorkflowTemplateJobsPigJobQueryList into a JSON
// request object.
func expandWorkflowTemplateJobsPigJobQueryList(c *Client, f *WorkflowTemplateJobsPigJobQueryList, res *WorkflowTemplate) (map[string]interface{}, error) {
	if dcl.IsEmptyValueIndirect(f) {
		// Empty input expands to (nil, nil) so callers omit the field entirely.
		return nil, nil
	}

	m := make(map[string]interface{})
	// Plain nil check so an explicitly empty query list is still serialized.
	if v := f.Queries; v != nil {
		m["queries"] = v
	}

	return m, nil
}

// flattenWorkflowTemplateJobsPigJobQueryList flattens an instance of WorkflowTemplateJobsPigJobQueryList from a JSON
// response object.
func flattenWorkflowTemplateJobsPigJobQueryList(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplateJobsPigJobQueryList {
	m, ok := i.(map[string]interface{})
	if !ok {
		// Non-map input (including a nil interface) flattens to nil, not the Empty sentinel.
		return nil
	}

	r := &WorkflowTemplateJobsPigJobQueryList{}

	if dcl.IsEmptyValueIndirect(i) {
		// A present-but-empty object maps to the shared package-level sentinel value.
		return EmptyWorkflowTemplateJobsPigJobQueryList
	}
	r.Queries = dcl.FlattenStringSlice(m["queries"])

	return r
}

// expandWorkflowTemplateJobsPigJobLoggingConfigMap expands the contents of WorkflowTemplateJobsPigJobLoggingConfig into a JSON
// request object.
func expandWorkflowTemplateJobsPigJobLoggingConfigMap(c *Client, f map[string]WorkflowTemplateJobsPigJobLoggingConfig, res *WorkflowTemplate) (map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := make(map[string]interface{})
	for k, item := range f {
		i, err := expandWorkflowTemplateJobsPigJobLoggingConfig(c, &item, res)
		if err != nil {
			return nil, err
		}
		// Values that expand to nil (empty objects) are dropped from the request map.
		if i != nil {
			items[k] = i
		}
	}

	return items, nil
}

// expandWorkflowTemplateJobsPigJobLoggingConfigSlice expands the contents of WorkflowTemplateJobsPigJobLoggingConfig into a JSON
// request object.
func expandWorkflowTemplateJobsPigJobLoggingConfigSlice(c *Client, f []WorkflowTemplateJobsPigJobLoggingConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := []map[string]interface{}{}
	for _, item := range f {
		i, err := expandWorkflowTemplateJobsPigJobLoggingConfig(c, &item, res)
		if err != nil {
			return nil, err
		}

		// Unlike the Map variant, nil (empty) elements are kept so slice
		// positions are preserved.
		items = append(items, i)
	}

	return items, nil
}

// flattenWorkflowTemplateJobsPigJobLoggingConfigMap flattens the contents of WorkflowTemplateJobsPigJobLoggingConfig from a JSON
// response object.
func flattenWorkflowTemplateJobsPigJobLoggingConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplateJobsPigJobLoggingConfig {
	a, ok := i.(map[string]interface{})
	if !ok {
		return map[string]WorkflowTemplateJobsPigJobLoggingConfig{}
	}

	if len(a) == 0 {
		return map[string]WorkflowTemplateJobsPigJobLoggingConfig{}
	}

	items := make(map[string]WorkflowTemplateJobsPigJobLoggingConfig)
	for k, item := range a {
		// NOTE(review): unchecked assertion — a non-map element would panic;
		// generated code assumes well-formed API responses.
		items[k] = *flattenWorkflowTemplateJobsPigJobLoggingConfig(c, item.(map[string]interface{}), res)
	}

	return items
}

// flattenWorkflowTemplateJobsPigJobLoggingConfigSlice flattens the contents of WorkflowTemplateJobsPigJobLoggingConfig from a JSON
// response object.
func flattenWorkflowTemplateJobsPigJobLoggingConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplateJobsPigJobLoggingConfig {
	a, ok := i.([]interface{})
	if !ok {
		return []WorkflowTemplateJobsPigJobLoggingConfig{}
	}

	if len(a) == 0 {
		return []WorkflowTemplateJobsPigJobLoggingConfig{}
	}

	items := make([]WorkflowTemplateJobsPigJobLoggingConfig, 0, len(a))
	for _, item := range a {
		items = append(items, *flattenWorkflowTemplateJobsPigJobLoggingConfig(c, item.(map[string]interface{}), res))
	}

	return items
}

// expandWorkflowTemplateJobsPigJobLoggingConfig expands an instance of WorkflowTemplateJobsPigJobLoggingConfig into a JSON
// request object.
func expandWorkflowTemplateJobsPigJobLoggingConfig(c *Client, f *WorkflowTemplateJobsPigJobLoggingConfig, res *WorkflowTemplate) (map[string]interface{}, error) {
	if dcl.IsEmptyValueIndirect(f) {
		return nil, nil
	}

	m := make(map[string]interface{})
	if v := f.DriverLogLevels; !dcl.IsEmptyValueIndirect(v) {
		m["driverLogLevels"] = v
	}

	return m, nil
}

// flattenWorkflowTemplateJobsPigJobLoggingConfig flattens an instance of WorkflowTemplateJobsPigJobLoggingConfig from a JSON
// response object.
func flattenWorkflowTemplateJobsPigJobLoggingConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplateJobsPigJobLoggingConfig {
	m, ok := i.(map[string]interface{})
	if !ok {
		return nil
	}

	r := &WorkflowTemplateJobsPigJobLoggingConfig{}

	if dcl.IsEmptyValueIndirect(i) {
		return EmptyWorkflowTemplateJobsPigJobLoggingConfig
	}
	r.DriverLogLevels = dcl.FlattenKeyValuePairs(m["driverLogLevels"])

	return r
}

// expandWorkflowTemplateJobsSparkRJobMap expands the contents of WorkflowTemplateJobsSparkRJob into a JSON
// request object.
func expandWorkflowTemplateJobsSparkRJobMap(c *Client, f map[string]WorkflowTemplateJobsSparkRJob, res *WorkflowTemplate) (map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := make(map[string]interface{})
	for k, item := range f {
		i, err := expandWorkflowTemplateJobsSparkRJob(c, &item, res)
		if err != nil {
			return nil, err
		}
		// Values that expand to nil (empty objects) are dropped from the request map.
		if i != nil {
			items[k] = i
		}
	}

	return items, nil
}

// expandWorkflowTemplateJobsSparkRJobSlice expands the contents of WorkflowTemplateJobsSparkRJob into a JSON
// request object.
func expandWorkflowTemplateJobsSparkRJobSlice(c *Client, f []WorkflowTemplateJobsSparkRJob, res *WorkflowTemplate) ([]map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := []map[string]interface{}{}
	for _, item := range f {
		i, err := expandWorkflowTemplateJobsSparkRJob(c, &item, res)
		if err != nil {
			return nil, err
		}

		// Unlike the Map variant, nil (empty) elements are kept so slice
		// positions are preserved.
		items = append(items, i)
	}

	return items, nil
}

// flattenWorkflowTemplateJobsSparkRJobMap flattens the contents of WorkflowTemplateJobsSparkRJob from a JSON
// response object.
func flattenWorkflowTemplateJobsSparkRJobMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplateJobsSparkRJob {
	a, ok := i.(map[string]interface{})
	if !ok {
		// Malformed or missing input flattens to an empty (non-nil) map.
		return map[string]WorkflowTemplateJobsSparkRJob{}
	}

	if len(a) == 0 {
		return map[string]WorkflowTemplateJobsSparkRJob{}
	}

	items := make(map[string]WorkflowTemplateJobsSparkRJob)
	for k, item := range a {
		// NOTE(review): unchecked assertion — a non-map element would panic;
		// generated code assumes well-formed API responses.
		items[k] = *flattenWorkflowTemplateJobsSparkRJob(c, item.(map[string]interface{}), res)
	}

	return items
}

// flattenWorkflowTemplateJobsSparkRJobSlice flattens the contents of WorkflowTemplateJobsSparkRJob from a JSON
// response object.
func flattenWorkflowTemplateJobsSparkRJobSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplateJobsSparkRJob {
	a, ok := i.([]interface{})
	if !ok {
		// Malformed or missing input flattens to an empty (non-nil) slice.
		return []WorkflowTemplateJobsSparkRJob{}
	}

	if len(a) == 0 {
		return []WorkflowTemplateJobsSparkRJob{}
	}

	items := make([]WorkflowTemplateJobsSparkRJob, 0, len(a))
	for _, item := range a {
		items = append(items, *flattenWorkflowTemplateJobsSparkRJob(c, item.(map[string]interface{}), res))
	}

	return items
}

// expandWorkflowTemplateJobsSparkRJob expands an instance of WorkflowTemplateJobsSparkRJob into a JSON
// request object.
func expandWorkflowTemplateJobsSparkRJob(c *Client, f *WorkflowTemplateJobsSparkRJob, res *WorkflowTemplate) (map[string]interface{}, error) {
	if dcl.IsEmptyValueIndirect(f) {
		// Empty input expands to (nil, nil) so callers omit the field entirely.
		return nil, nil
	}

	m := make(map[string]interface{})
	if v := f.MainRFileUri; !dcl.IsEmptyValueIndirect(v) {
		m["mainRFileUri"] = v
	}
	// Slice fields use a plain nil check, so an explicitly empty list is still serialized.
	if v := f.Args; v != nil {
		m["args"] = v
	}
	if v := f.FileUris; v != nil {
		m["fileUris"] = v
	}
	if v := f.ArchiveUris; v != nil {
		m["archiveUris"] = v
	}
	if v := f.Properties; !dcl.IsEmptyValueIndirect(v) {
		m["properties"] = v
	}
	if v, err := expandWorkflowTemplateJobsSparkRJobLoggingConfig(c, f.LoggingConfig, res); err != nil {
		return nil, fmt.Errorf("error expanding LoggingConfig into loggingConfig: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["loggingConfig"] = v
	}

	return m, nil
}

// flattenWorkflowTemplateJobsSparkRJob flattens an instance of WorkflowTemplateJobsSparkRJob from a JSON
// response object.
func flattenWorkflowTemplateJobsSparkRJob(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplateJobsSparkRJob {
	m, ok := i.(map[string]interface{})
	if !ok {
		// Non-map input (including a nil interface) flattens to nil, not the Empty sentinel.
		return nil
	}

	r := &WorkflowTemplateJobsSparkRJob{}

	if dcl.IsEmptyValueIndirect(i) {
		// A present-but-empty object maps to the shared package-level sentinel value.
		return EmptyWorkflowTemplateJobsSparkRJob
	}
	r.MainRFileUri = dcl.FlattenString(m["mainRFileUri"])
	r.Args = dcl.FlattenStringSlice(m["args"])
	r.FileUris = dcl.FlattenStringSlice(m["fileUris"])
	r.ArchiveUris = dcl.FlattenStringSlice(m["archiveUris"])
	r.Properties = dcl.FlattenKeyValuePairs(m["properties"])
	r.LoggingConfig = flattenWorkflowTemplateJobsSparkRJobLoggingConfig(c, m["loggingConfig"], res)

	return r
}

// expandWorkflowTemplateJobsSparkRJobLoggingConfigMap expands the contents of WorkflowTemplateJobsSparkRJobLoggingConfig into a JSON
// request object.
func expandWorkflowTemplateJobsSparkRJobLoggingConfigMap(c *Client, f map[string]WorkflowTemplateJobsSparkRJobLoggingConfig, res *WorkflowTemplate) (map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := make(map[string]interface{})
	for k, item := range f {
		i, err := expandWorkflowTemplateJobsSparkRJobLoggingConfig(c, &item, res)
		if err != nil {
			return nil, err
		}
		if i != nil {
			items[k] = i
		}
	}

	return items, nil
}

// expandWorkflowTemplateJobsSparkRJobLoggingConfigSlice expands the contents of WorkflowTemplateJobsSparkRJobLoggingConfig into a JSON
// request object.
func expandWorkflowTemplateJobsSparkRJobLoggingConfigSlice(c *Client, f []WorkflowTemplateJobsSparkRJobLoggingConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := []map[string]interface{}{}
	for _, item := range f {
		i, err := expandWorkflowTemplateJobsSparkRJobLoggingConfig(c, &item, res)
		if err != nil {
			return nil, err
		}

		items = append(items, i)
	}

	return items, nil
}

// flattenWorkflowTemplateJobsSparkRJobLoggingConfigMap flattens the contents of WorkflowTemplateJobsSparkRJobLoggingConfig from a JSON
// response object.
func flattenWorkflowTemplateJobsSparkRJobLoggingConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplateJobsSparkRJobLoggingConfig {
	a, ok := i.(map[string]interface{})
	if !ok {
		return map[string]WorkflowTemplateJobsSparkRJobLoggingConfig{}
	}

	if len(a) == 0 {
		return map[string]WorkflowTemplateJobsSparkRJobLoggingConfig{}
	}

	items := make(map[string]WorkflowTemplateJobsSparkRJobLoggingConfig)
	for k, item := range a {
		items[k] = *flattenWorkflowTemplateJobsSparkRJobLoggingConfig(c, item.(map[string]interface{}), res)
	}

	return items
}

// flattenWorkflowTemplateJobsSparkRJobLoggingConfigSlice flattens the contents of WorkflowTemplateJobsSparkRJobLoggingConfig from a JSON
// response object.
func flattenWorkflowTemplateJobsSparkRJobLoggingConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplateJobsSparkRJobLoggingConfig {
	a, ok := i.([]interface{})
	if !ok {
		return []WorkflowTemplateJobsSparkRJobLoggingConfig{}
	}

	if len(a) == 0 {
		return []WorkflowTemplateJobsSparkRJobLoggingConfig{}
	}

	items := make([]WorkflowTemplateJobsSparkRJobLoggingConfig, 0, len(a))
	for _, item := range a {
		items = append(items, *flattenWorkflowTemplateJobsSparkRJobLoggingConfig(c, item.(map[string]interface{}), res))
	}

	return items
}

// expandWorkflowTemplateJobsSparkRJobLoggingConfig expands an instance of WorkflowTemplateJobsSparkRJobLoggingConfig into a JSON
// request object.
func expandWorkflowTemplateJobsSparkRJobLoggingConfig(c *Client, f *WorkflowTemplateJobsSparkRJobLoggingConfig, res *WorkflowTemplate) (map[string]interface{}, error) {
	if dcl.IsEmptyValueIndirect(f) {
		return nil, nil
	}

	m := make(map[string]interface{})
	if v := f.DriverLogLevels; !dcl.IsEmptyValueIndirect(v) {
		m["driverLogLevels"] = v
	}

	return m, nil
}

// flattenWorkflowTemplateJobsSparkRJobLoggingConfig flattens an instance of WorkflowTemplateJobsSparkRJobLoggingConfig from a JSON
// response object.
func flattenWorkflowTemplateJobsSparkRJobLoggingConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplateJobsSparkRJobLoggingConfig {
	m, ok := i.(map[string]interface{})
	if !ok {
		return nil
	}

	r := &WorkflowTemplateJobsSparkRJobLoggingConfig{}

	if dcl.IsEmptyValueIndirect(i) {
		return EmptyWorkflowTemplateJobsSparkRJobLoggingConfig
	}
	r.DriverLogLevels = dcl.FlattenKeyValuePairs(m["driverLogLevels"])

	return r
}

// expandWorkflowTemplateJobsSparkSqlJobMap expands the contents of WorkflowTemplateJobsSparkSqlJob into a JSON
// request object.
func expandWorkflowTemplateJobsSparkSqlJobMap(c *Client, f map[string]WorkflowTemplateJobsSparkSqlJob, res *WorkflowTemplate) (map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := make(map[string]interface{})
	for k, item := range f {
		i, err := expandWorkflowTemplateJobsSparkSqlJob(c, &item, res)
		if err != nil {
			return nil, err
		}
		// Values that expand to nil (empty objects) are dropped from the request map.
		if i != nil {
			items[k] = i
		}
	}

	return items, nil
}

// expandWorkflowTemplateJobsSparkSqlJobSlice expands the contents of WorkflowTemplateJobsSparkSqlJob into a JSON
// request object.
func expandWorkflowTemplateJobsSparkSqlJobSlice(c *Client, f []WorkflowTemplateJobsSparkSqlJob, res *WorkflowTemplate) ([]map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := []map[string]interface{}{}
	for _, item := range f {
		i, err := expandWorkflowTemplateJobsSparkSqlJob(c, &item, res)
		if err != nil {
			return nil, err
		}

		// Unlike the Map variant, nil (empty) elements are kept so slice
		// positions are preserved.
		items = append(items, i)
	}

	return items, nil
}

// flattenWorkflowTemplateJobsSparkSqlJobMap flattens the contents of WorkflowTemplateJobsSparkSqlJob from a JSON
// response object.
func flattenWorkflowTemplateJobsSparkSqlJobMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplateJobsSparkSqlJob {
	a, ok := i.(map[string]interface{})
	if !ok {
		// Malformed or missing input flattens to an empty (non-nil) map.
		return map[string]WorkflowTemplateJobsSparkSqlJob{}
	}

	if len(a) == 0 {
		return map[string]WorkflowTemplateJobsSparkSqlJob{}
	}

	items := make(map[string]WorkflowTemplateJobsSparkSqlJob)
	for k, item := range a {
		// NOTE(review): unchecked assertion — a non-map element would panic;
		// generated code assumes well-formed API responses.
		items[k] = *flattenWorkflowTemplateJobsSparkSqlJob(c, item.(map[string]interface{}), res)
	}

	return items
}

// flattenWorkflowTemplateJobsSparkSqlJobSlice flattens the contents of WorkflowTemplateJobsSparkSqlJob from a JSON
// response object.
func flattenWorkflowTemplateJobsSparkSqlJobSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplateJobsSparkSqlJob {
	a, ok := i.([]interface{})
	if !ok {
		// Malformed or missing input flattens to an empty (non-nil) slice.
		return []WorkflowTemplateJobsSparkSqlJob{}
	}

	if len(a) == 0 {
		return []WorkflowTemplateJobsSparkSqlJob{}
	}

	items := make([]WorkflowTemplateJobsSparkSqlJob, 0, len(a))
	for _, item := range a {
		items = append(items, *flattenWorkflowTemplateJobsSparkSqlJob(c, item.(map[string]interface{}), res))
	}

	return items
}

// expandWorkflowTemplateJobsSparkSqlJob expands an instance of WorkflowTemplateJobsSparkSqlJob into a JSON
// request object.
func expandWorkflowTemplateJobsSparkSqlJob(c *Client, f *WorkflowTemplateJobsSparkSqlJob, res *WorkflowTemplate) (map[string]interface{}, error) {
	if dcl.IsEmptyValueIndirect(f) {
		// Empty input expands to (nil, nil) so callers omit the field entirely.
		return nil, nil
	}

	m := make(map[string]interface{})
	if v := f.QueryFileUri; !dcl.IsEmptyValueIndirect(v) {
		m["queryFileUri"] = v
	}
	if v, err := expandWorkflowTemplateJobsSparkSqlJobQueryList(c, f.QueryList, res); err != nil {
		return nil, fmt.Errorf("error expanding QueryList into queryList: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["queryList"] = v
	}
	if v := f.ScriptVariables; !dcl.IsEmptyValueIndirect(v) {
		m["scriptVariables"] = v
	}
	if v := f.Properties; !dcl.IsEmptyValueIndirect(v) {
		m["properties"] = v
	}
	// Slice fields use a plain nil check, so an explicitly empty list is still serialized.
	if v := f.JarFileUris; v != nil {
		m["jarFileUris"] = v
	}
	if v, err := expandWorkflowTemplateJobsSparkSqlJobLoggingConfig(c, f.LoggingConfig, res); err != nil {
		return nil, fmt.Errorf("error expanding LoggingConfig into loggingConfig: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["loggingConfig"] = v
	}

	return m, nil
}

// flattenWorkflowTemplateJobsSparkSqlJob flattens an instance of WorkflowTemplateJobsSparkSqlJob from a JSON
// response object.
func flattenWorkflowTemplateJobsSparkSqlJob(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplateJobsSparkSqlJob {
	m, ok := i.(map[string]interface{})
	if !ok {
		// Non-map input (including a nil interface) flattens to nil, not the Empty sentinel.
		return nil
	}

	r := &WorkflowTemplateJobsSparkSqlJob{}

	if dcl.IsEmptyValueIndirect(i) {
		// A present-but-empty object maps to the shared package-level sentinel value.
		return EmptyWorkflowTemplateJobsSparkSqlJob
	}
	r.QueryFileUri = dcl.FlattenString(m["queryFileUri"])
	r.QueryList = flattenWorkflowTemplateJobsSparkSqlJobQueryList(c, m["queryList"], res)
	r.ScriptVariables = dcl.FlattenKeyValuePairs(m["scriptVariables"])
	r.Properties = dcl.FlattenKeyValuePairs(m["properties"])
	r.JarFileUris = dcl.FlattenStringSlice(m["jarFileUris"])
	r.LoggingConfig = flattenWorkflowTemplateJobsSparkSqlJobLoggingConfig(c, m["loggingConfig"], res)

	return r
}

// expandWorkflowTemplateJobsSparkSqlJobQueryListMap expands the contents of WorkflowTemplateJobsSparkSqlJobQueryList into a JSON
// request object.
func expandWorkflowTemplateJobsSparkSqlJobQueryListMap(c *Client, f map[string]WorkflowTemplateJobsSparkSqlJobQueryList, res *WorkflowTemplate) (map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := make(map[string]interface{})
	for k, item := range f {
		i, err := expandWorkflowTemplateJobsSparkSqlJobQueryList(c, &item, res)
		if err != nil {
			return nil, err
		}
		if i != nil {
			items[k] = i
		}
	}

	return items, nil
}

// expandWorkflowTemplateJobsSparkSqlJobQueryListSlice expands the contents of WorkflowTemplateJobsSparkSqlJobQueryList into a JSON
// request object.
func expandWorkflowTemplateJobsSparkSqlJobQueryListSlice(c *Client, f []WorkflowTemplateJobsSparkSqlJobQueryList, res *WorkflowTemplate) ([]map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := []map[string]interface{}{}
	for _, item := range f {
		i, err := expandWorkflowTemplateJobsSparkSqlJobQueryList(c, &item, res)
		if err != nil {
			return nil, err
		}

		items = append(items, i)
	}

	return items, nil
}

// flattenWorkflowTemplateJobsSparkSqlJobQueryListMap flattens the contents of WorkflowTemplateJobsSparkSqlJobQueryList from a JSON
// response object.
func flattenWorkflowTemplateJobsSparkSqlJobQueryListMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplateJobsSparkSqlJobQueryList {
	a, ok := i.(map[string]interface{})
	if !ok {
		return map[string]WorkflowTemplateJobsSparkSqlJobQueryList{}
	}

	if len(a) == 0 {
		return map[string]WorkflowTemplateJobsSparkSqlJobQueryList{}
	}

	items := make(map[string]WorkflowTemplateJobsSparkSqlJobQueryList)
	for k, item := range a {
		items[k] = *flattenWorkflowTemplateJobsSparkSqlJobQueryList(c, item.(map[string]interface{}), res)
	}

	return items
}

// flattenWorkflowTemplateJobsSparkSqlJobQueryListSlice flattens the contents of WorkflowTemplateJobsSparkSqlJobQueryList from a JSON
// response object.
+func flattenWorkflowTemplateJobsSparkSqlJobQueryListSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplateJobsSparkSqlJobQueryList { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplateJobsSparkSqlJobQueryList{} + } + + if len(a) == 0 { + return []WorkflowTemplateJobsSparkSqlJobQueryList{} + } + + items := make([]WorkflowTemplateJobsSparkSqlJobQueryList, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplateJobsSparkSqlJobQueryList(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplateJobsSparkSqlJobQueryList expands an instance of WorkflowTemplateJobsSparkSqlJobQueryList into a JSON +// request object. +func expandWorkflowTemplateJobsSparkSqlJobQueryList(c *Client, f *WorkflowTemplateJobsSparkSqlJobQueryList, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Queries; v != nil { + m["queries"] = v + } + + return m, nil +} + +// flattenWorkflowTemplateJobsSparkSqlJobQueryList flattens an instance of WorkflowTemplateJobsSparkSqlJobQueryList from a JSON +// response object. +func flattenWorkflowTemplateJobsSparkSqlJobQueryList(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplateJobsSparkSqlJobQueryList { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplateJobsSparkSqlJobQueryList{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplateJobsSparkSqlJobQueryList + } + r.Queries = dcl.FlattenStringSlice(m["queries"]) + + return r +} + +// expandWorkflowTemplateJobsSparkSqlJobLoggingConfigMap expands the contents of WorkflowTemplateJobsSparkSqlJobLoggingConfig into a JSON +// request object. 
+func expandWorkflowTemplateJobsSparkSqlJobLoggingConfigMap(c *Client, f map[string]WorkflowTemplateJobsSparkSqlJobLoggingConfig, res *WorkflowTemplate) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandWorkflowTemplateJobsSparkSqlJobLoggingConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandWorkflowTemplateJobsSparkSqlJobLoggingConfigSlice expands a slice of WorkflowTemplateJobsSparkSqlJobLoggingConfig into a slice of JSON
+// request objects.
+func expandWorkflowTemplateJobsSparkSqlJobLoggingConfigSlice(c *Client, f []WorkflowTemplateJobsSparkSqlJobLoggingConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandWorkflowTemplateJobsSparkSqlJobLoggingConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenWorkflowTemplateJobsSparkSqlJobLoggingConfigMap flattens a JSON response map into a map of WorkflowTemplateJobsSparkSqlJobLoggingConfig;
+// non-map or empty input yields an empty map.
+func flattenWorkflowTemplateJobsSparkSqlJobLoggingConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplateJobsSparkSqlJobLoggingConfig {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]WorkflowTemplateJobsSparkSqlJobLoggingConfig{}
+	}
+
+	if len(a) == 0 {
+		return map[string]WorkflowTemplateJobsSparkSqlJobLoggingConfig{}
+	}
+
+	items := make(map[string]WorkflowTemplateJobsSparkSqlJobLoggingConfig)
+	for k, item := range a {
+		items[k] = *flattenWorkflowTemplateJobsSparkSqlJobLoggingConfig(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenWorkflowTemplateJobsSparkSqlJobLoggingConfigSlice flattens a JSON response array into a slice of WorkflowTemplateJobsSparkSqlJobLoggingConfig;
+// non-array or empty input yields an empty slice.
+func flattenWorkflowTemplateJobsSparkSqlJobLoggingConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplateJobsSparkSqlJobLoggingConfig {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []WorkflowTemplateJobsSparkSqlJobLoggingConfig{}
+	}
+
+	if len(a) == 0 {
+		return []WorkflowTemplateJobsSparkSqlJobLoggingConfig{}
+	}
+
+	items := make([]WorkflowTemplateJobsSparkSqlJobLoggingConfig, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenWorkflowTemplateJobsSparkSqlJobLoggingConfig(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandWorkflowTemplateJobsSparkSqlJobLoggingConfig expands an instance of WorkflowTemplateJobsSparkSqlJobLoggingConfig into a JSON
+// request object; an empty value expands to nil.
+func expandWorkflowTemplateJobsSparkSqlJobLoggingConfig(c *Client, f *WorkflowTemplateJobsSparkSqlJobLoggingConfig, res *WorkflowTemplate) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.DriverLogLevels; !dcl.IsEmptyValueIndirect(v) {
+		m["driverLogLevels"] = v
+	}
+
+	return m, nil
+}
+
+// flattenWorkflowTemplateJobsSparkSqlJobLoggingConfig flattens an instance of WorkflowTemplateJobsSparkSqlJobLoggingConfig from a JSON
+// response object; an empty value yields the canonical empty struct.
+func flattenWorkflowTemplateJobsSparkSqlJobLoggingConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplateJobsSparkSqlJobLoggingConfig {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &WorkflowTemplateJobsSparkSqlJobLoggingConfig{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyWorkflowTemplateJobsSparkSqlJobLoggingConfig
+	}
+	r.DriverLogLevels = dcl.FlattenKeyValuePairs(m["driverLogLevels"])
+
+	return r
+}
+
+// expandWorkflowTemplateJobsPrestoJobMap expands a map of WorkflowTemplateJobsPrestoJob into a map of JSON
+// request objects, omitting entries that expand to nil.
+func expandWorkflowTemplateJobsPrestoJobMap(c *Client, f map[string]WorkflowTemplateJobsPrestoJob, res *WorkflowTemplate) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandWorkflowTemplateJobsPrestoJob(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandWorkflowTemplateJobsPrestoJobSlice expands a slice of WorkflowTemplateJobsPrestoJob into a slice of JSON
+// request objects.
+func expandWorkflowTemplateJobsPrestoJobSlice(c *Client, f []WorkflowTemplateJobsPrestoJob, res *WorkflowTemplate) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandWorkflowTemplateJobsPrestoJob(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenWorkflowTemplateJobsPrestoJobMap flattens a JSON response map into a map of WorkflowTemplateJobsPrestoJob;
+// non-map or empty input yields an empty map.
+func flattenWorkflowTemplateJobsPrestoJobMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplateJobsPrestoJob {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]WorkflowTemplateJobsPrestoJob{}
+	}
+
+	if len(a) == 0 {
+		return map[string]WorkflowTemplateJobsPrestoJob{}
+	}
+
+	items := make(map[string]WorkflowTemplateJobsPrestoJob)
+	for k, item := range a {
+		items[k] = *flattenWorkflowTemplateJobsPrestoJob(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenWorkflowTemplateJobsPrestoJobSlice flattens a JSON response array into a slice of WorkflowTemplateJobsPrestoJob;
+// non-array or empty input yields an empty slice.
+func flattenWorkflowTemplateJobsPrestoJobSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplateJobsPrestoJob {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []WorkflowTemplateJobsPrestoJob{}
+	}
+
+	if len(a) == 0 {
+		return []WorkflowTemplateJobsPrestoJob{}
+	}
+
+	items := make([]WorkflowTemplateJobsPrestoJob, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenWorkflowTemplateJobsPrestoJob(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandWorkflowTemplateJobsPrestoJob expands an instance of WorkflowTemplateJobsPrestoJob into a JSON
+// request object; an empty value expands to nil.
+func expandWorkflowTemplateJobsPrestoJob(c *Client, f *WorkflowTemplateJobsPrestoJob, res *WorkflowTemplate) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.QueryFileUri; !dcl.IsEmptyValueIndirect(v) {
+		m["queryFileUri"] = v
+	}
+	if v, err := expandWorkflowTemplateJobsPrestoJobQueryList(c, f.QueryList, res); err != nil {
+		return nil, fmt.Errorf("error expanding QueryList into queryList: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["queryList"] = v
+	}
+	if v := f.ContinueOnFailure; !dcl.IsEmptyValueIndirect(v) {
+		m["continueOnFailure"] = v
+	}
+	if v := f.OutputFormat; !dcl.IsEmptyValueIndirect(v) {
+		m["outputFormat"] = v
+	}
+	if v := f.ClientTags; v != nil {
+		m["clientTags"] = v
+	}
+	if v := f.Properties; !dcl.IsEmptyValueIndirect(v) {
+		m["properties"] = v
+	}
+	if v, err := expandWorkflowTemplateJobsPrestoJobLoggingConfig(c, f.LoggingConfig, res); err != nil {
+		return nil, fmt.Errorf("error expanding LoggingConfig into loggingConfig: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["loggingConfig"] = v
+	}
+
+	return m, nil
+}
+
+// flattenWorkflowTemplateJobsPrestoJob flattens an instance of WorkflowTemplateJobsPrestoJob from a JSON
+// response object; an empty value yields the canonical empty struct.
+func flattenWorkflowTemplateJobsPrestoJob(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplateJobsPrestoJob {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &WorkflowTemplateJobsPrestoJob{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyWorkflowTemplateJobsPrestoJob
+	}
+	r.QueryFileUri = dcl.FlattenString(m["queryFileUri"])
+	r.QueryList = flattenWorkflowTemplateJobsPrestoJobQueryList(c, m["queryList"], res)
+	r.ContinueOnFailure = dcl.FlattenBool(m["continueOnFailure"])
+	r.OutputFormat = dcl.FlattenString(m["outputFormat"])
+	r.ClientTags = dcl.FlattenStringSlice(m["clientTags"])
+	r.Properties = dcl.FlattenKeyValuePairs(m["properties"])
+	r.LoggingConfig = flattenWorkflowTemplateJobsPrestoJobLoggingConfig(c, m["loggingConfig"], res)
+
+	return r
+}
+
+// expandWorkflowTemplateJobsPrestoJobQueryListMap expands a map of WorkflowTemplateJobsPrestoJobQueryList into a map of JSON
+// request objects, omitting entries that expand to nil.
+func expandWorkflowTemplateJobsPrestoJobQueryListMap(c *Client, f map[string]WorkflowTemplateJobsPrestoJobQueryList, res *WorkflowTemplate) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandWorkflowTemplateJobsPrestoJobQueryList(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandWorkflowTemplateJobsPrestoJobQueryListSlice expands a slice of WorkflowTemplateJobsPrestoJobQueryList into a slice of JSON
+// request objects.
+func expandWorkflowTemplateJobsPrestoJobQueryListSlice(c *Client, f []WorkflowTemplateJobsPrestoJobQueryList, res *WorkflowTemplate) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandWorkflowTemplateJobsPrestoJobQueryList(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenWorkflowTemplateJobsPrestoJobQueryListMap flattens a JSON response map into a map of WorkflowTemplateJobsPrestoJobQueryList;
+// non-map or empty input yields an empty map.
+func flattenWorkflowTemplateJobsPrestoJobQueryListMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplateJobsPrestoJobQueryList {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]WorkflowTemplateJobsPrestoJobQueryList{}
+	}
+
+	if len(a) == 0 {
+		return map[string]WorkflowTemplateJobsPrestoJobQueryList{}
+	}
+
+	items := make(map[string]WorkflowTemplateJobsPrestoJobQueryList)
+	for k, item := range a {
+		items[k] = *flattenWorkflowTemplateJobsPrestoJobQueryList(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenWorkflowTemplateJobsPrestoJobQueryListSlice flattens a JSON response array into a slice of WorkflowTemplateJobsPrestoJobQueryList;
+// non-array or empty input yields an empty slice.
+func flattenWorkflowTemplateJobsPrestoJobQueryListSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplateJobsPrestoJobQueryList {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []WorkflowTemplateJobsPrestoJobQueryList{}
+	}
+
+	if len(a) == 0 {
+		return []WorkflowTemplateJobsPrestoJobQueryList{}
+	}
+
+	items := make([]WorkflowTemplateJobsPrestoJobQueryList, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenWorkflowTemplateJobsPrestoJobQueryList(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandWorkflowTemplateJobsPrestoJobQueryList expands an instance of WorkflowTemplateJobsPrestoJobQueryList into a JSON
+// request object; an empty value expands to nil.
+func expandWorkflowTemplateJobsPrestoJobQueryList(c *Client, f *WorkflowTemplateJobsPrestoJobQueryList, res *WorkflowTemplate) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.Queries; v != nil {
+		m["queries"] = v
+	}
+
+	return m, nil
+}
+
+// flattenWorkflowTemplateJobsPrestoJobQueryList flattens an instance of WorkflowTemplateJobsPrestoJobQueryList from a JSON
+// response object; an empty value yields the canonical empty struct.
+func flattenWorkflowTemplateJobsPrestoJobQueryList(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplateJobsPrestoJobQueryList {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &WorkflowTemplateJobsPrestoJobQueryList{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyWorkflowTemplateJobsPrestoJobQueryList
+	}
+	r.Queries = dcl.FlattenStringSlice(m["queries"])
+
+	return r
+}
+
+// expandWorkflowTemplateJobsPrestoJobLoggingConfigMap expands a map of WorkflowTemplateJobsPrestoJobLoggingConfig into a map of JSON
+// request objects, omitting entries that expand to nil.
+func expandWorkflowTemplateJobsPrestoJobLoggingConfigMap(c *Client, f map[string]WorkflowTemplateJobsPrestoJobLoggingConfig, res *WorkflowTemplate) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandWorkflowTemplateJobsPrestoJobLoggingConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandWorkflowTemplateJobsPrestoJobLoggingConfigSlice expands a slice of WorkflowTemplateJobsPrestoJobLoggingConfig into a slice of JSON
+// request objects.
+func expandWorkflowTemplateJobsPrestoJobLoggingConfigSlice(c *Client, f []WorkflowTemplateJobsPrestoJobLoggingConfig, res *WorkflowTemplate) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandWorkflowTemplateJobsPrestoJobLoggingConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenWorkflowTemplateJobsPrestoJobLoggingConfigMap flattens a JSON response map into a map of WorkflowTemplateJobsPrestoJobLoggingConfig;
+// non-map or empty input yields an empty map.
+func flattenWorkflowTemplateJobsPrestoJobLoggingConfigMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplateJobsPrestoJobLoggingConfig {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]WorkflowTemplateJobsPrestoJobLoggingConfig{}
+	}
+
+	if len(a) == 0 {
+		return map[string]WorkflowTemplateJobsPrestoJobLoggingConfig{}
+	}
+
+	items := make(map[string]WorkflowTemplateJobsPrestoJobLoggingConfig)
+	for k, item := range a {
+		items[k] = *flattenWorkflowTemplateJobsPrestoJobLoggingConfig(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenWorkflowTemplateJobsPrestoJobLoggingConfigSlice flattens a JSON response array into a slice of WorkflowTemplateJobsPrestoJobLoggingConfig;
+// non-array or empty input yields an empty slice.
+func flattenWorkflowTemplateJobsPrestoJobLoggingConfigSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplateJobsPrestoJobLoggingConfig {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []WorkflowTemplateJobsPrestoJobLoggingConfig{}
+	}
+
+	if len(a) == 0 {
+		return []WorkflowTemplateJobsPrestoJobLoggingConfig{}
+	}
+
+	items := make([]WorkflowTemplateJobsPrestoJobLoggingConfig, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenWorkflowTemplateJobsPrestoJobLoggingConfig(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandWorkflowTemplateJobsPrestoJobLoggingConfig expands an instance of WorkflowTemplateJobsPrestoJobLoggingConfig into a JSON
+// request object; an empty value expands to nil.
+func expandWorkflowTemplateJobsPrestoJobLoggingConfig(c *Client, f *WorkflowTemplateJobsPrestoJobLoggingConfig, res *WorkflowTemplate) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.DriverLogLevels; !dcl.IsEmptyValueIndirect(v) {
+		m["driverLogLevels"] = v
+	}
+
+	return m, nil
+}
+
+// flattenWorkflowTemplateJobsPrestoJobLoggingConfig flattens an instance of WorkflowTemplateJobsPrestoJobLoggingConfig from a JSON
+// response object; an empty value yields the canonical empty struct.
+func flattenWorkflowTemplateJobsPrestoJobLoggingConfig(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplateJobsPrestoJobLoggingConfig {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &WorkflowTemplateJobsPrestoJobLoggingConfig{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyWorkflowTemplateJobsPrestoJobLoggingConfig
+	}
+	r.DriverLogLevels = dcl.FlattenKeyValuePairs(m["driverLogLevels"])
+
+	return r
+}
+
+// expandWorkflowTemplateJobsSchedulingMap expands a map of WorkflowTemplateJobsScheduling into a map of JSON
+// request objects, omitting entries that expand to nil.
+func expandWorkflowTemplateJobsSchedulingMap(c *Client, f map[string]WorkflowTemplateJobsScheduling, res *WorkflowTemplate) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandWorkflowTemplateJobsScheduling(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandWorkflowTemplateJobsSchedulingSlice expands a slice of WorkflowTemplateJobsScheduling into a slice of JSON
+// request objects.
+func expandWorkflowTemplateJobsSchedulingSlice(c *Client, f []WorkflowTemplateJobsScheduling, res *WorkflowTemplate) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandWorkflowTemplateJobsScheduling(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenWorkflowTemplateJobsSchedulingMap flattens a JSON response map into a map of WorkflowTemplateJobsScheduling;
+// non-map or empty input yields an empty map.
+func flattenWorkflowTemplateJobsSchedulingMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplateJobsScheduling {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]WorkflowTemplateJobsScheduling{}
+	}
+
+	if len(a) == 0 {
+		return map[string]WorkflowTemplateJobsScheduling{}
+	}
+
+	items := make(map[string]WorkflowTemplateJobsScheduling)
+	for k, item := range a {
+		items[k] = *flattenWorkflowTemplateJobsScheduling(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenWorkflowTemplateJobsSchedulingSlice flattens a JSON response array into a slice of WorkflowTemplateJobsScheduling;
+// non-array or empty input yields an empty slice.
+func flattenWorkflowTemplateJobsSchedulingSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplateJobsScheduling {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []WorkflowTemplateJobsScheduling{}
+	}
+
+	if len(a) == 0 {
+		return []WorkflowTemplateJobsScheduling{}
+	}
+
+	items := make([]WorkflowTemplateJobsScheduling, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenWorkflowTemplateJobsScheduling(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandWorkflowTemplateJobsScheduling expands an instance of WorkflowTemplateJobsScheduling into a JSON
+// request object; an empty value expands to nil.
+func expandWorkflowTemplateJobsScheduling(c *Client, f *WorkflowTemplateJobsScheduling, res *WorkflowTemplate) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.MaxFailuresPerHour; !dcl.IsEmptyValueIndirect(v) {
+		m["maxFailuresPerHour"] = v
+	}
+	if v := f.MaxFailuresTotal; !dcl.IsEmptyValueIndirect(v) {
+		m["maxFailuresTotal"] = v
+	}
+
+	return m, nil
+}
+
+// flattenWorkflowTemplateJobsScheduling flattens an instance of WorkflowTemplateJobsScheduling from a JSON
+// response object; an empty value yields the canonical empty struct.
+func flattenWorkflowTemplateJobsScheduling(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplateJobsScheduling {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &WorkflowTemplateJobsScheduling{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyWorkflowTemplateJobsScheduling
+	}
+	r.MaxFailuresPerHour = dcl.FlattenInteger(m["maxFailuresPerHour"])
+	r.MaxFailuresTotal = dcl.FlattenInteger(m["maxFailuresTotal"])
+
+	return r
+}
+
+// expandWorkflowTemplateParametersMap expands a map of WorkflowTemplateParameters into a map of JSON
+// request objects, omitting entries that expand to nil.
+func expandWorkflowTemplateParametersMap(c *Client, f map[string]WorkflowTemplateParameters, res *WorkflowTemplate) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandWorkflowTemplateParameters(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandWorkflowTemplateParametersSlice expands a slice of WorkflowTemplateParameters into a slice of JSON
+// request objects.
+func expandWorkflowTemplateParametersSlice(c *Client, f []WorkflowTemplateParameters, res *WorkflowTemplate) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandWorkflowTemplateParameters(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenWorkflowTemplateParametersMap flattens a JSON response map into a map of WorkflowTemplateParameters;
+// non-map or empty input yields an empty map.
+func flattenWorkflowTemplateParametersMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplateParameters {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]WorkflowTemplateParameters{}
+	}
+
+	if len(a) == 0 {
+		return map[string]WorkflowTemplateParameters{}
+	}
+
+	items := make(map[string]WorkflowTemplateParameters)
+	for k, item := range a {
+		items[k] = *flattenWorkflowTemplateParameters(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenWorkflowTemplateParametersSlice flattens a JSON response array into a slice of WorkflowTemplateParameters;
+// non-array or empty input yields an empty slice.
+func flattenWorkflowTemplateParametersSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplateParameters {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []WorkflowTemplateParameters{}
+	}
+
+	if len(a) == 0 {
+		return []WorkflowTemplateParameters{}
+	}
+
+	items := make([]WorkflowTemplateParameters, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenWorkflowTemplateParameters(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandWorkflowTemplateParameters expands an instance of WorkflowTemplateParameters into a JSON
+// request object; NOTE(review): guards with f == nil, not IsEmptyValueIndirect like siblings — matches generator output.
+func expandWorkflowTemplateParameters(c *Client, f *WorkflowTemplateParameters, res *WorkflowTemplate) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.Name; !dcl.IsEmptyValueIndirect(v) {
+		m["name"] = v
+	}
+	if v := f.Fields; v != nil {
+		m["fields"] = v
+	}
+	if v := f.Description; !dcl.IsEmptyValueIndirect(v) {
+		m["description"] = v
+	}
+	if v, err := expandWorkflowTemplateParametersValidation(c, f.Validation, res); err != nil {
+		return nil, fmt.Errorf("error expanding Validation into validation: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["validation"] = v
+	}
+
+	return m, nil
+}
+
+// flattenWorkflowTemplateParameters flattens an instance of WorkflowTemplateParameters from a JSON
+// response object; an empty value yields the canonical empty struct.
+func flattenWorkflowTemplateParameters(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplateParameters {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &WorkflowTemplateParameters{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyWorkflowTemplateParameters
+	}
+	r.Name = dcl.FlattenString(m["name"])
+	r.Fields = dcl.FlattenStringSlice(m["fields"])
+	r.Description = dcl.FlattenString(m["description"])
+	r.Validation = flattenWorkflowTemplateParametersValidation(c, m["validation"], res)
+
+	return r
+}
+
+// expandWorkflowTemplateParametersValidationMap expands a map of WorkflowTemplateParametersValidation into a map of JSON
+// request objects, omitting entries that expand to nil.
+func expandWorkflowTemplateParametersValidationMap(c *Client, f map[string]WorkflowTemplateParametersValidation, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplateParametersValidation(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplateParametersValidationSlice expands the contents of WorkflowTemplateParametersValidation into a JSON +// request object. +func expandWorkflowTemplateParametersValidationSlice(c *Client, f []WorkflowTemplateParametersValidation, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplateParametersValidation(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplateParametersValidationMap flattens the contents of WorkflowTemplateParametersValidation from a JSON +// response object. +func flattenWorkflowTemplateParametersValidationMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplateParametersValidation { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]WorkflowTemplateParametersValidation{} + } + + if len(a) == 0 { + return map[string]WorkflowTemplateParametersValidation{} + } + + items := make(map[string]WorkflowTemplateParametersValidation) + for k, item := range a { + items[k] = *flattenWorkflowTemplateParametersValidation(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenWorkflowTemplateParametersValidationSlice flattens the contents of WorkflowTemplateParametersValidation from a JSON +// response object. 
+func flattenWorkflowTemplateParametersValidationSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplateParametersValidation { + a, ok := i.([]interface{}) + if !ok { + return []WorkflowTemplateParametersValidation{} + } + + if len(a) == 0 { + return []WorkflowTemplateParametersValidation{} + } + + items := make([]WorkflowTemplateParametersValidation, 0, len(a)) + for _, item := range a { + items = append(items, *flattenWorkflowTemplateParametersValidation(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandWorkflowTemplateParametersValidation expands an instance of WorkflowTemplateParametersValidation into a JSON +// request object. +func expandWorkflowTemplateParametersValidation(c *Client, f *WorkflowTemplateParametersValidation, res *WorkflowTemplate) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v, err := expandWorkflowTemplateParametersValidationRegex(c, f.Regex, res); err != nil { + return nil, fmt.Errorf("error expanding Regex into regex: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["regex"] = v + } + if v, err := expandWorkflowTemplateParametersValidationValues(c, f.Values, res); err != nil { + return nil, fmt.Errorf("error expanding Values into values: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["values"] = v + } + + return m, nil +} + +// flattenWorkflowTemplateParametersValidation flattens an instance of WorkflowTemplateParametersValidation from a JSON +// response object. 
+func flattenWorkflowTemplateParametersValidation(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplateParametersValidation { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &WorkflowTemplateParametersValidation{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyWorkflowTemplateParametersValidation + } + r.Regex = flattenWorkflowTemplateParametersValidationRegex(c, m["regex"], res) + r.Values = flattenWorkflowTemplateParametersValidationValues(c, m["values"], res) + + return r +} + +// expandWorkflowTemplateParametersValidationRegexMap expands the contents of WorkflowTemplateParametersValidationRegex into a JSON +// request object. +func expandWorkflowTemplateParametersValidationRegexMap(c *Client, f map[string]WorkflowTemplateParametersValidationRegex, res *WorkflowTemplate) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandWorkflowTemplateParametersValidationRegex(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandWorkflowTemplateParametersValidationRegexSlice expands the contents of WorkflowTemplateParametersValidationRegex into a JSON +// request object. +func expandWorkflowTemplateParametersValidationRegexSlice(c *Client, f []WorkflowTemplateParametersValidationRegex, res *WorkflowTemplate) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandWorkflowTemplateParametersValidationRegex(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenWorkflowTemplateParametersValidationRegexMap flattens the contents of WorkflowTemplateParametersValidationRegex from a JSON +// response object. 
+func flattenWorkflowTemplateParametersValidationRegexMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplateParametersValidationRegex {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]WorkflowTemplateParametersValidationRegex{}
+	}
+
+	if len(a) == 0 {
+		return map[string]WorkflowTemplateParametersValidationRegex{}
+	}
+
+	items := make(map[string]WorkflowTemplateParametersValidationRegex)
+	for k, item := range a {
+		// NOTE(review): panics if a value is not a JSON object; generated DCL code assumes well-formed API responses.
+		items[k] = *flattenWorkflowTemplateParametersValidationRegex(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenWorkflowTemplateParametersValidationRegexSlice flattens the contents of WorkflowTemplateParametersValidationRegex from a JSON
+// response object.
+func flattenWorkflowTemplateParametersValidationRegexSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplateParametersValidationRegex {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []WorkflowTemplateParametersValidationRegex{}
+	}
+
+	if len(a) == 0 {
+		return []WorkflowTemplateParametersValidationRegex{}
+	}
+
+	items := make([]WorkflowTemplateParametersValidationRegex, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenWorkflowTemplateParametersValidationRegex(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandWorkflowTemplateParametersValidationRegex expands an instance of WorkflowTemplateParametersValidationRegex into a JSON
+// request object.
+func expandWorkflowTemplateParametersValidationRegex(c *Client, f *WorkflowTemplateParametersValidationRegex, res *WorkflowTemplate) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.Regexes; v != nil {
+		m["regexes"] = v
+	}
+
+	return m, nil
+}
+
+// flattenWorkflowTemplateParametersValidationRegex flattens an instance of WorkflowTemplateParametersValidationRegex from a JSON
+// response object.
+func flattenWorkflowTemplateParametersValidationRegex(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplateParametersValidationRegex {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &WorkflowTemplateParametersValidationRegex{}
+
+	// NOTE(review): returns the shared Empty* sentinel (not a fresh struct) when dcl considers the input empty.
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyWorkflowTemplateParametersValidationRegex
+	}
+	r.Regexes = dcl.FlattenStringSlice(m["regexes"])
+
+	return r
+}
+
+// expandWorkflowTemplateParametersValidationValuesMap expands the contents of WorkflowTemplateParametersValidationValues into a JSON
+// request object.
+func expandWorkflowTemplateParametersValidationValuesMap(c *Client, f map[string]WorkflowTemplateParametersValidationValues, res *WorkflowTemplate) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandWorkflowTemplateParametersValidationValues(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandWorkflowTemplateParametersValidationValuesSlice expands the contents of WorkflowTemplateParametersValidationValues into a JSON
+// request object.
+func expandWorkflowTemplateParametersValidationValuesSlice(c *Client, f []WorkflowTemplateParametersValidationValues, res *WorkflowTemplate) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandWorkflowTemplateParametersValidationValues(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenWorkflowTemplateParametersValidationValuesMap flattens the contents of WorkflowTemplateParametersValidationValues from a JSON
+// response object.
+func flattenWorkflowTemplateParametersValidationValuesMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplateParametersValidationValues {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]WorkflowTemplateParametersValidationValues{}
+	}
+
+	if len(a) == 0 {
+		return map[string]WorkflowTemplateParametersValidationValues{}
+	}
+
+	items := make(map[string]WorkflowTemplateParametersValidationValues)
+	for k, item := range a {
+		// NOTE(review): panics if a value is not a JSON object; generated DCL code assumes well-formed API responses.
+		items[k] = *flattenWorkflowTemplateParametersValidationValues(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenWorkflowTemplateParametersValidationValuesSlice flattens the contents of WorkflowTemplateParametersValidationValues from a JSON
+// response object.
+func flattenWorkflowTemplateParametersValidationValuesSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplateParametersValidationValues {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []WorkflowTemplateParametersValidationValues{}
+	}
+
+	if len(a) == 0 {
+		return []WorkflowTemplateParametersValidationValues{}
+	}
+
+	items := make([]WorkflowTemplateParametersValidationValues, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenWorkflowTemplateParametersValidationValues(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandWorkflowTemplateParametersValidationValues expands an instance of WorkflowTemplateParametersValidationValues into a JSON
+// request object.
+func expandWorkflowTemplateParametersValidationValues(c *Client, f *WorkflowTemplateParametersValidationValues, res *WorkflowTemplate) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.Values; v != nil {
+		m["values"] = v
+	}
+
+	return m, nil
+}
+
+// flattenWorkflowTemplateParametersValidationValues flattens an instance of WorkflowTemplateParametersValidationValues from a JSON
+// response object.
+func flattenWorkflowTemplateParametersValidationValues(c *Client, i interface{}, res *WorkflowTemplate) *WorkflowTemplateParametersValidationValues {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &WorkflowTemplateParametersValidationValues{}
+
+	// NOTE(review): returns the shared Empty* sentinel (not a fresh struct) when dcl considers the input empty.
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyWorkflowTemplateParametersValidationValues
+	}
+	r.Values = dcl.FlattenStringSlice(m["values"])
+
+	return r
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnumMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnumMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum{}
+	}
+
+	if len(a) == 0 {
+		return map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum{}
+	}
+
+	items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum)
+	for k, item := range a {
+		// NOTE(review): the enum flattener returns nil for non-string values, so this deref panics on malformed input.
+		items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum(item.(interface{}))
+	}
+
+	return items
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnumSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnumSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum{}
+	}
+
+	if len(a) == 0 {
+		return []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum{}
+	}
+
+	items := make([]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum, 0, len(a))
+	for _, item := range a {
+		// NOTE(review): item.(interface{}) is a no-op assertion (staticcheck S1040) — a generator artifact, kept as-is.
+		items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum(item.(interface{})))
+	}
+
+	return items
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum asserts that an interface is a string, and returns a
+// pointer to a *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum with the same value as that string.
+func flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum(i interface{}) *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum {
+	s, ok := i.(string)
+	if !ok {
+		return nil
+	}
+
+	return WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnumRef(s)
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnumMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnumMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum{}
+	}
+
+	if len(a) == 0 {
+		return map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum{}
+	}
+
+	items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum)
+	for k, item := range a {
+		// NOTE(review): the enum flattener returns nil for non-string values, so this deref panics on malformed input.
+		items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum(item.(interface{}))
+	}
+
+	return items
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnumSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnumSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum{}
+	}
+
+	if len(a) == 0 {
+		return []WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum{}
+	}
+
+	items := make([]WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum, 0, len(a))
+	for _, item := range a {
+		// NOTE(review): item.(interface{}) is a no-op assertion (staticcheck S1040) — a generator artifact, kept as-is.
+		items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum(item.(interface{})))
+	}
+
+	return items
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum asserts that an interface is a string, and returns a
+// pointer to a *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum with the same value as that string.
+func flattenWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum(i interface{}) *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum {
+	s, ok := i.(string)
+	if !ok {
+		return nil
+	}
+
+	return WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnumRef(s)
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnumMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnumMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]WorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum{}
+	}
+
+	if len(a) == 0 {
+		return map[string]WorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum{}
+	}
+
+	items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum)
+	for k, item := range a {
+		// NOTE(review): the enum flattener returns nil for non-string values, so this deref panics on malformed input.
+		items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum(item.(interface{}))
+	}
+
+	return items
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnumSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnumSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []WorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum{}
+	}
+
+	if len(a) == 0 {
+		return []WorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum{}
+	}
+
+	items := make([]WorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum, 0, len(a))
+	for _, item := range a {
+		// NOTE(review): item.(interface{}) is a no-op assertion (staticcheck S1040) — a generator artifact, kept as-is.
+		items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum(item.(interface{})))
+	}
+
+	return items
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum asserts that an interface is a string, and returns a
+// pointer to a *WorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum with the same value as that string.
+func flattenWorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum(i interface{}) *WorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum {
+	s, ok := i.(string)
+	if !ok {
+		return nil
+	}
+
+	return WorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnumRef(s)
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnumMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnumMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum{}
+	}
+
+	if len(a) == 0 {
+		return map[string]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum{}
+	}
+
+	items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum)
+	for k, item := range a {
+		// NOTE(review): the enum flattener returns nil for non-string values, so this deref panics on malformed input.
+		items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum(item.(interface{}))
+	}
+
+	return items
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnumSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnumSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum{}
+	}
+
+	if len(a) == 0 {
+		return []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum{}
+	}
+
+	items := make([]WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum, 0, len(a))
+	for _, item := range a {
+		// NOTE(review): item.(interface{}) is a no-op assertion (staticcheck S1040) — a generator artifact, kept as-is.
+		items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum(item.(interface{})))
+	}
+
+	return items
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum asserts that an interface is a string, and returns a
+// pointer to a *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum with the same value as that string.
+func flattenWorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum(i interface{}) *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum {
+	s, ok := i.(string)
+	if !ok {
+		return nil
+	}
+
+	return WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnumRef(s)
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnumMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnumMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum{}
+	}
+
+	if len(a) == 0 {
+		return map[string]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum{}
+	}
+
+	items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum)
+	for k, item := range a {
+		// NOTE(review): the enum flattener returns nil for non-string values, so this deref panics on malformed input.
+		items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum(item.(interface{}))
+	}
+
+	return items
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnumSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnumSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum{}
+	}
+
+	if len(a) == 0 {
+		return []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum{}
+	}
+
+	items := make([]WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum, 0, len(a))
+	for _, item := range a {
+		// NOTE(review): item.(interface{}) is a no-op assertion (staticcheck S1040) — a generator artifact, kept as-is.
+		items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum(item.(interface{})))
+	}
+
+	return items
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum asserts that an interface is a string, and returns a
+// pointer to a *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum with the same value as that string.
+func flattenWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum(i interface{}) *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum {
+	s, ok := i.(string)
+	if !ok {
+		return nil
+	}
+
+	return WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnumRef(s)
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnumMap flattens the contents of WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnumMap(c *Client, i interface{}, res *WorkflowTemplate) map[string]WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum{}
+	}
+
+	if len(a) == 0 {
+		return map[string]WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum{}
+	}
+
+	items := make(map[string]WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum)
+	for k, item := range a {
+		// NOTE(review): the enum flattener returns nil for non-string values, so this deref panics on malformed input.
+		items[k] = *flattenWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum(item.(interface{}))
+	}
+
+	return items
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnumSlice flattens the contents of WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum from a JSON
+// response object.
+func flattenWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnumSlice(c *Client, i interface{}, res *WorkflowTemplate) []WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum{}
+	}
+
+	if len(a) == 0 {
+		return []WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum{}
+	}
+
+	items := make([]WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum, 0, len(a))
+	for _, item := range a {
+		// NOTE(review): item.(interface{}) is a no-op assertion (staticcheck S1040) — a generator artifact, kept as-is.
+		items = append(items, *flattenWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum(item.(interface{})))
+	}
+
+	return items
+}
+
+// flattenWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum asserts that an interface is a string, and returns a
+// pointer to a *WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum with the same value as that string.
+func flattenWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum(i interface{}) *WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum {
+	s, ok := i.(string)
+	if !ok {
+		return nil
+	}
+
+	return WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnumRef(s)
+}
+
+// This function returns a matcher that checks whether a serialized resource matches this resource
+// in its parameters (as defined by the fields in a Get, which definitionally define resource
+// identity). This is useful in extracting the element from a List call.
+func (r *WorkflowTemplate) matcher(c *Client) func([]byte) bool {
+	return func(b []byte) bool {
+		cr, err := unmarshalWorkflowTemplate(b, c, r)
+		if err != nil {
+			c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.")
+			return false
+		}
+		nr := r.urlNormalized()
+		ncr := cr.urlNormalized()
+		c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr)
+
+		// Identity fields (Project, Location, Name): two nil pointers compare equal;
+		// exactly one nil, or differing values, is a mismatch.
+		if nr.Project == nil && ncr.Project == nil {
+			c.Config.Logger.Info("Both Project fields null - considering equal.")
+		} else if nr.Project == nil || ncr.Project == nil {
+			c.Config.Logger.Info("Only one Project field is null - considering unequal.")
+			return false
+		} else if *nr.Project != *ncr.Project {
+			return false
+		}
+		if nr.Location == nil && ncr.Location == nil {
+			c.Config.Logger.Info("Both Location fields null - considering equal.")
+		} else if nr.Location == nil || ncr.Location == nil {
+			c.Config.Logger.Info("Only one Location field is null - considering unequal.")
+			return false
+		} else if *nr.Location != *ncr.Location {
+			return false
+		}
+		if nr.Name == nil && ncr.Name == nil {
+			c.Config.Logger.Info("Both Name fields null - considering equal.")
+		} else if nr.Name == nil || ncr.Name == nil {
+			c.Config.Logger.Info("Only one Name field is null - considering unequal.")
+			return false
+		} else if *nr.Name != *ncr.Name {
+			return false
+		}
+		return true
+	}
+}
+
+type workflowTemplateDiff struct {
+	// The diff should include one or the other of RequiresRecreate or UpdateOp.
+	RequiresRecreate bool
+	UpdateOp         workflowTemplateApiOperation
+	FieldName        string // used for error logging
+}
+
+// convertFieldDiffsToWorkflowTemplateDiffs groups field diffs by the API operation each one
+// requires and converts every group into a workflowTemplateDiff (either an update op or a recreate).
+func convertFieldDiffsToWorkflowTemplateDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]workflowTemplateDiff, error) {
+	opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff)
+	// Map each operation name to the field diffs associated with it.
+	for _, fd := range fds {
+		for _, ro := range fd.ResultingOperation {
+			if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok {
+				fieldDiffs = append(fieldDiffs, fd)
+				opNamesToFieldDiffs[ro] = fieldDiffs
+			} else {
+				config.Logger.Infof("%s required due to diff: %v", ro, fd)
+				opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd}
+			}
+		}
+	}
+	var diffs []workflowTemplateDiff
+	// For each operation name, create a workflowTemplateDiff which contains the operation.
+	for opName, fieldDiffs := range opNamesToFieldDiffs {
+		// Use the first field diff's field name for logging required recreate error.
+		diff := workflowTemplateDiff{FieldName: fieldDiffs[0].FieldName}
+		if opName == "Recreate" {
+			diff.RequiresRecreate = true
+		} else {
+			apiOp, err := convertOpNameToWorkflowTemplateApiOperation(opName, fieldDiffs, opts...)
+			if err != nil {
+				return diffs, err
+			}
+			diff.UpdateOp = apiOp
+		}
+		diffs = append(diffs, diff)
+	}
+	return diffs, nil
+}
+
+// convertOpNameToWorkflowTemplateApiOperation maps an operation name to its API operation.
+// NOTE(review): the switch has no cases — WorkflowTemplate supports no in-place update
+// operations, so any non-"Recreate" op name is an error by construction.
+func convertOpNameToWorkflowTemplateApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (workflowTemplateApiOperation, error) {
+	switch opName {
+
+	default:
+		return nil, fmt.Errorf("no such operation with name: %v", opName)
+	}
+}
+
+// extractWorkflowTemplateFields recursively runs the extract hooks on each nested object,
+// temporarily materializing nil sub-objects so their extractors can run, and only writing the
+// sub-object back if extraction left it non-empty.
+func extractWorkflowTemplateFields(r *WorkflowTemplate) error {
+	vEncryptionConfig := r.EncryptionConfig
+	if vEncryptionConfig == nil {
+		// note: explicitly not the empty object.
+		vEncryptionConfig = &WorkflowTemplateEncryptionConfig{}
+	}
+	if err := extractWorkflowTemplateEncryptionConfigFields(r, vEncryptionConfig); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vEncryptionConfig) {
+		r.EncryptionConfig = vEncryptionConfig
+	}
+	vPlacement := r.Placement
+	if vPlacement == nil {
+		// note: explicitly not the empty object.
+		vPlacement = &WorkflowTemplatePlacement{}
+	}
+	if err := extractWorkflowTemplatePlacementFields(r, vPlacement); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vPlacement) {
+		r.Placement = vPlacement
+	}
+	return nil
+}
+func extractWorkflowTemplateEncryptionConfigFields(r *WorkflowTemplate, o *WorkflowTemplateEncryptionConfig) error {
+	return nil
+}
+func extractWorkflowTemplatePlacementFields(r *WorkflowTemplate, o *WorkflowTemplatePlacement) error {
+	vManagedCluster := o.ManagedCluster
+	if vManagedCluster == nil {
+		// note: explicitly not the empty object.
+		vManagedCluster = &WorkflowTemplatePlacementManagedCluster{}
+	}
+	if err := extractWorkflowTemplatePlacementManagedClusterFields(r, vManagedCluster); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vManagedCluster) {
+		o.ManagedCluster = vManagedCluster
+	}
+	vClusterSelector := o.ClusterSelector
+	if vClusterSelector == nil {
+		// note: explicitly not the empty object.
+		vClusterSelector = &WorkflowTemplatePlacementClusterSelector{}
+	}
+	if err := extractWorkflowTemplatePlacementClusterSelectorFields(r, vClusterSelector); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vClusterSelector) {
+		o.ClusterSelector = vClusterSelector
+	}
+	return nil
+}
+func extractWorkflowTemplatePlacementManagedClusterFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedCluster) error {
+	vConfig := o.Config
+	if vConfig == nil {
+		// note: explicitly not the empty object.
+		vConfig = &WorkflowTemplatePlacementManagedClusterConfig{}
+	}
+	if err := extractWorkflowTemplatePlacementManagedClusterConfigFields(r, vConfig); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vConfig) {
+		o.Config = vConfig
+	}
+	return nil
+}
+func extractWorkflowTemplatePlacementManagedClusterConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfig) error {
+	vGceClusterConfig := o.GceClusterConfig
+	if vGceClusterConfig == nil {
+		// note: explicitly not the empty object.
+		vGceClusterConfig = &WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig{}
+	}
+	if err := extractWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigFields(r, vGceClusterConfig); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vGceClusterConfig) {
+		o.GceClusterConfig = vGceClusterConfig
+	}
+	vMasterConfig := o.MasterConfig
+	if vMasterConfig == nil {
+		// note: explicitly not the empty object.
+		vMasterConfig = &WorkflowTemplatePlacementManagedClusterConfigMasterConfig{}
+	}
+	if err := extractWorkflowTemplatePlacementManagedClusterConfigMasterConfigFields(r, vMasterConfig); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vMasterConfig) {
+		o.MasterConfig = vMasterConfig
+	}
+	vWorkerConfig := o.WorkerConfig
+	if vWorkerConfig == nil {
+		// note: explicitly not the empty object.
+		vWorkerConfig = &WorkflowTemplatePlacementManagedClusterConfigWorkerConfig{}
+	}
+	if err := extractWorkflowTemplatePlacementManagedClusterConfigWorkerConfigFields(r, vWorkerConfig); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vWorkerConfig) {
+		o.WorkerConfig = vWorkerConfig
+	}
+	vSecondaryWorkerConfig := o.SecondaryWorkerConfig
+	if vSecondaryWorkerConfig == nil {
+		// note: explicitly not the empty object.
+		vSecondaryWorkerConfig = &WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig{}
+	}
+	if err := extractWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigFields(r, vSecondaryWorkerConfig); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vSecondaryWorkerConfig) {
+		o.SecondaryWorkerConfig = vSecondaryWorkerConfig
+	}
+	vSoftwareConfig := o.SoftwareConfig
+	if vSoftwareConfig == nil {
+		// note: explicitly not the empty object.
+		vSoftwareConfig = &WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig{}
+	}
+	if err := extractWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigFields(r, vSoftwareConfig); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vSoftwareConfig) {
+		o.SoftwareConfig = vSoftwareConfig
+	}
+	vEncryptionConfig := o.EncryptionConfig
+	if vEncryptionConfig == nil {
+		// note: explicitly not the empty object.
+		vEncryptionConfig = &WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig{}
+	}
+	if err := extractWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigFields(r, vEncryptionConfig); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vEncryptionConfig) {
+		o.EncryptionConfig = vEncryptionConfig
+	}
+	vAutoscalingConfig := o.AutoscalingConfig
+	if vAutoscalingConfig == nil {
+		// note: explicitly not the empty object.
+		vAutoscalingConfig = &WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig{}
+	}
+	if err := extractWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigFields(r, vAutoscalingConfig); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vAutoscalingConfig) {
+		o.AutoscalingConfig = vAutoscalingConfig
+	}
+	vSecurityConfig := o.SecurityConfig
+	if vSecurityConfig == nil {
+		// note: explicitly not the empty object.
+		vSecurityConfig = &WorkflowTemplatePlacementManagedClusterConfigSecurityConfig{}
+	}
+	if err := extractWorkflowTemplatePlacementManagedClusterConfigSecurityConfigFields(r, vSecurityConfig); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vSecurityConfig) {
+		o.SecurityConfig = vSecurityConfig
+	}
+	vLifecycleConfig := o.LifecycleConfig
+	if vLifecycleConfig == nil {
+		// note: explicitly not the empty object.
+		vLifecycleConfig = &WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig{}
+	}
+	if err := extractWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigFields(r, vLifecycleConfig); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vLifecycleConfig) {
+		o.LifecycleConfig = vLifecycleConfig
+	}
+	vEndpointConfig := o.EndpointConfig
+	if vEndpointConfig == nil {
+		// note: explicitly not the empty object.
+		vEndpointConfig = &WorkflowTemplatePlacementManagedClusterConfigEndpointConfig{}
+	}
+	if err := extractWorkflowTemplatePlacementManagedClusterConfigEndpointConfigFields(r, vEndpointConfig); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vEndpointConfig) {
+		o.EndpointConfig = vEndpointConfig
+	}
+{{- if ne $.TargetVersionName "ga" }}
+	vGkeClusterConfig := o.GkeClusterConfig
+	if vGkeClusterConfig == nil {
+		// note: explicitly not the empty object.
+		vGkeClusterConfig = &WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig{}
+	}
+	if err := extractWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigFields(r, vGkeClusterConfig); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vGkeClusterConfig) {
+		o.GkeClusterConfig = vGkeClusterConfig
+	}
+	vMetastoreConfig := o.MetastoreConfig
+	if vMetastoreConfig == nil {
+		// note: explicitly not the empty object.
+		vMetastoreConfig = &WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig{}
+	}
+	if err := extractWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigFields(r, vMetastoreConfig); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vMetastoreConfig) {
+		o.MetastoreConfig = vMetastoreConfig
+	}
+{{- end }}
+	return nil
+}
+func extractWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig) error {
+	vReservationAffinity := o.ReservationAffinity
+	if vReservationAffinity == nil {
+		// note: explicitly not the empty object.
+		vReservationAffinity = &WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity{}
+	}
+	if err := extractWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityFields(r, vReservationAffinity); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vReservationAffinity) {
+		o.ReservationAffinity = vReservationAffinity
+	}
+	vNodeGroupAffinity := o.NodeGroupAffinity
+	if vNodeGroupAffinity == nil {
+		// note: explicitly not the empty object.
+		vNodeGroupAffinity = &WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity{}
+	}
+	if err := extractWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityFields(r, vNodeGroupAffinity); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vNodeGroupAffinity) {
+		o.NodeGroupAffinity = vNodeGroupAffinity
+	}
+	vShieldedInstanceConfig := o.ShieldedInstanceConfig
+	if vShieldedInstanceConfig == nil {
+		// note: explicitly not the empty object.
+ vShieldedInstanceConfig = &WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfigFields(r, vShieldedInstanceConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vShieldedInstanceConfig) { + o.ShieldedInstanceConfig = vShieldedInstanceConfig + } + return nil +} +func extractWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity) error { + return nil +} +func extractWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity) error { + return nil +} +func extractWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig) error { + return nil +} +func extractWorkflowTemplatePlacementManagedClusterConfigMasterConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigMasterConfig) error { + vDiskConfig := o.DiskConfig + if vDiskConfig == nil { + // note: explicitly not the empty object. + vDiskConfig = &WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigFields(r, vDiskConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vDiskConfig) { + o.DiskConfig = vDiskConfig + } + vManagedGroupConfig := o.ManagedGroupConfig + if vManagedGroupConfig == nil { + // note: explicitly not the empty object. 
+ vManagedGroupConfig = &WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigFields(r, vManagedGroupConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vManagedGroupConfig) { + o.ManagedGroupConfig = vManagedGroupConfig + } + return nil +} +func extractWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig) error { + return nil +} +func extractWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig) error { + return nil +} +func extractWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorsFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators) error { + return nil +} +func extractWorkflowTemplatePlacementManagedClusterConfigWorkerConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigWorkerConfig) error { + vDiskConfig := o.DiskConfig + if vDiskConfig == nil { + // note: explicitly not the empty object. + vDiskConfig = &WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigFields(r, vDiskConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vDiskConfig) { + o.DiskConfig = vDiskConfig + } + vManagedGroupConfig := o.ManagedGroupConfig + if vManagedGroupConfig == nil { + // note: explicitly not the empty object. 
+ vManagedGroupConfig = &WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigFields(r, vManagedGroupConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vManagedGroupConfig) { + o.ManagedGroupConfig = vManagedGroupConfig + } + return nil +} +func extractWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig) error { + return nil +} +func extractWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig) error { + return nil +} +func extractWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorsFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators) error { + return nil +} +func extractWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig) error { + vDiskConfig := o.DiskConfig + if vDiskConfig == nil { + // note: explicitly not the empty object. + vDiskConfig = &WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigFields(r, vDiskConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vDiskConfig) { + o.DiskConfig = vDiskConfig + } + vManagedGroupConfig := o.ManagedGroupConfig + if vManagedGroupConfig == nil { + // note: explicitly not the empty object. 
+ vManagedGroupConfig = &WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigFields(r, vManagedGroupConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vManagedGroupConfig) { + o.ManagedGroupConfig = vManagedGroupConfig + } + return nil +} +func extractWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig) error { + return nil +} +func extractWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig) error { + return nil +} +func extractWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorsFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators) error { + return nil +} +func extractWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig) error { + return nil +} +func extractWorkflowTemplatePlacementManagedClusterConfigInitializationActionsFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigInitializationActions) error { + return nil +} +func extractWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig) error { + return nil +} +func extractWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig) error { + return nil +} +func extractWorkflowTemplatePlacementManagedClusterConfigSecurityConfigFields(r *WorkflowTemplate, o 
*WorkflowTemplatePlacementManagedClusterConfigSecurityConfig) error { + vKerberosConfig := o.KerberosConfig + if vKerberosConfig == nil { + // note: explicitly not the empty object. + vKerberosConfig = &WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigFields(r, vKerberosConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vKerberosConfig) { + o.KerberosConfig = vKerberosConfig + } + return nil +} +func extractWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig) error { + return nil +} +func extractWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig) error { + return nil +} +func extractWorkflowTemplatePlacementManagedClusterConfigEndpointConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigEndpointConfig) error { + return nil +} +{{- if ne $.TargetVersionName "ga" }} +func extractWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig) error { + vNamespacedGkeDeploymentTarget := o.NamespacedGkeDeploymentTarget + if vNamespacedGkeDeploymentTarget == nil { + // note: explicitly not the empty object. 
+ vNamespacedGkeDeploymentTarget = &WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetFields(r, vNamespacedGkeDeploymentTarget); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vNamespacedGkeDeploymentTarget) { + o.NamespacedGkeDeploymentTarget = vNamespacedGkeDeploymentTarget + } + return nil +} +func extractWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) error { + return nil +} +func extractWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig) error { + return nil +} +{{- end }} +func extractWorkflowTemplatePlacementClusterSelectorFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementClusterSelector) error { + return nil +} +func extractWorkflowTemplateJobsFields(r *WorkflowTemplate, o *WorkflowTemplateJobs) error { + vHadoopJob := o.HadoopJob + if vHadoopJob == nil { + // note: explicitly not the empty object. + vHadoopJob = &WorkflowTemplateJobsHadoopJob{} + } + if err := extractWorkflowTemplateJobsHadoopJobFields(r, vHadoopJob); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vHadoopJob) { + o.HadoopJob = vHadoopJob + } + vSparkJob := o.SparkJob + if vSparkJob == nil { + // note: explicitly not the empty object. + vSparkJob = &WorkflowTemplateJobsSparkJob{} + } + if err := extractWorkflowTemplateJobsSparkJobFields(r, vSparkJob); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSparkJob) { + o.SparkJob = vSparkJob + } + vPysparkJob := o.PysparkJob + if vPysparkJob == nil { + // note: explicitly not the empty object. 
+ vPysparkJob = &WorkflowTemplateJobsPysparkJob{} + } + if err := extractWorkflowTemplateJobsPysparkJobFields(r, vPysparkJob); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vPysparkJob) { + o.PysparkJob = vPysparkJob + } + vHiveJob := o.HiveJob + if vHiveJob == nil { + // note: explicitly not the empty object. + vHiveJob = &WorkflowTemplateJobsHiveJob{} + } + if err := extractWorkflowTemplateJobsHiveJobFields(r, vHiveJob); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vHiveJob) { + o.HiveJob = vHiveJob + } + vPigJob := o.PigJob + if vPigJob == nil { + // note: explicitly not the empty object. + vPigJob = &WorkflowTemplateJobsPigJob{} + } + if err := extractWorkflowTemplateJobsPigJobFields(r, vPigJob); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vPigJob) { + o.PigJob = vPigJob + } + vSparkRJob := o.SparkRJob + if vSparkRJob == nil { + // note: explicitly not the empty object. + vSparkRJob = &WorkflowTemplateJobsSparkRJob{} + } + if err := extractWorkflowTemplateJobsSparkRJobFields(r, vSparkRJob); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSparkRJob) { + o.SparkRJob = vSparkRJob + } + vSparkSqlJob := o.SparkSqlJob + if vSparkSqlJob == nil { + // note: explicitly not the empty object. + vSparkSqlJob = &WorkflowTemplateJobsSparkSqlJob{} + } + if err := extractWorkflowTemplateJobsSparkSqlJobFields(r, vSparkSqlJob); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSparkSqlJob) { + o.SparkSqlJob = vSparkSqlJob + } + vPrestoJob := o.PrestoJob + if vPrestoJob == nil { + // note: explicitly not the empty object. + vPrestoJob = &WorkflowTemplateJobsPrestoJob{} + } + if err := extractWorkflowTemplateJobsPrestoJobFields(r, vPrestoJob); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vPrestoJob) { + o.PrestoJob = vPrestoJob + } + vScheduling := o.Scheduling + if vScheduling == nil { + // note: explicitly not the empty object. 
+ vScheduling = &WorkflowTemplateJobsScheduling{} + } + if err := extractWorkflowTemplateJobsSchedulingFields(r, vScheduling); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vScheduling) { + o.Scheduling = vScheduling + } + return nil +} +func extractWorkflowTemplateJobsHadoopJobFields(r *WorkflowTemplate, o *WorkflowTemplateJobsHadoopJob) error { + vLoggingConfig := o.LoggingConfig + if vLoggingConfig == nil { + // note: explicitly not the empty object. + vLoggingConfig = &WorkflowTemplateJobsHadoopJobLoggingConfig{} + } + if err := extractWorkflowTemplateJobsHadoopJobLoggingConfigFields(r, vLoggingConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vLoggingConfig) { + o.LoggingConfig = vLoggingConfig + } + return nil +} +func extractWorkflowTemplateJobsHadoopJobLoggingConfigFields(r *WorkflowTemplate, o *WorkflowTemplateJobsHadoopJobLoggingConfig) error { + return nil +} +func extractWorkflowTemplateJobsSparkJobFields(r *WorkflowTemplate, o *WorkflowTemplateJobsSparkJob) error { + vLoggingConfig := o.LoggingConfig + if vLoggingConfig == nil { + // note: explicitly not the empty object. + vLoggingConfig = &WorkflowTemplateJobsSparkJobLoggingConfig{} + } + if err := extractWorkflowTemplateJobsSparkJobLoggingConfigFields(r, vLoggingConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vLoggingConfig) { + o.LoggingConfig = vLoggingConfig + } + return nil +} +func extractWorkflowTemplateJobsSparkJobLoggingConfigFields(r *WorkflowTemplate, o *WorkflowTemplateJobsSparkJobLoggingConfig) error { + return nil +} +func extractWorkflowTemplateJobsPysparkJobFields(r *WorkflowTemplate, o *WorkflowTemplateJobsPysparkJob) error { + vLoggingConfig := o.LoggingConfig + if vLoggingConfig == nil { + // note: explicitly not the empty object. 
+ vLoggingConfig = &WorkflowTemplateJobsPysparkJobLoggingConfig{} + } + if err := extractWorkflowTemplateJobsPysparkJobLoggingConfigFields(r, vLoggingConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vLoggingConfig) { + o.LoggingConfig = vLoggingConfig + } + return nil +} +func extractWorkflowTemplateJobsPysparkJobLoggingConfigFields(r *WorkflowTemplate, o *WorkflowTemplateJobsPysparkJobLoggingConfig) error { + return nil +} +func extractWorkflowTemplateJobsHiveJobFields(r *WorkflowTemplate, o *WorkflowTemplateJobsHiveJob) error { + vQueryList := o.QueryList + if vQueryList == nil { + // note: explicitly not the empty object. + vQueryList = &WorkflowTemplateJobsHiveJobQueryList{} + } + if err := extractWorkflowTemplateJobsHiveJobQueryListFields(r, vQueryList); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vQueryList) { + o.QueryList = vQueryList + } + return nil +} +func extractWorkflowTemplateJobsHiveJobQueryListFields(r *WorkflowTemplate, o *WorkflowTemplateJobsHiveJobQueryList) error { + return nil +} +func extractWorkflowTemplateJobsPigJobFields(r *WorkflowTemplate, o *WorkflowTemplateJobsPigJob) error { + vQueryList := o.QueryList + if vQueryList == nil { + // note: explicitly not the empty object. + vQueryList = &WorkflowTemplateJobsPigJobQueryList{} + } + if err := extractWorkflowTemplateJobsPigJobQueryListFields(r, vQueryList); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vQueryList) { + o.QueryList = vQueryList + } + vLoggingConfig := o.LoggingConfig + if vLoggingConfig == nil { + // note: explicitly not the empty object. 
+ vLoggingConfig = &WorkflowTemplateJobsPigJobLoggingConfig{} + } + if err := extractWorkflowTemplateJobsPigJobLoggingConfigFields(r, vLoggingConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vLoggingConfig) { + o.LoggingConfig = vLoggingConfig + } + return nil +} +func extractWorkflowTemplateJobsPigJobQueryListFields(r *WorkflowTemplate, o *WorkflowTemplateJobsPigJobQueryList) error { + return nil +} +func extractWorkflowTemplateJobsPigJobLoggingConfigFields(r *WorkflowTemplate, o *WorkflowTemplateJobsPigJobLoggingConfig) error { + return nil +} +func extractWorkflowTemplateJobsSparkRJobFields(r *WorkflowTemplate, o *WorkflowTemplateJobsSparkRJob) error { + vLoggingConfig := o.LoggingConfig + if vLoggingConfig == nil { + // note: explicitly not the empty object. + vLoggingConfig = &WorkflowTemplateJobsSparkRJobLoggingConfig{} + } + if err := extractWorkflowTemplateJobsSparkRJobLoggingConfigFields(r, vLoggingConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vLoggingConfig) { + o.LoggingConfig = vLoggingConfig + } + return nil +} +func extractWorkflowTemplateJobsSparkRJobLoggingConfigFields(r *WorkflowTemplate, o *WorkflowTemplateJobsSparkRJobLoggingConfig) error { + return nil +} +func extractWorkflowTemplateJobsSparkSqlJobFields(r *WorkflowTemplate, o *WorkflowTemplateJobsSparkSqlJob) error { + vQueryList := o.QueryList + if vQueryList == nil { + // note: explicitly not the empty object. + vQueryList = &WorkflowTemplateJobsSparkSqlJobQueryList{} + } + if err := extractWorkflowTemplateJobsSparkSqlJobQueryListFields(r, vQueryList); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vQueryList) { + o.QueryList = vQueryList + } + vLoggingConfig := o.LoggingConfig + if vLoggingConfig == nil { + // note: explicitly not the empty object. 
+ vLoggingConfig = &WorkflowTemplateJobsSparkSqlJobLoggingConfig{} + } + if err := extractWorkflowTemplateJobsSparkSqlJobLoggingConfigFields(r, vLoggingConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vLoggingConfig) { + o.LoggingConfig = vLoggingConfig + } + return nil +} +func extractWorkflowTemplateJobsSparkSqlJobQueryListFields(r *WorkflowTemplate, o *WorkflowTemplateJobsSparkSqlJobQueryList) error { + return nil +} +func extractWorkflowTemplateJobsSparkSqlJobLoggingConfigFields(r *WorkflowTemplate, o *WorkflowTemplateJobsSparkSqlJobLoggingConfig) error { + return nil +} +func extractWorkflowTemplateJobsPrestoJobFields(r *WorkflowTemplate, o *WorkflowTemplateJobsPrestoJob) error { + vQueryList := o.QueryList + if vQueryList == nil { + // note: explicitly not the empty object. + vQueryList = &WorkflowTemplateJobsPrestoJobQueryList{} + } + if err := extractWorkflowTemplateJobsPrestoJobQueryListFields(r, vQueryList); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vQueryList) { + o.QueryList = vQueryList + } + vLoggingConfig := o.LoggingConfig + if vLoggingConfig == nil { + // note: explicitly not the empty object. 
+ vLoggingConfig = &WorkflowTemplateJobsPrestoJobLoggingConfig{} + } + if err := extractWorkflowTemplateJobsPrestoJobLoggingConfigFields(r, vLoggingConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vLoggingConfig) { + o.LoggingConfig = vLoggingConfig + } + return nil +} +func extractWorkflowTemplateJobsPrestoJobQueryListFields(r *WorkflowTemplate, o *WorkflowTemplateJobsPrestoJobQueryList) error { + return nil +} +func extractWorkflowTemplateJobsPrestoJobLoggingConfigFields(r *WorkflowTemplate, o *WorkflowTemplateJobsPrestoJobLoggingConfig) error { + return nil +} +func extractWorkflowTemplateJobsSchedulingFields(r *WorkflowTemplate, o *WorkflowTemplateJobsScheduling) error { + return nil +} +func extractWorkflowTemplateParametersFields(r *WorkflowTemplate, o *WorkflowTemplateParameters) error { + vValidation := o.Validation + if vValidation == nil { + // note: explicitly not the empty object. + vValidation = &WorkflowTemplateParametersValidation{} + } + if err := extractWorkflowTemplateParametersValidationFields(r, vValidation); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vValidation) { + o.Validation = vValidation + } + return nil +} +func extractWorkflowTemplateParametersValidationFields(r *WorkflowTemplate, o *WorkflowTemplateParametersValidation) error { + vRegex := o.Regex + if vRegex == nil { + // note: explicitly not the empty object. + vRegex = &WorkflowTemplateParametersValidationRegex{} + } + if err := extractWorkflowTemplateParametersValidationRegexFields(r, vRegex); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vRegex) { + o.Regex = vRegex + } + vValues := o.Values + if vValues == nil { + // note: explicitly not the empty object. 
+ vValues = &WorkflowTemplateParametersValidationValues{} + } + if err := extractWorkflowTemplateParametersValidationValuesFields(r, vValues); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vValues) { + o.Values = vValues + } + return nil +} +func extractWorkflowTemplateParametersValidationRegexFields(r *WorkflowTemplate, o *WorkflowTemplateParametersValidationRegex) error { + return nil +} +func extractWorkflowTemplateParametersValidationValuesFields(r *WorkflowTemplate, o *WorkflowTemplateParametersValidationValues) error { + return nil +} + +func postReadExtractWorkflowTemplateFields(r *WorkflowTemplate) error { + vEncryptionConfig := r.EncryptionConfig + if vEncryptionConfig == nil { + // note: explicitly not the empty object. + vEncryptionConfig = &WorkflowTemplateEncryptionConfig{} + } + if err := postReadExtractWorkflowTemplateEncryptionConfigFields(r, vEncryptionConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vEncryptionConfig) { + r.EncryptionConfig = vEncryptionConfig + } + vPlacement := r.Placement + if vPlacement == nil { + // note: explicitly not the empty object. + vPlacement = &WorkflowTemplatePlacement{} + } + if err := postReadExtractWorkflowTemplatePlacementFields(r, vPlacement); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vPlacement) { + r.Placement = vPlacement + } + return nil +} +func postReadExtractWorkflowTemplateEncryptionConfigFields(r *WorkflowTemplate, o *WorkflowTemplateEncryptionConfig) error { + return nil +} +func postReadExtractWorkflowTemplatePlacementFields(r *WorkflowTemplate, o *WorkflowTemplatePlacement) error { + vManagedCluster := o.ManagedCluster + if vManagedCluster == nil { + // note: explicitly not the empty object. 
+ vManagedCluster = &WorkflowTemplatePlacementManagedCluster{} + } + if err := extractWorkflowTemplatePlacementManagedClusterFields(r, vManagedCluster); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vManagedCluster) { + o.ManagedCluster = vManagedCluster + } + vClusterSelector := o.ClusterSelector + if vClusterSelector == nil { + // note: explicitly not the empty object. + vClusterSelector = &WorkflowTemplatePlacementClusterSelector{} + } + if err := extractWorkflowTemplatePlacementClusterSelectorFields(r, vClusterSelector); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vClusterSelector) { + o.ClusterSelector = vClusterSelector + } + return nil +} +func postReadExtractWorkflowTemplatePlacementManagedClusterFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedCluster) error { + vConfig := o.Config + if vConfig == nil { + // note: explicitly not the empty object. + vConfig = &WorkflowTemplatePlacementManagedClusterConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigFields(r, vConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vConfig) { + o.Config = vConfig + } + return nil +} +func postReadExtractWorkflowTemplatePlacementManagedClusterConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfig) error { + vGceClusterConfig := o.GceClusterConfig + if vGceClusterConfig == nil { + // note: explicitly not the empty object. + vGceClusterConfig = &WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigFields(r, vGceClusterConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vGceClusterConfig) { + o.GceClusterConfig = vGceClusterConfig + } + vMasterConfig := o.MasterConfig + if vMasterConfig == nil { + // note: explicitly not the empty object. 
+ vMasterConfig = &WorkflowTemplatePlacementManagedClusterConfigMasterConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigMasterConfigFields(r, vMasterConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMasterConfig) { + o.MasterConfig = vMasterConfig + } + vWorkerConfig := o.WorkerConfig + if vWorkerConfig == nil { + // note: explicitly not the empty object. + vWorkerConfig = &WorkflowTemplatePlacementManagedClusterConfigWorkerConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigWorkerConfigFields(r, vWorkerConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vWorkerConfig) { + o.WorkerConfig = vWorkerConfig + } + vSecondaryWorkerConfig := o.SecondaryWorkerConfig + if vSecondaryWorkerConfig == nil { + // note: explicitly not the empty object. + vSecondaryWorkerConfig = &WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigFields(r, vSecondaryWorkerConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSecondaryWorkerConfig) { + o.SecondaryWorkerConfig = vSecondaryWorkerConfig + } + vSoftwareConfig := o.SoftwareConfig + if vSoftwareConfig == nil { + // note: explicitly not the empty object. + vSoftwareConfig = &WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigFields(r, vSoftwareConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSoftwareConfig) { + o.SoftwareConfig = vSoftwareConfig + } + vEncryptionConfig := o.EncryptionConfig + if vEncryptionConfig == nil { + // note: explicitly not the empty object. 
+ vEncryptionConfig = &WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigFields(r, vEncryptionConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vEncryptionConfig) { + o.EncryptionConfig = vEncryptionConfig + } + vAutoscalingConfig := o.AutoscalingConfig + if vAutoscalingConfig == nil { + // note: explicitly not the empty object. + vAutoscalingConfig = &WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigFields(r, vAutoscalingConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAutoscalingConfig) { + o.AutoscalingConfig = vAutoscalingConfig + } + vSecurityConfig := o.SecurityConfig + if vSecurityConfig == nil { + // note: explicitly not the empty object. + vSecurityConfig = &WorkflowTemplatePlacementManagedClusterConfigSecurityConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigSecurityConfigFields(r, vSecurityConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSecurityConfig) { + o.SecurityConfig = vSecurityConfig + } + vLifecycleConfig := o.LifecycleConfig + if vLifecycleConfig == nil { + // note: explicitly not the empty object. + vLifecycleConfig = &WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigFields(r, vLifecycleConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vLifecycleConfig) { + o.LifecycleConfig = vLifecycleConfig + } + vEndpointConfig := o.EndpointConfig + if vEndpointConfig == nil { + // note: explicitly not the empty object. 
+ vEndpointConfig = &WorkflowTemplatePlacementManagedClusterConfigEndpointConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigEndpointConfigFields(r, vEndpointConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vEndpointConfig) { + o.EndpointConfig = vEndpointConfig + } +{{- if ne $.TargetVersionName "ga" }} + vGkeClusterConfig := o.GkeClusterConfig + if vGkeClusterConfig == nil { + // note: explicitly not the empty object. + vGkeClusterConfig = &WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigFields(r, vGkeClusterConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vGkeClusterConfig) { + o.GkeClusterConfig = vGkeClusterConfig + } + vMetastoreConfig := o.MetastoreConfig + if vMetastoreConfig == nil { + // note: explicitly not the empty object. + vMetastoreConfig = &WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigFields(r, vMetastoreConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMetastoreConfig) { + o.MetastoreConfig = vMetastoreConfig + } +{{- end }} + return nil +} +func postReadExtractWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig) error { + vReservationAffinity := o.ReservationAffinity + if vReservationAffinity == nil { + // note: explicitly not the empty object. 
+ vReservationAffinity = &WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityFields(r, vReservationAffinity); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vReservationAffinity) { + o.ReservationAffinity = vReservationAffinity + } + vNodeGroupAffinity := o.NodeGroupAffinity + if vNodeGroupAffinity == nil { + // note: explicitly not the empty object. + vNodeGroupAffinity = &WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityFields(r, vNodeGroupAffinity); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vNodeGroupAffinity) { + o.NodeGroupAffinity = vNodeGroupAffinity + } + vShieldedInstanceConfig := o.ShieldedInstanceConfig + if vShieldedInstanceConfig == nil { + // note: explicitly not the empty object. 
+ vShieldedInstanceConfig = &WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfigFields(r, vShieldedInstanceConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vShieldedInstanceConfig) { + o.ShieldedInstanceConfig = vShieldedInstanceConfig + } + return nil +} +func postReadExtractWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity) error { + return nil +} +func postReadExtractWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity) error { + return nil +} +func postReadExtractWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig) error { + return nil +} +func postReadExtractWorkflowTemplatePlacementManagedClusterConfigMasterConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigMasterConfig) error { + vDiskConfig := o.DiskConfig + if vDiskConfig == nil { + // note: explicitly not the empty object. + vDiskConfig = &WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigFields(r, vDiskConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vDiskConfig) { + o.DiskConfig = vDiskConfig + } + vManagedGroupConfig := o.ManagedGroupConfig + if vManagedGroupConfig == nil { + // note: explicitly not the empty object. 
+ vManagedGroupConfig = &WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigFields(r, vManagedGroupConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vManagedGroupConfig) { + o.ManagedGroupConfig = vManagedGroupConfig + } + return nil +} +func postReadExtractWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig) error { + return nil +} +func postReadExtractWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig) error { + return nil +} +func postReadExtractWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorsFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators) error { + return nil +} +func postReadExtractWorkflowTemplatePlacementManagedClusterConfigWorkerConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigWorkerConfig) error { + vDiskConfig := o.DiskConfig + if vDiskConfig == nil { + // note: explicitly not the empty object. + vDiskConfig = &WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigFields(r, vDiskConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vDiskConfig) { + o.DiskConfig = vDiskConfig + } + vManagedGroupConfig := o.ManagedGroupConfig + if vManagedGroupConfig == nil { + // note: explicitly not the empty object. 
+ vManagedGroupConfig = &WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigFields(r, vManagedGroupConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vManagedGroupConfig) { + o.ManagedGroupConfig = vManagedGroupConfig + } + return nil +} +func postReadExtractWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig) error { + return nil +} +func postReadExtractWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig) error { + return nil +} +func postReadExtractWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorsFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators) error { + return nil +} +func postReadExtractWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig) error { + vDiskConfig := o.DiskConfig + if vDiskConfig == nil { + // note: explicitly not the empty object. + vDiskConfig = &WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigFields(r, vDiskConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vDiskConfig) { + o.DiskConfig = vDiskConfig + } + vManagedGroupConfig := o.ManagedGroupConfig + if vManagedGroupConfig == nil { + // note: explicitly not the empty object. 
+ vManagedGroupConfig = &WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigFields(r, vManagedGroupConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vManagedGroupConfig) { + o.ManagedGroupConfig = vManagedGroupConfig + } + return nil +} +func postReadExtractWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig) error { + return nil +} +func postReadExtractWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig) error { + return nil +} +func postReadExtractWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorsFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators) error { + return nil +} +func postReadExtractWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig) error { + return nil +} +func postReadExtractWorkflowTemplatePlacementManagedClusterConfigInitializationActionsFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigInitializationActions) error { + return nil +} +func postReadExtractWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig) error { + return nil +} +func postReadExtractWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig) error { + return nil +} +func 
postReadExtractWorkflowTemplatePlacementManagedClusterConfigSecurityConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigSecurityConfig) error { + vKerberosConfig := o.KerberosConfig + if vKerberosConfig == nil { + // note: explicitly not the empty object. + vKerberosConfig = &WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigFields(r, vKerberosConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vKerberosConfig) { + o.KerberosConfig = vKerberosConfig + } + return nil +} +func postReadExtractWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig) error { + return nil +} +func postReadExtractWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig) error { + return nil +} +func postReadExtractWorkflowTemplatePlacementManagedClusterConfigEndpointConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigEndpointConfig) error { + return nil +} +{{- if ne $.TargetVersionName "ga" }} +func postReadExtractWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig) error { + vNamespacedGkeDeploymentTarget := o.NamespacedGkeDeploymentTarget + if vNamespacedGkeDeploymentTarget == nil { + // note: explicitly not the empty object. 
+ vNamespacedGkeDeploymentTarget = &WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget{} + } + if err := extractWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetFields(r, vNamespacedGkeDeploymentTarget); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vNamespacedGkeDeploymentTarget) { + o.NamespacedGkeDeploymentTarget = vNamespacedGkeDeploymentTarget + } + return nil +} +func postReadExtractWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) error { + return nil +} +func postReadExtractWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig) error { + return nil +} +{{- end }} +func postReadExtractWorkflowTemplatePlacementClusterSelectorFields(r *WorkflowTemplate, o *WorkflowTemplatePlacementClusterSelector) error { + return nil +} +func postReadExtractWorkflowTemplateJobsFields(r *WorkflowTemplate, o *WorkflowTemplateJobs) error { + vHadoopJob := o.HadoopJob + if vHadoopJob == nil { + // note: explicitly not the empty object. + vHadoopJob = &WorkflowTemplateJobsHadoopJob{} + } + if err := extractWorkflowTemplateJobsHadoopJobFields(r, vHadoopJob); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vHadoopJob) { + o.HadoopJob = vHadoopJob + } + vSparkJob := o.SparkJob + if vSparkJob == nil { + // note: explicitly not the empty object. + vSparkJob = &WorkflowTemplateJobsSparkJob{} + } + if err := extractWorkflowTemplateJobsSparkJobFields(r, vSparkJob); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSparkJob) { + o.SparkJob = vSparkJob + } + vPysparkJob := o.PysparkJob + if vPysparkJob == nil { + // note: explicitly not the empty object. 
+ vPysparkJob = &WorkflowTemplateJobsPysparkJob{} + } + if err := extractWorkflowTemplateJobsPysparkJobFields(r, vPysparkJob); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vPysparkJob) { + o.PysparkJob = vPysparkJob + } + vHiveJob := o.HiveJob + if vHiveJob == nil { + // note: explicitly not the empty object. + vHiveJob = &WorkflowTemplateJobsHiveJob{} + } + if err := extractWorkflowTemplateJobsHiveJobFields(r, vHiveJob); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vHiveJob) { + o.HiveJob = vHiveJob + } + vPigJob := o.PigJob + if vPigJob == nil { + // note: explicitly not the empty object. + vPigJob = &WorkflowTemplateJobsPigJob{} + } + if err := extractWorkflowTemplateJobsPigJobFields(r, vPigJob); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vPigJob) { + o.PigJob = vPigJob + } + vSparkRJob := o.SparkRJob + if vSparkRJob == nil { + // note: explicitly not the empty object. + vSparkRJob = &WorkflowTemplateJobsSparkRJob{} + } + if err := extractWorkflowTemplateJobsSparkRJobFields(r, vSparkRJob); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSparkRJob) { + o.SparkRJob = vSparkRJob + } + vSparkSqlJob := o.SparkSqlJob + if vSparkSqlJob == nil { + // note: explicitly not the empty object. + vSparkSqlJob = &WorkflowTemplateJobsSparkSqlJob{} + } + if err := extractWorkflowTemplateJobsSparkSqlJobFields(r, vSparkSqlJob); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSparkSqlJob) { + o.SparkSqlJob = vSparkSqlJob + } + vPrestoJob := o.PrestoJob + if vPrestoJob == nil { + // note: explicitly not the empty object. + vPrestoJob = &WorkflowTemplateJobsPrestoJob{} + } + if err := extractWorkflowTemplateJobsPrestoJobFields(r, vPrestoJob); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vPrestoJob) { + o.PrestoJob = vPrestoJob + } + vScheduling := o.Scheduling + if vScheduling == nil { + // note: explicitly not the empty object. 
+ vScheduling = &WorkflowTemplateJobsScheduling{} + } + if err := extractWorkflowTemplateJobsSchedulingFields(r, vScheduling); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vScheduling) { + o.Scheduling = vScheduling + } + return nil +} +func postReadExtractWorkflowTemplateJobsHadoopJobFields(r *WorkflowTemplate, o *WorkflowTemplateJobsHadoopJob) error { + vLoggingConfig := o.LoggingConfig + if vLoggingConfig == nil { + // note: explicitly not the empty object. + vLoggingConfig = &WorkflowTemplateJobsHadoopJobLoggingConfig{} + } + if err := extractWorkflowTemplateJobsHadoopJobLoggingConfigFields(r, vLoggingConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vLoggingConfig) { + o.LoggingConfig = vLoggingConfig + } + return nil +} +func postReadExtractWorkflowTemplateJobsHadoopJobLoggingConfigFields(r *WorkflowTemplate, o *WorkflowTemplateJobsHadoopJobLoggingConfig) error { + return nil +} +func postReadExtractWorkflowTemplateJobsSparkJobFields(r *WorkflowTemplate, o *WorkflowTemplateJobsSparkJob) error { + vLoggingConfig := o.LoggingConfig + if vLoggingConfig == nil { + // note: explicitly not the empty object. + vLoggingConfig = &WorkflowTemplateJobsSparkJobLoggingConfig{} + } + if err := extractWorkflowTemplateJobsSparkJobLoggingConfigFields(r, vLoggingConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vLoggingConfig) { + o.LoggingConfig = vLoggingConfig + } + return nil +} +func postReadExtractWorkflowTemplateJobsSparkJobLoggingConfigFields(r *WorkflowTemplate, o *WorkflowTemplateJobsSparkJobLoggingConfig) error { + return nil +} +func postReadExtractWorkflowTemplateJobsPysparkJobFields(r *WorkflowTemplate, o *WorkflowTemplateJobsPysparkJob) error { + vLoggingConfig := o.LoggingConfig + if vLoggingConfig == nil { + // note: explicitly not the empty object. 
+ vLoggingConfig = &WorkflowTemplateJobsPysparkJobLoggingConfig{} + } + if err := extractWorkflowTemplateJobsPysparkJobLoggingConfigFields(r, vLoggingConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vLoggingConfig) { + o.LoggingConfig = vLoggingConfig + } + return nil +} +func postReadExtractWorkflowTemplateJobsPysparkJobLoggingConfigFields(r *WorkflowTemplate, o *WorkflowTemplateJobsPysparkJobLoggingConfig) error { + return nil +} +func postReadExtractWorkflowTemplateJobsHiveJobFields(r *WorkflowTemplate, o *WorkflowTemplateJobsHiveJob) error { + vQueryList := o.QueryList + if vQueryList == nil { + // note: explicitly not the empty object. + vQueryList = &WorkflowTemplateJobsHiveJobQueryList{} + } + if err := extractWorkflowTemplateJobsHiveJobQueryListFields(r, vQueryList); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vQueryList) { + o.QueryList = vQueryList + } + return nil +} +func postReadExtractWorkflowTemplateJobsHiveJobQueryListFields(r *WorkflowTemplate, o *WorkflowTemplateJobsHiveJobQueryList) error { + return nil +} +func postReadExtractWorkflowTemplateJobsPigJobFields(r *WorkflowTemplate, o *WorkflowTemplateJobsPigJob) error { + vQueryList := o.QueryList + if vQueryList == nil { + // note: explicitly not the empty object. + vQueryList = &WorkflowTemplateJobsPigJobQueryList{} + } + if err := extractWorkflowTemplateJobsPigJobQueryListFields(r, vQueryList); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vQueryList) { + o.QueryList = vQueryList + } + vLoggingConfig := o.LoggingConfig + if vLoggingConfig == nil { + // note: explicitly not the empty object. 
+ vLoggingConfig = &WorkflowTemplateJobsPigJobLoggingConfig{} + } + if err := extractWorkflowTemplateJobsPigJobLoggingConfigFields(r, vLoggingConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vLoggingConfig) { + o.LoggingConfig = vLoggingConfig + } + return nil +} +func postReadExtractWorkflowTemplateJobsPigJobQueryListFields(r *WorkflowTemplate, o *WorkflowTemplateJobsPigJobQueryList) error { + return nil +} +func postReadExtractWorkflowTemplateJobsPigJobLoggingConfigFields(r *WorkflowTemplate, o *WorkflowTemplateJobsPigJobLoggingConfig) error { + return nil +} +func postReadExtractWorkflowTemplateJobsSparkRJobFields(r *WorkflowTemplate, o *WorkflowTemplateJobsSparkRJob) error { + vLoggingConfig := o.LoggingConfig + if vLoggingConfig == nil { + // note: explicitly not the empty object. + vLoggingConfig = &WorkflowTemplateJobsSparkRJobLoggingConfig{} + } + if err := extractWorkflowTemplateJobsSparkRJobLoggingConfigFields(r, vLoggingConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vLoggingConfig) { + o.LoggingConfig = vLoggingConfig + } + return nil +} +func postReadExtractWorkflowTemplateJobsSparkRJobLoggingConfigFields(r *WorkflowTemplate, o *WorkflowTemplateJobsSparkRJobLoggingConfig) error { + return nil +} +func postReadExtractWorkflowTemplateJobsSparkSqlJobFields(r *WorkflowTemplate, o *WorkflowTemplateJobsSparkSqlJob) error { + vQueryList := o.QueryList + if vQueryList == nil { + // note: explicitly not the empty object. + vQueryList = &WorkflowTemplateJobsSparkSqlJobQueryList{} + } + if err := extractWorkflowTemplateJobsSparkSqlJobQueryListFields(r, vQueryList); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vQueryList) { + o.QueryList = vQueryList + } + vLoggingConfig := o.LoggingConfig + if vLoggingConfig == nil { + // note: explicitly not the empty object. 
+ vLoggingConfig = &WorkflowTemplateJobsSparkSqlJobLoggingConfig{} + } + if err := extractWorkflowTemplateJobsSparkSqlJobLoggingConfigFields(r, vLoggingConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vLoggingConfig) { + o.LoggingConfig = vLoggingConfig + } + return nil +} +func postReadExtractWorkflowTemplateJobsSparkSqlJobQueryListFields(r *WorkflowTemplate, o *WorkflowTemplateJobsSparkSqlJobQueryList) error { + return nil +} +func postReadExtractWorkflowTemplateJobsSparkSqlJobLoggingConfigFields(r *WorkflowTemplate, o *WorkflowTemplateJobsSparkSqlJobLoggingConfig) error { + return nil +} +func postReadExtractWorkflowTemplateJobsPrestoJobFields(r *WorkflowTemplate, o *WorkflowTemplateJobsPrestoJob) error { + vQueryList := o.QueryList + if vQueryList == nil { + // note: explicitly not the empty object. + vQueryList = &WorkflowTemplateJobsPrestoJobQueryList{} + } + if err := extractWorkflowTemplateJobsPrestoJobQueryListFields(r, vQueryList); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vQueryList) { + o.QueryList = vQueryList + } + vLoggingConfig := o.LoggingConfig + if vLoggingConfig == nil { + // note: explicitly not the empty object. 
+ vLoggingConfig = &WorkflowTemplateJobsPrestoJobLoggingConfig{} + } + if err := extractWorkflowTemplateJobsPrestoJobLoggingConfigFields(r, vLoggingConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vLoggingConfig) { + o.LoggingConfig = vLoggingConfig + } + return nil +} +func postReadExtractWorkflowTemplateJobsPrestoJobQueryListFields(r *WorkflowTemplate, o *WorkflowTemplateJobsPrestoJobQueryList) error { + return nil +} +func postReadExtractWorkflowTemplateJobsPrestoJobLoggingConfigFields(r *WorkflowTemplate, o *WorkflowTemplateJobsPrestoJobLoggingConfig) error { + return nil +} +func postReadExtractWorkflowTemplateJobsSchedulingFields(r *WorkflowTemplate, o *WorkflowTemplateJobsScheduling) error { + return nil +} +func postReadExtractWorkflowTemplateParametersFields(r *WorkflowTemplate, o *WorkflowTemplateParameters) error { + vValidation := o.Validation + if vValidation == nil { + // note: explicitly not the empty object. + vValidation = &WorkflowTemplateParametersValidation{} + } + if err := extractWorkflowTemplateParametersValidationFields(r, vValidation); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vValidation) { + o.Validation = vValidation + } + return nil +} +func postReadExtractWorkflowTemplateParametersValidationFields(r *WorkflowTemplate, o *WorkflowTemplateParametersValidation) error { + vRegex := o.Regex + if vRegex == nil { + // note: explicitly not the empty object. + vRegex = &WorkflowTemplateParametersValidationRegex{} + } + if err := extractWorkflowTemplateParametersValidationRegexFields(r, vRegex); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vRegex) { + o.Regex = vRegex + } + vValues := o.Values + if vValues == nil { + // note: explicitly not the empty object. 
+ vValues = &WorkflowTemplateParametersValidationValues{} + } + if err := extractWorkflowTemplateParametersValidationValuesFields(r, vValues); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vValues) { + o.Values = vValues + } + return nil +} +func postReadExtractWorkflowTemplateParametersValidationRegexFields(r *WorkflowTemplate, o *WorkflowTemplateParametersValidationRegex) error { + return nil +} +func postReadExtractWorkflowTemplateParametersValidationValuesFields(r *WorkflowTemplate, o *WorkflowTemplateParametersValidationValues) error { + return nil +} diff --git a/mmv1/third_party/terraform/services/dataproc/workflow_template_schema.go.tmpl b/mmv1/third_party/terraform/services/dataproc/workflow_template_schema.go.tmpl new file mode 100644 index 000000000000..726fc3d1a99b --- /dev/null +++ b/mmv1/third_party/terraform/services/dataproc/workflow_template_schema.go.tmpl @@ -0,0 +1,2230 @@ +package dataproc + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +func DCLWorkflowTemplateSchema() *dcl.Schema { + return &dcl.Schema{ + Info: &dcl.Info{ + Title: "Dataproc/WorkflowTemplate", + Description: "The Dataproc WorkflowTemplate resource", + StructName: "WorkflowTemplate", + }, + Paths: &dcl.Paths{ + Get: &dcl.Path{ + Description: "The function used to get information about a WorkflowTemplate", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "workflowTemplate", + Required: true, + Description: "A full instance of a WorkflowTemplate", + }, + }, + }, + Apply: &dcl.Path{ + Description: "The function used to apply information about a WorkflowTemplate", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "workflowTemplate", + Required: true, + Description: "A full instance of a WorkflowTemplate", + }, + }, + }, + Delete: &dcl.Path{ + Description: "The function used to delete a WorkflowTemplate", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "workflowTemplate", + 
Required: true, + Description: "A full instance of a WorkflowTemplate", + }, + }, + }, + DeleteAll: &dcl.Path{ + Description: "The function used to delete all WorkflowTemplate", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "project", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + dcl.PathParameters{ + Name: "location", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + }, + }, + List: &dcl.Path{ + Description: "The function used to list information about many WorkflowTemplate", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "project", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + dcl.PathParameters{ + Name: "location", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + }, + }, + }, + Components: &dcl.Components{ + Schemas: map[string]*dcl.Component{ + "WorkflowTemplate": &dcl.Component{ + Title: "WorkflowTemplate", + ID: "projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/workflowTemplates/{{ "{{" }}name{{ "}}" }}", + ParentContainer: "project", + LabelsField: "labels", + HasCreate: true, + SchemaProperty: dcl.Property{ + Type: "object", + Required: []string{ + "name", + "placement", + "jobs", + "project", + "location", + }, + Properties: map[string]*dcl.Property{ + "createTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "CreateTime", + ReadOnly: true, + Description: "Output only. The time template was created.", + Immutable: true, + }, + "dagTimeout": &dcl.Property{ + Type: "string", + GoName: "DagTimeout", + Description: "Optional. Timeout duration for the DAG of jobs, expressed in seconds (see [JSON representation of duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). The timeout duration must be from 10 minutes (\"600s\") to 24 hours (\"86400s\"). The timer begins when the first job is submitted. 
If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a [managed cluster](/dataproc/docs/concepts/workflows/using-workflows#configuring_or_selecting_a_cluster), the cluster is deleted.", + Immutable: true, + }, + "encryptionConfig": &dcl.Property{ + Type: "object", + GoName: "EncryptionConfig", + GoType: "WorkflowTemplateEncryptionConfig", + Description: "Optional. The encryption configuration for the workflow template.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "kmsKey": &dcl.Property{ + Type: "string", + GoName: "KmsKey", + Description: "Optional. The Cloud KMS key name to use for encryption.", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Cloudkms/CryptoKey", + Field: "selfLink", + }, + }, + }, + }, + }, + "jobs": &dcl.Property{ + Type: "array", + GoName: "Jobs", + Description: "Required. The Directed Acyclic Graph of Jobs to submit.", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "object", + GoType: "WorkflowTemplateJobs", + Required: []string{ + "stepId", + }, + Properties: map[string]*dcl.Property{ + "hadoopJob": &dcl.Property{ + Type: "object", + GoName: "HadoopJob", + GoType: "WorkflowTemplateJobsHadoopJob", + Description: "Optional. Job is a Hadoop job.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "archiveUris": &dcl.Property{ + Type: "array", + GoName: "ArchiveUris", + Description: "Optional. HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + "args": &dcl.Property{ + Type: "array", + GoName: "Args", + Description: "Optional. The arguments to pass to the driver. 
Do not include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + "fileUris": &dcl.Property{ + Type: "array", + GoName: "FileUris", + Description: "Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + "jarFileUris": &dcl.Property{ + Type: "array", + GoName: "JarFileUris", + Description: "Optional. Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + "loggingConfig": &dcl.Property{ + Type: "object", + GoName: "LoggingConfig", + GoType: "WorkflowTemplateJobsHadoopJobLoggingConfig", + Description: "Optional. The runtime log config for job execution.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "driverLogLevels": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "DriverLogLevels", + Description: "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", + Immutable: true, + }, + }, + }, + "mainClass": &dcl.Property{ + Type: "string", + GoName: "MainClass", + Description: "The name of the driver's main class. 
The jar file containing the class must be in the default CLASSPATH or specified in `jar_file_uris`.", + Immutable: true, + }, + "mainJarFileUri": &dcl.Property{ + Type: "string", + GoName: "MainJarFileUri", + Description: "The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'", + Immutable: true, + }, + "properties": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "Properties", + Description: "Optional. A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.", + Immutable: true, + }, + }, + }, + "hiveJob": &dcl.Property{ + Type: "object", + GoName: "HiveJob", + GoType: "WorkflowTemplateJobsHiveJob", + Description: "Optional. Job is a Hive job.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "continueOnFailure": &dcl.Property{ + Type: "boolean", + GoName: "ContinueOnFailure", + Description: "Optional. Whether to continue executing queries if a query fails. The default value is `false`. Setting to `true` can be useful when executing independent parallel queries.", + Immutable: true, + }, + "jarFileUris": &dcl.Property{ + Type: "array", + GoName: "JarFileUris", + Description: "Optional. HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs.", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + "properties": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "Properties", + Description: "Optional. 
A mapping of property names and values, used to configure Hive. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes in user code.", + Immutable: true, + }, + "queryFileUri": &dcl.Property{ + Type: "string", + GoName: "QueryFileUri", + Description: "The HCFS URI of the script that contains Hive queries.", + Immutable: true, + }, + "queryList": &dcl.Property{ + Type: "object", + GoName: "QueryList", + GoType: "WorkflowTemplateJobsHiveJobQueryList", + Description: "A list of queries.", + Immutable: true, + Required: []string{ + "queries", + }, + Properties: map[string]*dcl.Property{ + "queries": &dcl.Property{ + Type: "array", + GoName: "Queries", + Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + }, + }, + "scriptVariables": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "ScriptVariables", + Description: "Optional. Mapping of query variable names to values (equivalent to the Hive command: `SET name=\"value\";`).", + Immutable: true, + }, + }, + }, + "labels": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "Labels", + Description: "Optional. The labels to associate with this job. 
Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: p{Ll}p{Lo}{0,62} Label values must be between 1 and 63 characters long, and must conform to the following regular expression: [p{Ll}p{Lo}p{N}_-]{0,63} No more than 32 labels can be associated with a given job.", + Immutable: true, + }, + "pigJob": &dcl.Property{ + Type: "object", + GoName: "PigJob", + GoType: "WorkflowTemplateJobsPigJob", + Description: "Optional. Job is a Pig job.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "continueOnFailure": &dcl.Property{ + Type: "boolean", + GoName: "ContinueOnFailure", + Description: "Optional. Whether to continue executing queries if a query fails. The default value is `false`. Setting to `true` can be useful when executing independent parallel queries.", + Immutable: true, + }, + "jarFileUris": &dcl.Property{ + Type: "array", + GoName: "JarFileUris", + Description: "Optional. HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + "loggingConfig": &dcl.Property{ + Type: "object", + GoName: "LoggingConfig", + GoType: "WorkflowTemplateJobsPigJobLoggingConfig", + Description: "Optional. The runtime log config for job execution.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "driverLogLevels": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "DriverLogLevels", + Description: "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. 
Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", + Immutable: true, + }, + }, + }, + "properties": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "Properties", + Description: "Optional. A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code.", + Immutable: true, + }, + "queryFileUri": &dcl.Property{ + Type: "string", + GoName: "QueryFileUri", + Description: "The HCFS URI of the script that contains the Pig queries.", + Immutable: true, + }, + "queryList": &dcl.Property{ + Type: "object", + GoName: "QueryList", + GoType: "WorkflowTemplateJobsPigJobQueryList", + Description: "A list of queries.", + Immutable: true, + Required: []string{ + "queries", + }, + Properties: map[string]*dcl.Property{ + "queries": &dcl.Property{ + Type: "array", + GoName: "Queries", + Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + }, + }, + "scriptVariables": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "ScriptVariables", + Description: "Optional. Mapping of query variable names to values (equivalent to the Pig command: `name=[value]`).", + Immutable: true, + }, + }, + }, + "prerequisiteStepIds": &dcl.Property{ + Type: "array", + GoName: "PrerequisiteStepIds", + Description: "Optional. 
The optional list of prerequisite job step_ids. If not specified, the job will start at the beginning of workflow.", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + "prestoJob": &dcl.Property{ + Type: "object", + GoName: "PrestoJob", + GoType: "WorkflowTemplateJobsPrestoJob", + Description: "Optional. Job is a Presto job.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "clientTags": &dcl.Property{ + Type: "array", + GoName: "ClientTags", + Description: "Optional. Presto client tags to attach to this query", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + "continueOnFailure": &dcl.Property{ + Type: "boolean", + GoName: "ContinueOnFailure", + Description: "Optional. Whether to continue executing queries if a query fails. The default value is `false`. Setting to `true` can be useful when executing independent parallel queries.", + Immutable: true, + }, + "loggingConfig": &dcl.Property{ + Type: "object", + GoName: "LoggingConfig", + GoType: "WorkflowTemplateJobsPrestoJobLoggingConfig", + Description: "Optional. The runtime log config for job execution.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "driverLogLevels": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "DriverLogLevels", + Description: "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", + Immutable: true, + }, + }, + }, + "outputFormat": &dcl.Property{ + Type: "string", + GoName: "OutputFormat", + Description: "Optional. The format in which query output will be displayed. 
See the Presto documentation for supported output formats", + Immutable: true, + }, + "properties": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "Properties", + Description: "Optional. A mapping of property names to values. Used to set Presto [session properties](https://prestodb.io/docs/current/sql/set-session.html) Equivalent to using the --session flag in the Presto CLI", + Immutable: true, + }, + "queryFileUri": &dcl.Property{ + Type: "string", + GoName: "QueryFileUri", + Description: "The HCFS URI of the script that contains SQL queries.", + Immutable: true, + }, + "queryList": &dcl.Property{ + Type: "object", + GoName: "QueryList", + GoType: "WorkflowTemplateJobsPrestoJobQueryList", + Description: "A list of queries.", + Immutable: true, + Required: []string{ + "queries", + }, + Properties: map[string]*dcl.Property{ + "queries": &dcl.Property{ + Type: "array", + GoName: "Queries", + Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + }, + }, + }, + }, + "pysparkJob": &dcl.Property{ + Type: "object", + GoName: "PysparkJob", + GoType: "WorkflowTemplateJobsPysparkJob", + Description: "Optional. Job is a PySpark job.", + Immutable: true, + Required: []string{ + "mainPythonFileUri", + }, + Properties: map[string]*dcl.Property{ + "archiveUris": &dcl.Property{ + Type: "array", + GoName: "ArchiveUris", + Description: "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. 
Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + "args": &dcl.Property{ + Type: "array", + GoName: "Args", + Description: "Optional. The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + "fileUris": &dcl.Property{ + Type: "array", + GoName: "FileUris", + Description: "Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + "jarFileUris": &dcl.Property{ + Type: "array", + GoName: "JarFileUris", + Description: "Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks.", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + "loggingConfig": &dcl.Property{ + Type: "object", + GoName: "LoggingConfig", + GoType: "WorkflowTemplateJobsPysparkJobLoggingConfig", + Description: "Optional. The runtime log config for job execution.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "driverLogLevels": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "DriverLogLevels", + Description: "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", + Immutable: true, + }, + }, + }, + "mainPythonFileUri": &dcl.Property{ + Type: "string", + GoName: "MainPythonFileUri", + Description: "Required. 
The HCFS URI of the main Python file to use as the driver. Must be a .py file.", + Immutable: true, + }, + "properties": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "Properties", + Description: "Optional. A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.", + Immutable: true, + }, + "pythonFileUris": &dcl.Property{ + Type: "array", + GoName: "PythonFileUris", + Description: "Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + }, + }, + "scheduling": &dcl.Property{ + Type: "object", + GoName: "Scheduling", + GoType: "WorkflowTemplateJobsScheduling", + Description: "Optional. Job scheduling configuration.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "maxFailuresPerHour": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "MaxFailuresPerHour", + Description: "Optional. Maximum number of times per hour a driver may be restarted as a result of driver exiting with non-zero code before job is reported failed. A job may be reported as thrashing if driver exits with non-zero code 4 times within 10 minute window. Maximum value is 10.", + Immutable: true, + }, + "maxFailuresTotal": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "MaxFailuresTotal", + Description: "Optional. Maximum number of times in total a driver may be restarted as a result of driver exiting with non-zero code before job is reported failed. 
Maximum value is 240.", + Immutable: true, + }, + }, + }, + "sparkJob": &dcl.Property{ + Type: "object", + GoName: "SparkJob", + GoType: "WorkflowTemplateJobsSparkJob", + Description: "Optional. Job is a Spark job.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "archiveUris": &dcl.Property{ + Type: "array", + GoName: "ArchiveUris", + Description: "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + "args": &dcl.Property{ + Type: "array", + GoName: "Args", + Description: "Optional. The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + "fileUris": &dcl.Property{ + Type: "array", + GoName: "FileUris", + Description: "Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + "jarFileUris": &dcl.Property{ + Type: "array", + GoName: "JarFileUris", + Description: "Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + "loggingConfig": &dcl.Property{ + Type: "object", + GoName: "LoggingConfig", + GoType: "WorkflowTemplateJobsSparkJobLoggingConfig", + Description: "Optional. 
The runtime log config for job execution.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "driverLogLevels": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "DriverLogLevels", + Description: "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", + Immutable: true, + }, + }, + }, + "mainClass": &dcl.Property{ + Type: "string", + GoName: "MainClass", + Description: "The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in `jar_file_uris`.", + Immutable: true, + }, + "mainJarFileUri": &dcl.Property{ + Type: "string", + GoName: "MainJarFileUri", + Description: "The HCFS URI of the jar file that contains the main class.", + Immutable: true, + }, + "properties": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "Properties", + Description: "Optional. A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.", + Immutable: true, + }, + }, + }, + "sparkRJob": &dcl.Property{ + Type: "object", + GoName: "SparkRJob", + GoType: "WorkflowTemplateJobsSparkRJob", + Description: "Optional. Job is a SparkR job.", + Immutable: true, + Required: []string{ + "mainRFileUri", + }, + Properties: map[string]*dcl.Property{ + "archiveUris": &dcl.Property{ + Type: "array", + GoName: "ArchiveUris", + Description: "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. 
Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + "args": &dcl.Property{ + Type: "array", + GoName: "Args", + Description: "Optional. The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + "fileUris": &dcl.Property{ + Type: "array", + GoName: "FileUris", + Description: "Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + "loggingConfig": &dcl.Property{ + Type: "object", + GoName: "LoggingConfig", + GoType: "WorkflowTemplateJobsSparkRJobLoggingConfig", + Description: "Optional. The runtime log config for job execution.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "driverLogLevels": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "DriverLogLevels", + Description: "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", + Immutable: true, + }, + }, + }, + "mainRFileUri": &dcl.Property{ + Type: "string", + GoName: "MainRFileUri", + Description: "Required. The HCFS URI of the main R file to use as the driver. Must be a .R file.", + Immutable: true, + }, + "properties": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "Properties", + Description: "Optional. A mapping of property names to values, used to configure SparkR. 
Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.", + Immutable: true, + }, + }, + }, + "sparkSqlJob": &dcl.Property{ + Type: "object", + GoName: "SparkSqlJob", + GoType: "WorkflowTemplateJobsSparkSqlJob", + Description: "Optional. Job is a SparkSql job.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "jarFileUris": &dcl.Property{ + Type: "array", + GoName: "JarFileUris", + Description: "Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + "loggingConfig": &dcl.Property{ + Type: "object", + GoName: "LoggingConfig", + GoType: "WorkflowTemplateJobsSparkSqlJobLoggingConfig", + Description: "Optional. The runtime log config for job execution.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "driverLogLevels": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "DriverLogLevels", + Description: "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", + Immutable: true, + }, + }, + }, + "properties": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "Properties", + Description: "Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. 
Properties that conflict with values set by the Dataproc API may be overwritten.", + Immutable: true, + }, + "queryFileUri": &dcl.Property{ + Type: "string", + GoName: "QueryFileUri", + Description: "The HCFS URI of the script that contains SQL queries.", + Immutable: true, + }, + "queryList": &dcl.Property{ + Type: "object", + GoName: "QueryList", + GoType: "WorkflowTemplateJobsSparkSqlJobQueryList", + Description: "A list of queries.", + Immutable: true, + Required: []string{ + "queries", + }, + Properties: map[string]*dcl.Property{ + "queries": &dcl.Property{ + Type: "array", + GoName: "Queries", + Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + }, + }, + "scriptVariables": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "ScriptVariables", + Description: "Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET `name=\"value\";`).", + Immutable: true, + }, + }, + }, + "stepId": &dcl.Property{ + Type: "string", + GoName: "StepId", + Description: "Required. The step id. The id must be unique among all jobs within the template. The step id is used as prefix for job id, as job `goog-dataproc-workflow-step-id` label, and in prerequisiteStepIds field from other steps. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. 
Must consist of between 3 and 50 characters.", + Immutable: true, + }, + }, + }, + }, + "labels": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "Labels", + Description: "Optional. The labels to associate with this template. These labels will be propagated to all jobs and clusters created by the workflow instance. Label **keys** must contain 1 to 63 characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). Label **values** may be empty, but, if present, must contain 1 to 63 characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a template.", + Immutable: true, + }, + "location": &dcl.Property{ + Type: "string", + GoName: "Location", + Description: "The location for the resource", + Immutable: true, + Parameter: true, + }, + "name": &dcl.Property{ + Type: "string", + GoName: "Name", + Description: "Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. * For `projects.regions.workflowTemplates`, the resource name of the template has the following format: `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}` * For `projects.locations.workflowTemplates`, the resource name of the template has the following format: `projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`", + Immutable: true, + Parameter: true, + HasLongForm: true, + }, + "parameters": &dcl.Property{ + Type: "array", + GoName: "Parameters", + Description: "Optional. Template parameters whose values are substituted into the template. 
Values for parameters must be provided when the template is instantiated.", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "object", + GoType: "WorkflowTemplateParameters", + Required: []string{ + "name", + "fields", + }, + Properties: map[string]*dcl.Property{ + "description": &dcl.Property{ + Type: "string", + GoName: "Description", + Description: "Optional. Brief description of the parameter. Must not exceed 1024 characters.", + Immutable: true, + }, + "fields": &dcl.Property{ + Type: "array", + GoName: "Fields", + Description: "Required. Paths to all fields that the parameter replaces. A field is allowed to appear in at most one parameter's list of field paths. A field path is similar in syntax to a google.protobuf.FieldMask. For example, a field path that references the zone field of a workflow template's cluster selector would be specified as `placement.clusterSelector.zone`. Also, field paths can reference fields using the following syntax: * Values in maps can be referenced by key: * labels['key'] * placement.clusterSelector.clusterLabels['key'] * placement.managedCluster.labels['key'] * placement.clusterSelector.clusterLabels['key'] * jobs['step-id'].labels['key'] * Jobs in the jobs list can be referenced by step-id: * jobs['step-id'].hadoopJob.mainJarFileUri * jobs['step-id'].hiveJob.queryFileUri * jobs['step-id'].pySparkJob.mainPythonFileUri * jobs['step-id'].hadoopJob.jarFileUris[0] * jobs['step-id'].hadoopJob.archiveUris[0] * jobs['step-id'].hadoopJob.fileUris[0] * jobs['step-id'].pySparkJob.pythonFileUris[0] * Items in repeated fields can be referenced by a zero-based index: * jobs['step-id'].sparkJob.args[0] * Other examples: * jobs['step-id'].hadoopJob.properties['key'] * jobs['step-id'].hadoopJob.args[0] * jobs['step-id'].hiveJob.scriptVariables['key'] * jobs['step-id'].hadoopJob.mainJarFileUri * placement.clusterSelector.zone It may not be possible to parameterize maps and repeated fields in their 
entirety since only individual map values and individual items in repeated fields can be referenced. For example, the following field paths are invalid: - placement.clusterSelector.clusterLabels - jobs['step-id'].sparkJob.args", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + "name": &dcl.Property{ + Type: "string", + GoName: "Name", + Description: "Required. Parameter name. The parameter name is used as the key, and paired with the parameter value, which are passed to the template when the template is instantiated. The name must contain only capital letters (A-Z), numbers (0-9), and underscores (_), and must not start with a number. The maximum length is 40 characters.", + Immutable: true, + }, + "validation": &dcl.Property{ + Type: "object", + GoName: "Validation", + GoType: "WorkflowTemplateParametersValidation", + Description: "Optional. Validation rules to be applied to this parameter's value.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "regex": &dcl.Property{ + Type: "object", + GoName: "Regex", + GoType: "WorkflowTemplateParametersValidationRegex", + Description: "Validation based on regular expressions.", + Immutable: true, + Required: []string{ + "regexes", + }, + Properties: map[string]*dcl.Property{ + "regexes": &dcl.Property{ + Type: "array", + GoName: "Regexes", + Description: "Required. RE2 regular expressions used to validate the parameter's value. 
The value must match the regex in its entirety (substring matches are not sufficient).", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + }, + }, + "values": &dcl.Property{ + Type: "object", + GoName: "Values", + GoType: "WorkflowTemplateParametersValidationValues", + Description: "Validation based on a list of allowed values.", + Immutable: true, + Required: []string{ + "values", + }, + Properties: map[string]*dcl.Property{ + "values": &dcl.Property{ + Type: "array", + GoName: "Values", + Description: "Required. List of allowed values for the parameter.", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + }, + }, + }, + }, + }, + }, + }, + "placement": &dcl.Property{ + Type: "object", + GoName: "Placement", + GoType: "WorkflowTemplatePlacement", + Description: "Required. WorkflowTemplate scheduling information.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "clusterSelector": &dcl.Property{ + Type: "object", + GoName: "ClusterSelector", + GoType: "WorkflowTemplatePlacementClusterSelector", + Description: "Optional. A selector that chooses target cluster for jobs based on metadata. The selector is evaluated at the time each job is submitted.", + Immutable: true, + Required: []string{ + "clusterLabels", + }, + Properties: map[string]*dcl.Property{ + "clusterLabels": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "ClusterLabels", + Description: "Required. The cluster labels. Cluster must have all labels to match.", + Immutable: true, + }, + "zone": &dcl.Property{ + Type: "string", + GoName: "Zone", + Description: "Optional. The zone where workflow process executes. This parameter does not affect the selection of the cluster. 
If unspecified, the zone of the first cluster matching the selector is used.", + Immutable: true, + }, + }, + }, + "managedCluster": &dcl.Property{ + Type: "object", + GoName: "ManagedCluster", + GoType: "WorkflowTemplatePlacementManagedCluster", + Description: "A cluster that is managed by the workflow.", + Immutable: true, + Required: []string{ + "clusterName", + "config", + }, + Properties: map[string]*dcl.Property{ + "clusterName": &dcl.Property{ + Type: "string", + GoName: "ClusterName", + Description: "Required. The cluster name prefix. A unique cluster name will be formed by appending a random suffix. The name must contain only lower-case letters (a-z), numbers (0-9), and hyphens (-). Must begin with a letter. Cannot begin or end with hyphen. Must consist of between 2 and 35 characters.", + Immutable: true, + }, + "config": &dcl.Property{ + Type: "object", + GoName: "Config", + GoType: "WorkflowTemplatePlacementManagedClusterConfig", + Description: "Required. The cluster configuration.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "autoscalingConfig": &dcl.Property{ + Type: "object", + GoName: "AutoscalingConfig", + GoType: "WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig", + Description: "Optional. Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "policy": &dcl.Property{ + Type: "string", + GoName: "Policy", + Description: "Optional. The autoscaling policy used by the cluster. Only resource names including projectid and location (region) are valid. 
Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]` * `projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]` Note that the policy must be in the same project and Dataproc region.", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Dataproc/AutoscalingPolicy", + Field: "name", + }, + }, + }, + }, + }, + "encryptionConfig": &dcl.Property{ + Type: "object", + GoName: "EncryptionConfig", + GoType: "WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig", + Description: "Optional. Encryption settings for the cluster.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "gcePdKmsKeyName": &dcl.Property{ + Type: "string", + GoName: "GcePdKmsKeyName", + Description: "Optional. The Cloud KMS key name to use for PD disk encryption for all instances in the cluster.", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Cloudkms/CryptoKey", + Field: "selfLink", + }, + }, + }, + }, + }, + "endpointConfig": &dcl.Property{ + Type: "object", + GoName: "EndpointConfig", + GoType: "WorkflowTemplatePlacementManagedClusterConfigEndpointConfig", + Description: "Optional. Port/endpoint configuration for this cluster", + Immutable: true, + Properties: map[string]*dcl.Property{ + "enableHttpPortAccess": &dcl.Property{ + Type: "boolean", + GoName: "EnableHttpPortAccess", + Description: "Optional. If true, enable http access to specific ports on the cluster from external sources. Defaults to false.", + Immutable: true, + }, + "httpPorts": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "HttpPorts", + ReadOnly: true, + Description: "Output only. The map of port descriptions to URLs. 
Will only be populated if enable_http_port_access is true.", + Immutable: true, + }, + }, + }, + "gceClusterConfig": &dcl.Property{ + Type: "object", + GoName: "GceClusterConfig", + GoType: "WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig", + Description: "Optional. The shared Compute Engine config settings for all instances in a cluster.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "internalIPOnly": &dcl.Property{ + Type: "boolean", + GoName: "InternalIPOnly", + Description: "Optional. If true, all instances in the cluster will only have internal IP addresses. By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. This `internal_ip_only` restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses.", + Immutable: true, + ServerDefault: true, + }, + "metadata": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "Metadata", + Description: "The Compute Engine metadata entries to add to all instances (see [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).", + Immutable: true, + }, + "network": &dcl.Property{ + Type: "string", + GoName: "Network", + Description: "Optional. The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither `network_uri` nor `subnetwork_uri` is specified, the \"default\" network of the project is used, if it exists. Cannot be a \"Custom Subnet Network\" (see [Using Subnetworks](https://cloud.google.com/compute/docs/subnetworks) for more information). A full URL, partial URI, or short name are valid. 
Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default` * `projects/[project_id]/regions/global/default` * `default`", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Compute/Network", + Field: "selfLink", + }, + }, + }, + "nodeGroupAffinity": &dcl.Property{ + Type: "object", + GoName: "NodeGroupAffinity", + GoType: "WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity", + Description: "Optional. Node Group Affinity for sole-tenant clusters.", + Immutable: true, + Required: []string{ + "nodeGroup", + }, + Properties: map[string]*dcl.Property{ + "nodeGroup": &dcl.Property{ + Type: "string", + GoName: "NodeGroup", + Description: "Required. The URI of a sole-tenant [node group resource](https://cloud.google.com/compute/docs/reference/rest/v1/nodeGroups) that the cluster will be created on. A full URL, partial URI, or node group name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1` * `projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1` * `node-group-1`", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Compute/NodeGroup", + Field: "selfLink", + }, + }, + }, + }, + }, + "privateIPv6GoogleAccess": &dcl.Property{ + Type: "string", + GoName: "PrivateIPv6GoogleAccess", + GoType: "WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum", + Description: "Optional. The type of IPv6 access for a cluster. 
Possible values: PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED, INHERIT_FROM_SUBNETWORK, OUTBOUND, BIDIRECTIONAL", + Immutable: true, + Enum: []string{ + "PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED", + "INHERIT_FROM_SUBNETWORK", + "OUTBOUND", + "BIDIRECTIONAL", + }, + }, + "reservationAffinity": &dcl.Property{ + Type: "object", + GoName: "ReservationAffinity", + GoType: "WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity", + Description: "Optional. Reservation Affinity for consuming Zonal reservation.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "consumeReservationType": &dcl.Property{ + Type: "string", + GoName: "ConsumeReservationType", + GoType: "WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum", + Description: "Optional. Type of reservation to consume Possible values: TYPE_UNSPECIFIED, NO_RESERVATION, ANY_RESERVATION, SPECIFIC_RESERVATION", + Immutable: true, + Enum: []string{ + "TYPE_UNSPECIFIED", + "NO_RESERVATION", + "ANY_RESERVATION", + "SPECIFIC_RESERVATION", + }, + }, + "key": &dcl.Property{ + Type: "string", + GoName: "Key", + Description: "Optional. Corresponds to the label key of reservation resource.", + Immutable: true, + }, + "values": &dcl.Property{ + Type: "array", + GoName: "Values", + Description: "Optional. Corresponds to the label values of reservation resource.", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + }, + }, + "serviceAccount": &dcl.Property{ + Type: "string", + GoName: "ServiceAccount", + Description: "Optional. 
The [Dataproc service account](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_dataproc) (also see [VM Data Plane identity](https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity)) used by Dataproc cluster VM instances to access Google Cloud Platform services. If not specified, the [Compute Engine default service account](https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) is used.", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Iam/ServiceAccount", + Field: "email", + }, + }, + }, + "serviceAccountScopes": &dcl.Property{ + Type: "array", + GoName: "ServiceAccountScopes", + Description: "Optional. The URIs of service account scopes to be included in Compute Engine instances. The following base set of scopes is always included: * https://www.googleapis.com/auth/cloud.useraccounts.readonly * https://www.googleapis.com/auth/devstorage.read_write * https://www.googleapis.com/auth/logging.write If no scopes are specified, the following defaults are also provided: * https://www.googleapis.com/auth/bigquery * https://www.googleapis.com/auth/bigtable.admin.table * https://www.googleapis.com/auth/bigtable.data * https://www.googleapis.com/auth/devstorage.full_control", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + "shieldedInstanceConfig": &dcl.Property{ + Type: "object", + GoName: "ShieldedInstanceConfig", + GoType: "WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig", + Description: "Optional. 
Shielded Instance Config for clusters using Compute Engine Shielded VMs.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "enableIntegrityMonitoring": &dcl.Property{ + Type: "boolean", + GoName: "EnableIntegrityMonitoring", + Description: "Optional. Defines whether instances have integrity monitoring enabled. Integrity monitoring compares the most recent boot measurements to the integrity policy baseline and returns a pair of pass/fail results depending on whether they match or not.", + Immutable: true, + }, + "enableSecureBoot": &dcl.Property{ + Type: "boolean", + GoName: "EnableSecureBoot", + Description: "Optional. Defines whether the instances have Secure Boot enabled. Secure Boot helps ensure that the system only runs authentic software by verifying the digital signature of all boot components, and halting the boot process if signature verification fails.", + Immutable: true, + }, + "enableVtpm": &dcl.Property{ + Type: "boolean", + GoName: "EnableVtpm", + Description: "Optional. Defines whether the instance have the vTPM enabled. Virtual Trusted Platform Module protects objects like keys, certificates and enables Measured Boot by performing the measurements needed to create a known good boot baseline, called the integrity policy baseline.", + Immutable: true, + }, + }, + }, + "subnetwork": &dcl.Property{ + Type: "string", + GoName: "Subnetwork", + Description: "Optional. The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri. A full URL, partial URI, or short name are valid. 
Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/subnetworks/sub0` * `projects/[project_id]/regions/us-east1/subnetworks/sub0` * `sub0`", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Compute/Subnetwork", + Field: "selfLink", + }, + }, + }, + "tags": &dcl.Property{ + Type: "array", + GoName: "Tags", + Description: "The Compute Engine tags to add to all instances (see [Tagging instances](https://cloud.google.com/compute/docs/label-or-tag-resources#tags)).", + Immutable: true, + SendEmpty: true, + ListType: "set", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + "zone": &dcl.Property{ + Type: "string", + GoName: "Zone", + Description: "Optional. The zone where the Compute Engine cluster will be located. On a create request, it is required in the \"global\" region. If omitted in a non-global Dataproc region, the service will pick a zone in the corresponding Compute Engine region. On a get request, zone will always be present. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]` * `projects/[project_id]/zones/[zone]` * `us-central1-f`", + Immutable: true, + }, + }, + }, +{{- if ne $.TargetVersionName "ga" }} + "gkeClusterConfig": &dcl.Property{ + Type: "object", + GoName: "GkeClusterConfig", + GoType: "WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig", + Description: "Optional. BETA. The Kubernetes Engine config for Dataproc clusters deployed to Kubernetes. 
Setting this is considered mutually exclusive with Compute Engine-based options such as `gce_cluster_config`, `master_config`, `worker_config`, `secondary_worker_config`, and `autoscaling_config`.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "namespacedGkeDeploymentTarget": &dcl.Property{ + Type: "object", + GoName: "NamespacedGkeDeploymentTarget", + GoType: "WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget", + Description: "Optional. A target for the deployment.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "clusterNamespace": &dcl.Property{ + Type: "string", + GoName: "ClusterNamespace", + Description: "Optional. A namespace within the GKE cluster to deploy into.", + Immutable: true, + }, + "targetGkeCluster": &dcl.Property{ + Type: "string", + GoName: "TargetGkeCluster", + Description: "Optional. The target GKE cluster to deploy to. Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Container/Cluster", + Field: "name", + }, + }, + }, + }, + }, + }, + }, +{{- end }} + "initializationActions": &dcl.Property{ + Type: "array", + GoName: "InitializationActions", + Description: "Optional. Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's `role` metadata to run an executable on a master or worker node, as shown below using `curl` (you can also use `wget`): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if [[ \"${ROLE}\" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... 
fi", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "object", + GoType: "WorkflowTemplatePlacementManagedClusterConfigInitializationActions", + Properties: map[string]*dcl.Property{ + "executableFile": &dcl.Property{ + Type: "string", + GoName: "ExecutableFile", + Description: "Required. Cloud Storage URI of executable file.", + Immutable: true, + }, + "executionTimeout": &dcl.Property{ + Type: "string", + GoName: "ExecutionTimeout", + Description: "Optional. Amount of time executable has to complete. Default is 10 minutes (see JSON representation of [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period.", + Immutable: true, + }, + }, + }, + }, + "lifecycleConfig": &dcl.Property{ + Type: "object", + GoName: "LifecycleConfig", + GoType: "WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig", + Description: "Optional. Lifecycle setting for the cluster.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "autoDeleteTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "AutoDeleteTime", + Description: "Optional. The time when cluster will be auto-deleted (see JSON representation of [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).", + Immutable: true, + }, + "autoDeleteTtl": &dcl.Property{ + Type: "string", + GoName: "AutoDeleteTtl", + Description: "Optional. The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).", + Immutable: true, + }, + "idleDeleteTtl": &dcl.Property{ + Type: "string", + GoName: "IdleDeleteTtl", + Description: "Optional. 
The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).", + Immutable: true, + }, + "idleStartTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "IdleStartTime", + ReadOnly: true, + Description: "Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).", + Immutable: true, + }, + }, + }, + "masterConfig": &dcl.Property{ + Type: "object", + GoName: "MasterConfig", + GoType: "WorkflowTemplatePlacementManagedClusterConfigMasterConfig", + Description: "Optional. The Compute Engine config settings for the master instance in a cluster.", + Immutable: true, + ServerDefault: true, + Properties: map[string]*dcl.Property{ + "accelerators": &dcl.Property{ + Type: "array", + GoName: "Accelerators", + Description: "Optional. The Compute Engine accelerator configuration for these instances.", + Immutable: true, + ServerDefault: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "object", + GoType: "WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators", + Properties: map[string]*dcl.Property{ + "acceleratorCount": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "AcceleratorCount", + Description: "The number of the accelerator cards of this type exposed to this instance.", + Immutable: true, + }, + "acceleratorType": &dcl.Property{ + Type: "string", + GoName: "AcceleratorType", + Description: "Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. 
See [Compute Engine AcceleratorTypes](https://cloud.google.com/compute/docs/reference/beta/acceleratorTypes). Examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * `nvidia-tesla-k80` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, `nvidia-tesla-k80`.", + Immutable: true, + }, + }, + }, + }, + "diskConfig": &dcl.Property{ + Type: "object", + GoName: "DiskConfig", + GoType: "WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig", + Description: "Optional. Disk option config settings.", + Immutable: true, + ServerDefault: true, + Properties: map[string]*dcl.Property{ + "bootDiskSizeGb": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "BootDiskSizeGb", + Description: "Optional. Size in GB of the boot disk (default is 500GB).", + Immutable: true, + }, + "bootDiskType": &dcl.Property{ + Type: "string", + GoName: "BootDiskType", + Description: "Optional. Type of the boot disk (default is \"pd-standard\"). Valid values: \"pd-balanced\" (Persistent Disk Balanced Solid State Drive), \"pd-ssd\" (Persistent Disk Solid State Drive), or \"pd-standard\" (Persistent Disk Hard Disk Drive). See [Disk types](https://cloud.google.com/compute/docs/disks#disk-types).", + Immutable: true, + }, + "numLocalSsds": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "NumLocalSsds", + Description: "Optional. Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. 
If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.", + Immutable: true, + ServerDefault: true, + }, + }, + }, + "image": &dcl.Property{ + Type: "string", + GoName: "Image", + Description: "Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id]` * `projects/[project_id]/global/images/[image-id]` * `image-id` Image family examples. Dataproc will use the most recent image from the family: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name]` * `projects/[project_id]/global/images/family/[custom-image-family-name]` If the URI is unspecified, it will be inferred from `SoftwareConfig.image_version` or the system default.", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Compute/Image", + Field: "selfLink", + }, + }, + }, + "instanceNames": &dcl.Property{ + Type: "array", + GoName: "InstanceNames", + ReadOnly: true, + Description: "Output only. The list of instance names. Dataproc derives the names from `cluster_name`, `num_instances`, and the instance group.", + Immutable: true, + ServerDefault: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Compute/Instance", + Field: "selfLink", + }, + }, + }, + }, + "isPreemptible": &dcl.Property{ + Type: "boolean", + GoName: "IsPreemptible", + ReadOnly: true, + Description: "Output only. Specifies that this instance group contains preemptible instances.", + Immutable: true, + }, + "machineType": &dcl.Property{ + Type: "string", + GoName: "MachineType", + Description: "Optional. 
The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `n1-standard-2` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, `n1-standard-2`.", + Immutable: true, + }, + "managedGroupConfig": &dcl.Property{ + Type: "object", + GoName: "ManagedGroupConfig", + GoType: "WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig", + ReadOnly: true, + Description: "Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.", + Immutable: true, + ServerDefault: true, + Properties: map[string]*dcl.Property{ + "instanceGroupManagerName": &dcl.Property{ + Type: "string", + GoName: "InstanceGroupManagerName", + ReadOnly: true, + Description: "Output only. The name of the Instance Group Manager for this group.", + Immutable: true, + }, + "instanceTemplateName": &dcl.Property{ + Type: "string", + GoName: "InstanceTemplateName", + ReadOnly: true, + Description: "Output only. The name of the Instance Template used for the Managed Instance Group.", + Immutable: true, + }, + }, + }, + "minCpuPlatform": &dcl.Property{ + Type: "string", + GoName: "MinCpuPlatform", + Description: "Optional. Specifies the minimum cpu platform for the Instance Group. See [Dataproc -> Minimum CPU Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).", + Immutable: true, + ServerDefault: true, + }, + "numInstances": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "NumInstances", + Description: "Optional. 
The number of VM instances in the instance group. For [HA cluster](/dataproc/docs/concepts/configuring-clusters/high-availability) [master_config](#FIELDS.master_config) groups, **must be set to 3**. For standard cluster [master_config](#FIELDS.master_config) groups, **must be set to 1**.", + Immutable: true, + }, + "preemptibility": &dcl.Property{ + Type: "string", + GoName: "Preemptibility", + GoType: "WorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum", + Description: "Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is `NON_PREEMPTIBLE`. This default cannot be changed. The default value for secondary instances is `PREEMPTIBLE`. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE", + Immutable: true, + Enum: []string{ + "PREEMPTIBILITY_UNSPECIFIED", + "NON_PREEMPTIBLE", + "PREEMPTIBLE", +{{- if ne $.TargetVersionName "ga" }} + }, + }, + }, + }, + "metastoreConfig": &dcl.Property{ + Type: "object", + GoName: "MetastoreConfig", + GoType: "WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig", + Description: "Optional. Metastore configuration.", + Immutable: true, + Required: []string{ + "dataprocMetastoreService", + }, + Properties: map[string]*dcl.Property{ + "dataprocMetastoreService": &dcl.Property{ + Type: "string", + GoName: "DataprocMetastoreService", + Description: "Required. Resource name of an existing Dataproc Metastore service. Example: * `projects/[project_id]/locations/[dataproc_region]/services/[service-name]`", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Metastore/Service", + Field: "selfLink", + }, +{{- end }} + }, + }, + }, + }, + "secondaryWorkerConfig": &dcl.Property{ + Type: "object", + GoName: "SecondaryWorkerConfig", + GoType: "WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig", + Description: "Optional. 
The Compute Engine config settings for additional worker instances in a cluster.", + Immutable: true, + ServerDefault: true, + Properties: map[string]*dcl.Property{ + "accelerators": &dcl.Property{ + Type: "array", + GoName: "Accelerators", + Description: "Optional. The Compute Engine accelerator configuration for these instances.", + Immutable: true, + ServerDefault: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "object", + GoType: "WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators", + Properties: map[string]*dcl.Property{ + "acceleratorCount": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "AcceleratorCount", + Description: "The number of the accelerator cards of this type exposed to this instance.", + Immutable: true, + }, + "acceleratorType": &dcl.Property{ + Type: "string", + GoName: "AcceleratorType", + Description: "Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See [Compute Engine AcceleratorTypes](https://cloud.google.com/compute/docs/reference/beta/acceleratorTypes). Examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * `nvidia-tesla-k80` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, `nvidia-tesla-k80`.", + Immutable: true, + }, + }, + }, + }, + "diskConfig": &dcl.Property{ + Type: "object", + GoName: "DiskConfig", + GoType: "WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig", + Description: "Optional. 
Disk option config settings.", + Immutable: true, + ServerDefault: true, + Properties: map[string]*dcl.Property{ + "bootDiskSizeGb": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "BootDiskSizeGb", + Description: "Optional. Size in GB of the boot disk (default is 500GB).", + Immutable: true, + }, + "bootDiskType": &dcl.Property{ + Type: "string", + GoName: "BootDiskType", + Description: "Optional. Type of the boot disk (default is \"pd-standard\"). Valid values: \"pd-balanced\" (Persistent Disk Balanced Solid State Drive), \"pd-ssd\" (Persistent Disk Solid State Drive), or \"pd-standard\" (Persistent Disk Hard Disk Drive). See [Disk types](https://cloud.google.com/compute/docs/disks#disk-types).", + Immutable: true, + }, + "numLocalSsds": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "NumLocalSsds", + Description: "Optional. Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.", + Immutable: true, + ServerDefault: true, + }, + }, + }, + "image": &dcl.Property{ + Type: "string", + GoName: "Image", + Description: "Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id]` * `projects/[project_id]/global/images/[image-id]` * `image-id` Image family examples. 
Dataproc will use the most recent image from the family: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name]` * `projects/[project_id]/global/images/family/[custom-image-family-name]` If the URI is unspecified, it will be inferred from `SoftwareConfig.image_version` or the system default.", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Compute/Image", + Field: "selfLink", + }, + }, + }, + "instanceNames": &dcl.Property{ + Type: "array", + GoName: "InstanceNames", + ReadOnly: true, + Description: "Output only. The list of instance names. Dataproc derives the names from `cluster_name`, `num_instances`, and the instance group.", + Immutable: true, + ServerDefault: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Compute/Instance", + Field: "selfLink", + }, + }, + }, + }, + "isPreemptible": &dcl.Property{ + Type: "boolean", + GoName: "IsPreemptible", + ReadOnly: true, + Description: "Output only. Specifies that this instance group contains preemptible instances.", + Immutable: true, + }, + "machineType": &dcl.Property{ + Type: "string", + GoName: "MachineType", + Description: "Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. 
Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `n1-standard-2` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, `n1-standard-2`.", + Immutable: true, + }, + "managedGroupConfig": &dcl.Property{ + Type: "object", + GoName: "ManagedGroupConfig", + GoType: "WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig", + ReadOnly: true, + Description: "Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.", + Immutable: true, + ServerDefault: true, + Properties: map[string]*dcl.Property{ + "instanceGroupManagerName": &dcl.Property{ + Type: "string", + GoName: "InstanceGroupManagerName", + ReadOnly: true, + Description: "Output only. The name of the Instance Group Manager for this group.", + Immutable: true, + }, + "instanceTemplateName": &dcl.Property{ + Type: "string", + GoName: "InstanceTemplateName", + ReadOnly: true, + Description: "Output only. The name of the Instance Template used for the Managed Instance Group.", + Immutable: true, + }, + }, + }, + "minCpuPlatform": &dcl.Property{ + Type: "string", + GoName: "MinCpuPlatform", + Description: "Optional. Specifies the minimum cpu platform for the Instance Group. See [Dataproc -> Minimum CPU Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).", + Immutable: true, + ServerDefault: true, + }, + "numInstances": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "NumInstances", + Description: "Optional. The number of VM instances in the instance group. 
For [HA cluster](/dataproc/docs/concepts/configuring-clusters/high-availability) [master_config](#FIELDS.master_config) groups, **must be set to 3**. For standard cluster [master_config](#FIELDS.master_config) groups, **must be set to 1**.", + Immutable: true, + }, + "preemptibility": &dcl.Property{ + Type: "string", + GoName: "Preemptibility", + GoType: "WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum", + Description: "Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is `NON_PREEMPTIBLE`. This default cannot be changed. The default value for secondary instances is `PREEMPTIBLE`. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE", + Immutable: true, + Enum: []string{ + "PREEMPTIBILITY_UNSPECIFIED", + "NON_PREEMPTIBLE", + "PREEMPTIBLE", + }, + }, + }, + }, + "securityConfig": &dcl.Property{ + Type: "object", + GoName: "SecurityConfig", + GoType: "WorkflowTemplatePlacementManagedClusterConfigSecurityConfig", + Description: "Optional. Security settings for the cluster.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "kerberosConfig": &dcl.Property{ + Type: "object", + GoName: "KerberosConfig", + GoType: "WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig", + Description: "Optional. Kerberos related configuration.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "crossRealmTrustAdminServer": &dcl.Property{ + Type: "string", + GoName: "CrossRealmTrustAdminServer", + Description: "Optional. The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship.", + Immutable: true, + }, + "crossRealmTrustKdc": &dcl.Property{ + Type: "string", + GoName: "CrossRealmTrustKdc", + Description: "Optional. 
The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship.", + Immutable: true, + }, + "crossRealmTrustRealm": &dcl.Property{ + Type: "string", + GoName: "CrossRealmTrustRealm", + Description: "Optional. The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust.", + Immutable: true, + }, + "crossRealmTrustSharedPassword": &dcl.Property{ + Type: "string", + GoName: "CrossRealmTrustSharedPassword", + Description: "Optional. The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship.", + Immutable: true, + }, + "enableKerberos": &dcl.Property{ + Type: "boolean", + GoName: "EnableKerberos", + Description: "Optional. Flag to indicate whether to Kerberize the cluster (default: false). Set this field to true to enable Kerberos on a cluster.", + Immutable: true, + }, + "kdcDbKey": &dcl.Property{ + Type: "string", + GoName: "KdcDbKey", + Description: "Optional. The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database.", + Immutable: true, + }, + "keyPassword": &dcl.Property{ + Type: "string", + GoName: "KeyPassword", + Description: "Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc.", + Immutable: true, + }, + "keystore": &dcl.Property{ + Type: "string", + GoName: "Keystore", + Description: "Optional. The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.", + Immutable: true, + }, + "keystorePassword": &dcl.Property{ + Type: "string", + GoName: "KeystorePassword", + Description: "Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. 
For the self-signed certificate, this password is generated by Dataproc.", + Immutable: true, + }, + "kmsKey": &dcl.Property{ + Type: "string", + GoName: "KmsKey", + Description: "Optional. The uri of the KMS key used to encrypt various sensitive files.", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Cloudkms/CryptoKey", + Field: "selfLink", + }, + }, + }, + "realm": &dcl.Property{ + Type: "string", + GoName: "Realm", + Description: "Optional. The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm.", + Immutable: true, + }, + "rootPrincipalPassword": &dcl.Property{ + Type: "string", + GoName: "RootPrincipalPassword", + Description: "Optional. The Cloud Storage URI of a KMS encrypted file containing the root principal password.", + Immutable: true, + }, + "tgtLifetimeHours": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "TgtLifetimeHours", + Description: "Optional. The lifetime of the ticket granting ticket, in hours. If not specified, or user specifies 0, then default value 10 will be used.", + Immutable: true, + }, + "truststore": &dcl.Property{ + Type: "string", + GoName: "Truststore", + Description: "Optional. The Cloud Storage URI of the truststore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.", + Immutable: true, + }, + "truststorePassword": &dcl.Property{ + Type: "string", + GoName: "TruststorePassword", + Description: "Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc.", + Immutable: true, + }, + }, + }, + }, + }, + "softwareConfig": &dcl.Property{ + Type: "object", + GoName: "SoftwareConfig", + GoType: "WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig", + Description: "Optional. 
The config settings for software inside the cluster.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "imageVersion": &dcl.Property{ + Type: "string", + GoName: "ImageVersion", + Description: "Optional. The version of software inside the cluster. It must be one of the supported [Dataproc Versions](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_dataproc_versions), such as \"1.2\" (including a subminor version, such as \"1.2.29\"), or the [\"preview\" version](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). If unspecified, it defaults to the latest Debian version.", + Immutable: true, + }, + "optionalComponents": &dcl.Property{ + Type: "array", + GoName: "OptionalComponents", + Description: "Optional. The set of components to activate on the cluster.", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum", + Enum: []string{ + "COMPONENT_UNSPECIFIED", + "ANACONDA", + "DOCKER", + "DRUID", + "FLINK", + "HBASE", + "HIVE_WEBHCAT", + "JUPYTER", + "KERBEROS", + "PRESTO", + "RANGER", + "SOLR", + "ZEPPELIN", + "ZOOKEEPER", + }, + }, + }, + "properties": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "Properties", + Description: "Optional. The properties to set on daemon config files. Property keys are specified in `prefix:property` format, for example `core:hadoop.tmp.dir`. 
The following are supported prefixes and their mappings: * capacity-scheduler: `capacity-scheduler.xml` * core: `core-site.xml` * distcp: `distcp-default.xml` * hdfs: `hdfs-site.xml` * hive: `hive-site.xml` * mapred: `mapred-site.xml` * pig: `pig.properties` * spark: `spark-defaults.conf` * yarn: `yarn-site.xml` For more information, see [Cluster properties](https://cloud.google.com/dataproc/docs/concepts/cluster-properties).", + Immutable: true, + }, + }, + }, + "stagingBucket": &dcl.Property{ + Type: "string", + GoName: "StagingBucket", + Description: "Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see [Dataproc staging bucket](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). **This field requires a Cloud Storage bucket name, not a URI to a Cloud Storage bucket.**", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Storage/Bucket", + Field: "name", + }, + }, + }, + "tempBucket": &dcl.Property{ + Type: "string", + GoName: "TempBucket", + Description: "Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket. 
**This field requires a Cloud Storage bucket name, not a URI to a Cloud Storage bucket.**", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Storage/Bucket", + Field: "name", + }, + }, + }, + "workerConfig": &dcl.Property{ + Type: "object", + GoName: "WorkerConfig", + GoType: "WorkflowTemplatePlacementManagedClusterConfigWorkerConfig", + Description: "Optional. The Compute Engine config settings for worker instances in a cluster.", + Immutable: true, + ServerDefault: true, + Properties: map[string]*dcl.Property{ + "accelerators": &dcl.Property{ + Type: "array", + GoName: "Accelerators", + Description: "Optional. The Compute Engine accelerator configuration for these instances.", + Immutable: true, + ServerDefault: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "object", + GoType: "WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators", + Properties: map[string]*dcl.Property{ + "acceleratorCount": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "AcceleratorCount", + Description: "The number of the accelerator cards of this type exposed to this instance.", + Immutable: true, + }, + "acceleratorType": &dcl.Property{ + Type: "string", + GoName: "AcceleratorType", + Description: "Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See [Compute Engine AcceleratorTypes](https://cloud.google.com/compute/docs/reference/beta/acceleratorTypes). 
Examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * `nvidia-tesla-k80` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, `nvidia-tesla-k80`.", + Immutable: true, + }, + }, + }, + }, + "diskConfig": &dcl.Property{ + Type: "object", + GoName: "DiskConfig", + GoType: "WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig", + Description: "Optional. Disk option config settings.", + Immutable: true, + ServerDefault: true, + Properties: map[string]*dcl.Property{ + "bootDiskSizeGb": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "BootDiskSizeGb", + Description: "Optional. Size in GB of the boot disk (default is 500GB).", + Immutable: true, + }, + "bootDiskType": &dcl.Property{ + Type: "string", + GoName: "BootDiskType", + Description: "Optional. Type of the boot disk (default is \"pd-standard\"). Valid values: \"pd-balanced\" (Persistent Disk Balanced Solid State Drive), \"pd-ssd\" (Persistent Disk Solid State Drive), or \"pd-standard\" (Persistent Disk Hard Disk Drive). See [Disk types](https://cloud.google.com/compute/docs/disks#disk-types).", + Immutable: true, + }, + "numLocalSsds": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "NumLocalSsds", + Description: "Optional. Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. 
If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.", + Immutable: true, + ServerDefault: true, + }, + }, + }, + "image": &dcl.Property{ + Type: "string", + GoName: "Image", + Description: "Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id]` * `projects/[project_id]/global/images/[image-id]` * `image-id` Image family examples. Dataproc will use the most recent image from the family: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name]` * `projects/[project_id]/global/images/family/[custom-image-family-name]` If the URI is unspecified, it will be inferred from `SoftwareConfig.image_version` or the system default.", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Compute/Image", + Field: "selfLink", + }, + }, + }, + "instanceNames": &dcl.Property{ + Type: "array", + GoName: "InstanceNames", + ReadOnly: true, + Description: "Output only. The list of instance names. Dataproc derives the names from `cluster_name`, `num_instances`, and the instance group.", + Immutable: true, + ServerDefault: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Compute/Instance", + Field: "selfLink", + }, + }, + }, + }, + "isPreemptible": &dcl.Property{ + Type: "boolean", + GoName: "IsPreemptible", + ReadOnly: true, + Description: "Output only. Specifies that this instance group contains preemptible instances.", + Immutable: true, + }, + "machineType": &dcl.Property{ + Type: "string", + GoName: "MachineType", + Description: "Optional. 
The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `n1-standard-2` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, `n1-standard-2`.", + Immutable: true, + }, + "managedGroupConfig": &dcl.Property{ + Type: "object", + GoName: "ManagedGroupConfig", + GoType: "WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig", + ReadOnly: true, + Description: "Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.", + Immutable: true, + ServerDefault: true, + Properties: map[string]*dcl.Property{ + "instanceGroupManagerName": &dcl.Property{ + Type: "string", + GoName: "InstanceGroupManagerName", + ReadOnly: true, + Description: "Output only. The name of the Instance Group Manager for this group.", + Immutable: true, + }, + "instanceTemplateName": &dcl.Property{ + Type: "string", + GoName: "InstanceTemplateName", + ReadOnly: true, + Description: "Output only. The name of the Instance Template used for the Managed Instance Group.", + Immutable: true, + }, + }, + }, + "minCpuPlatform": &dcl.Property{ + Type: "string", + GoName: "MinCpuPlatform", + Description: "Optional. Specifies the minimum cpu platform for the Instance Group. See [Dataproc -> Minimum CPU Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).", + Immutable: true, + ServerDefault: true, + }, + "numInstances": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "NumInstances", + Description: "Optional. 
The number of VM instances in the instance group. For [HA cluster](/dataproc/docs/concepts/configuring-clusters/high-availability) [master_config](#FIELDS.master_config) groups, **must be set to 3**. For standard cluster [master_config](#FIELDS.master_config) groups, **must be set to 1**.", + Immutable: true, + }, + "preemptibility": &dcl.Property{ + Type: "string", + GoName: "Preemptibility", + GoType: "WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum", + Description: "Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is `NON_PREEMPTIBLE`. This default cannot be changed. The default value for secondary instances is `PREEMPTIBLE`. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE", + Immutable: true, + Enum: []string{ + "PREEMPTIBILITY_UNSPECIFIED", + "NON_PREEMPTIBLE", + "PREEMPTIBLE", + }, + }, + }, + }, + }, + }, + "labels": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "Labels", + Description: "Optional. The labels to associate with this cluster. Label keys must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: p{Ll}p{Lo}{0,62} Label values must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: [p{Ll}p{Lo}p{N}_-]{0,63} No more than 32 labels can be associated with a given cluster.", + Immutable: true, + }, + }, + }, + }, + }, + "project": &dcl.Property{ + Type: "string", + GoName: "Project", + Description: "The project for the resource", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Cloudresourcemanager/Project", + Field: "name", + Parent: true, + }, + }, + Parameter: true, + }, + "updateTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "UpdateTime", + ReadOnly: true, + Description: "Output only. 
The time template was last updated.", + Immutable: true, + }, + "version": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "Version", + ReadOnly: true, + Description: "Output only. The current version of this workflow template.", + Immutable: true, + }, + }, + }, + }, + }, + }, + } +} diff --git a/mmv1/third_party/terraform/services/firebaserules/client.go.tmpl b/mmv1/third_party/terraform/services/firebaserules/client.go.tmpl new file mode 100644 index 000000000000..ef1efc0c177f --- /dev/null +++ b/mmv1/third_party/terraform/services/firebaserules/client.go.tmpl @@ -0,0 +1,18 @@ +package firebaserules + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +// The Client is the base struct of all operations. This will receive the +// Get, Delete, List, and Apply operations on all resources. +type Client struct { + Config *dcl.Config +} + +// NewClient creates a client that retries all operations a few times each. +func NewClient(c *dcl.Config) *Client { + return &Client{ + Config: c, + } +} diff --git a/mmv1/third_party/terraform/services/firebaserules/provider_dcl_client_creation.go b/mmv1/third_party/terraform/services/firebaserules/provider_dcl_client_creation.go new file mode 100644 index 000000000000..13ffd583df1b --- /dev/null +++ b/mmv1/third_party/terraform/services/firebaserules/provider_dcl_client_creation.go @@ -0,0 +1,30 @@ +package firebaserules + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "time" +) + +func NewDCLFirebaserulesClient(config *transport_tpg.Config, userAgent, billingProject string, timeout time.Duration) *Client { + configOptions := []dcl.ConfigOption{ + dcl.WithHTTPClient(config.Client), + dcl.WithUserAgent(userAgent), + dcl.WithLogger(dcl.DCLLogger{}), + dcl.WithBasePath(config.FirebaserulesBasePath), + } + + if timeout != 0 { + configOptions = 
append(configOptions, dcl.WithTimeout(timeout)) + } + + if config.UserProjectOverride { + configOptions = append(configOptions, dcl.WithUserProjectOverride()) + if billingProject != "" { + configOptions = append(configOptions, dcl.WithBillingProject(billingProject)) + } + } + + dclConfig := dcl.NewConfig(configOptions...) + return NewClient(dclConfig) +} diff --git a/mmv1/third_party/terraform/services/firebaserules/release.go.tmpl b/mmv1/third_party/terraform/services/firebaserules/release.go.tmpl new file mode 100644 index 000000000000..dc32f3cb5212 --- /dev/null +++ b/mmv1/third_party/terraform/services/firebaserules/release.go.tmpl @@ -0,0 +1,365 @@ +package firebaserules + +import ( + "context" + "fmt" + "time" + +{{- if ne $.TargetVersionName "ga" }} + "google.golang.org/api/googleapi" +{{- end }} + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +{{- if eq $.TargetVersionName "ga" }} + "google.golang.org/api/googleapi" +{{- end }} +) + +type Release struct { + Name *string `json:"name"` + RulesetName *string `json:"rulesetName"` + CreateTime *string `json:"createTime"` + UpdateTime *string `json:"updateTime"` + Disabled *bool `json:"disabled"` + Project *string `json:"project"` +} + +func (r *Release) String() string { + return dcl.SprintResource(r) +} + +// Describe returns a simple description of this resource to ensure that automated tools +// can identify it. 
+func (r *Release) Describe() dcl.ServiceTypeVersion { + return dcl.ServiceTypeVersion{ + Service: "firebaserules", + Type: "Release", +{{- if ne $.TargetVersionName "ga" }} + Version: "beta", +{{- else }} + Version: "firebaserules", +{{- end }} + } +} + +func (r *Release) ID() (string, error) { + if err := extractReleaseFields(r); err != nil { + return "", err + } + nr := r.urlNormalized() + params := map[string]interface{}{ + "name": dcl.ValueOrEmptyString(nr.Name), + "ruleset_name": dcl.ValueOrEmptyString(nr.RulesetName), + "create_time": dcl.ValueOrEmptyString(nr.CreateTime), + "update_time": dcl.ValueOrEmptyString(nr.UpdateTime), + "disabled": dcl.ValueOrEmptyString(nr.Disabled), + "project": dcl.ValueOrEmptyString(nr.Project), + } + return dcl.Nprintf("projects/{{ "{{" }}project{{ "}}" }}/releases/{{ "{{" }}name{{ "}}" }}", params), nil +} + +const ReleaseMaxPage = -1 + +type ReleaseList struct { + Items []*Release + + nextToken string + + pageSize int32 + + resource *Release +} + +func (l *ReleaseList) HasNext() bool { + return l.nextToken != "" +} + +func (l *ReleaseList) Next(ctx context.Context, c *Client) error { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if !l.HasNext() { + return fmt.Errorf("no next page") + } + items, token, err := c.listRelease(ctx, l.resource, l.nextToken, l.pageSize) + if err != nil { + return err + } + l.Items = items + l.nextToken = token + return err +} + +func (c *Client) ListRelease(ctx context.Context, project string) (*ReleaseList, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + return c.ListReleaseWithMaxResults(ctx, project, ReleaseMaxPage) + +} + +func (c *Client) ListReleaseWithMaxResults(ctx context.Context, project string, pageSize int32) (*ReleaseList, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // Create a 
resource object so that we can use proper url normalization methods. + r := &Release{ + Project: &project, + } + items, token, err := c.listRelease(ctx, r, "", pageSize) + if err != nil { + return nil, err + } + return &ReleaseList{ + Items: items, + nextToken: token, + pageSize: pageSize, + resource: r, + }, nil +} + +func (c *Client) GetRelease(ctx context.Context, r *Release) (*Release, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // This is *purposefully* supressing errors. + // This function is used with url-normalized values + not URL normalized values. + // URL Normalized values will throw unintentional errors, since those values are not of the proper parent form. + extractReleaseFields(r) + + b, err := c.getReleaseRaw(ctx, r) + if err != nil { + if dcl.IsNotFoundOrCode(err, 400) { + return nil, &googleapi.Error{ + Code: 404, + Message: err.Error(), + } + } + return nil, err + } + result, err := unmarshalRelease(b, c, r) + if err != nil { + return nil, err + } + result.Project = r.Project + result.Name = r.Name + + c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) + result, err = canonicalizeReleaseNewState(c, result, r) + if err != nil { + return nil, err + } + if err := postReadExtractReleaseFields(result); err != nil { + return result, err + } + c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result) + + return result, nil +} + +func (c *Client) DeleteRelease(ctx context.Context, r *Release) error { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if r == nil { + return fmt.Errorf("Release resource is nil") + } + c.Config.Logger.InfoWithContext(ctx, "Deleting Release...") + deleteOp := deleteReleaseOperation{} + return deleteOp.do(ctx, r, 
c) +} + +// DeleteAllRelease deletes all resources that the filter functions returns true on. +func (c *Client) DeleteAllRelease(ctx context.Context, project string, filter func(*Release) bool) error { + listObj, err := c.ListRelease(ctx, project) + if err != nil { + return err + } + + err = c.deleteAllRelease(ctx, filter, listObj.Items) + if err != nil { + return err + } + for listObj.HasNext() { + err = listObj.Next(ctx, c) + if err != nil { + return nil + } + err = c.deleteAllRelease(ctx, filter, listObj.Items) + if err != nil { + return err + } + } + return nil +} + +func (c *Client) ApplyRelease(ctx context.Context, rawDesired *Release, opts ...dcl.ApplyOption) (*Release, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + ctx = dcl.ContextWithRequestID(ctx) + var resultNewState *Release + err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + newState, err := applyReleaseHelper(c, ctx, rawDesired, opts...) + resultNewState = newState + if err != nil { + // If the error is 409, there is conflict in resource update. + // Here we want to apply changes based on latest state. + if dcl.IsConflictError(err) { + return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} + } + return nil, err + } + return nil, nil + }, c.Config.RetryProvider) + return resultNewState, err +} + +func applyReleaseHelper(c *Client, ctx context.Context, rawDesired *Release, opts ...dcl.ApplyOption) (*Release, error) { + c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyRelease...") + c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) + + // 1.1: Validation of user-specified fields in desired state. + if err := rawDesired.validate(); err != nil { + return nil, err + } + + if err := extractReleaseFields(rawDesired); err != nil { + return nil, err + } + + initial, desired, fieldDiffs, err := c.releaseDiffsForRawDesired(ctx, rawDesired, opts...) 
+ if err != nil { + return nil, fmt.Errorf("failed to create a diff: %w", err) + } + + diffs, err := convertFieldDiffsToReleaseDiffs(c.Config, fieldDiffs, opts) + if err != nil { + return nil, err + } + + // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). + + // 2.3: Lifecycle Directive Check + var create bool + lp := dcl.FetchLifecycleParams(opts) + if initial == nil { + if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} + } + create = true + } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), + } + } else { + for _, d := range diffs { + if d.RequiresRecreate { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), + } + } + if dcl.HasLifecycleParam(lp, dcl.BlockModification) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} + } + } + } + + // 2.4 Imperative Request Planning + var ops []releaseApiOperation + if create { + ops = append(ops, &createReleaseOperation{}) + } else { + for _, d := range diffs { + ops = append(ops, d.UpdateOp) + } + } + c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) + + // 2.5 Request Actuation + for _, op := range ops { + c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) + if err := op.do(ctx, desired, c); err != nil { + c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", op, op, err) + return nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) + } + return applyReleaseDiff(c, ctx, desired, rawDesired, ops, opts...) 
+} + +func applyReleaseDiff(c *Client, ctx context.Context, desired *Release, rawDesired *Release, ops []releaseApiOperation, opts ...dcl.ApplyOption) (*Release, error) { + // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") + rawNew, err := c.GetRelease(ctx, desired) + if err != nil { + return nil, err + } + // Get additional values from the first response. + // These values should be merged into the newState above. + if len(ops) > 0 { + lastOp := ops[len(ops)-1] + if o, ok := lastOp.(*createReleaseOperation); ok { + if r, hasR := o.FirstResponse(); hasR { + + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") + + fullResp, err := unmarshalMapRelease(r, c, rawDesired) + if err != nil { + return nil, err + } + + rawNew, err = canonicalizeReleaseNewState(c, rawNew, fullResp) + if err != nil { + return nil, err + } + } + } + } + + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) + // 3.2b Canonicalization of raw new state using raw desired state + newState, err := canonicalizeReleaseNewState(c, rawNew, rawDesired) + if err != nil { + return rawNew, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) + // 3.3 Comparison of the new state and raw desired state. + // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE + newDesired, err := canonicalizeReleaseDesiredState(rawDesired, newState) + if err != nil { + return newState, err + } + + if err := postReadExtractReleaseFields(newState); err != nil { + return newState, err + } + + // Need to ensure any transformations made here match acceptably in differ. 
+ if err := postReadExtractReleaseFields(newDesired); err != nil { + return newState, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) + newDiffs, err := diffRelease(c, newDesired, newState) + if err != nil { + return newState, err + } + + if len(newDiffs) == 0 { + c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") + } else { + c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) + diffMessages := make([]string, len(newDiffs)) + for i, d := range newDiffs { + diffMessages[i] = fmt.Sprintf("%v", d) + } + return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} + } + c.Config.Logger.InfoWithContext(ctx, "Done Apply.") + return newState, nil +} diff --git a/mmv1/third_party/terraform/services/firebaserules/release_internal.go.tmpl b/mmv1/third_party/terraform/services/firebaserules/release_internal.go.tmpl new file mode 100644 index 000000000000..a9cae068c615 --- /dev/null +++ b/mmv1/third_party/terraform/services/firebaserules/release_internal.go.tmpl @@ -0,0 +1,614 @@ +package firebaserules + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "strings" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +func (r *Release) validate() error { + + if err := dcl.Required(r, "name"); err != nil { + return err + } + if err := dcl.Required(r, "rulesetName"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Project, "Project"); err != nil { + return err + } + return nil +} +func (r *Release) basePath() string { + params := map[string]interface{}{} + return dcl.Nprintf("https://firebaserules.googleapis.com/v1/", params) +} + +func (r *Release) getURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" 
}}/releases/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +func (r *Release) listURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/releases", nr.basePath(), userBasePath, params), nil + +} + +func (r *Release) createURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/releases", nr.basePath(), userBasePath, params), nil + +} + +func (r *Release) deleteURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/releases/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +// releaseApiOperation represents a mutable operation in the underlying REST +// API such as Create, Update, or Delete. 
+type releaseApiOperation interface { + do(context.Context, *Release, *Client) error +} + +func (c *Client) listReleaseRaw(ctx context.Context, r *Release, pageToken string, pageSize int32) ([]byte, error) { + u, err := r.urlNormalized().listURL(c.Config.BasePath) + if err != nil { + return nil, err + } + + m := make(map[string]string) + if pageToken != "" { + m["pageToken"] = pageToken + } + + if pageSize != ReleaseMaxPage { + m["pageSize"] = fmt.Sprintf("%v", pageSize) + } + + u, err = dcl.AddQueryParams(u, m) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + return ioutil.ReadAll(resp.Response.Body) +} + +type listReleaseOperation struct { + Releases []map[string]interface{} `json:"releases"` + Token string `json:"nextPageToken"` +} + +func (c *Client) listRelease(ctx context.Context, r *Release, pageToken string, pageSize int32) ([]*Release, string, error) { + b, err := c.listReleaseRaw(ctx, r, pageToken, pageSize) + if err != nil { + return nil, "", err + } + + var m listReleaseOperation + if err := json.Unmarshal(b, &m); err != nil { + return nil, "", err + } + + var l []*Release + for _, v := range m.Releases { + res, err := unmarshalMapRelease(v, c, r) + if err != nil { + return nil, m.Token, err + } + res.Project = r.Project + l = append(l, res) + } + + return l, m.Token, nil +} + +func (c *Client) deleteAllRelease(ctx context.Context, f func(*Release) bool, resources []*Release) error { + var errors []string + for _, res := range resources { + if f(res) { + // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. 
+ err := c.DeleteRelease(ctx, res) + if err != nil { + errors = append(errors, err.Error()) + } + } + } + if len(errors) > 0 { + return fmt.Errorf("%v", strings.Join(errors, "\n")) + } else { + return nil + } +} + +type deleteReleaseOperation struct{} + +func (op *deleteReleaseOperation) do(ctx context.Context, r *Release, c *Client) error { + r, err := c.GetRelease(ctx, r) + if err != nil { + if dcl.IsNotFoundOrCode(err, 400) { + c.Config.Logger.InfoWithContextf(ctx, "Release not found, returning. Original error: %v", err) + return nil + } + c.Config.Logger.WarningWithContextf(ctx, "GetRelease checking for existence. error: %v", err) + return err + } + + u, err := r.deleteURL(c.Config.BasePath) + if err != nil { + return err + } + + // Delete should never have a body + body := &bytes.Buffer{} + _, err = dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider) + if err != nil { + return fmt.Errorf("failed to delete Release: %w", err) + } + return nil +} + +// Create operations are similar to Update operations, although they do not have +// specific request objects. The Create request object is the json encoding of +// the resource, which is modified by res.marshal to form the base request body. 
+type createReleaseOperation struct { + response map[string]interface{} +} + +func (op *createReleaseOperation) FirstResponse() (map[string]interface{}, bool) { + return op.response, len(op.response) > 0 +} + +func (op *createReleaseOperation) do(ctx context.Context, r *Release, c *Client) error { + c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r) + u, err := r.createURL(c.Config.BasePath) + if err != nil { + return err + } + + req, err := r.marshal(c) + if err != nil { + return err + } + resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider) + if err != nil { + return err + } + + o, err := dcl.ResponseBodyAsJSON(resp) + if err != nil { + return fmt.Errorf("error decoding response body into JSON: %w", err) + } + op.response = o + + if _, err := c.GetRelease(ctx, r); err != nil { + c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) + return err + } + + return nil +} + +func (c *Client) getReleaseRaw(ctx context.Context, r *Release) ([]byte, error) { + + u, err := r.getURL(c.Config.BasePath) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + b, err := ioutil.ReadAll(resp.Response.Body) + if err != nil { + return nil, err + } + + return b, nil +} + +func (c *Client) releaseDiffsForRawDesired(ctx context.Context, rawDesired *Release, opts ...dcl.ApplyOption) (initial, desired *Release, diffs []*dcl.FieldDiff, err error) { + c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") + // First, let us see if the user provided a state hint. If they did, we will start fetching based on that. 
+ var fetchState *Release + if sh := dcl.FetchStateHint(opts); sh != nil { + if r, ok := sh.(*Release); !ok { + c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected Release, got %T", sh) + } else { + fetchState = r + } + } + if fetchState == nil { + fetchState = rawDesired + } + + // 1.2: Retrieval of raw initial state from API + rawInitial, err := c.GetRelease(ctx, fetchState) + if rawInitial == nil { + if !dcl.IsNotFoundOrCode(err, 400) { + c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a Release resource already exists: %s", err) + return nil, nil, nil, fmt.Errorf("failed to retrieve Release resource: %v", err) + } + c.Config.Logger.InfoWithContext(ctx, "Found that Release resource did not exist.") + // Perform canonicalization to pick up defaults. + desired, err = canonicalizeReleaseDesiredState(rawDesired, rawInitial) + return nil, desired, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Found initial state for Release: %v", rawInitial) + c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for Release: %v", rawDesired) + + // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. + if err := extractReleaseFields(rawInitial); err != nil { + return nil, nil, nil, err + } + + // 1.3: Canonicalize raw initial state into initial state. + initial, err = canonicalizeReleaseInitialState(rawInitial, rawDesired) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for Release: %v", initial) + + // 1.4: Canonicalize raw desired state into desired state. + desired, err = canonicalizeReleaseDesiredState(rawDesired, rawInitial, opts...) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for Release: %v", desired) + + // 2.1: Comparison of initial and desired state. 
+ diffs, err = diffRelease(c, desired, initial, opts...) + return initial, desired, diffs, err +} + +func canonicalizeReleaseInitialState(rawInitial, rawDesired *Release) (*Release, error) { + // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. + return rawInitial, nil +} + +/* +* Canonicalizers +* +* These are responsible for converting either a user-specified config or a +* GCP API response to a standard format that can be used for difference checking. +* */ + +func canonicalizeReleaseDesiredState(rawDesired, rawInitial *Release, opts ...dcl.ApplyOption) (*Release, error) { + + if rawInitial == nil { + // Since the initial state is empty, the desired state is all we have. + // We canonicalize the remaining nested objects with nil to pick up defaults. + + return rawDesired, nil + } + canonicalDesired := &Release{} + if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawInitial.Name) { + canonicalDesired.Name = rawInitial.Name + } else { + canonicalDesired.Name = rawDesired.Name + } + if dcl.PartialSelfLinkToSelfLink(rawDesired.RulesetName, rawInitial.RulesetName) { + canonicalDesired.RulesetName = rawInitial.RulesetName + } else { + canonicalDesired.RulesetName = rawDesired.RulesetName + } + if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) { + canonicalDesired.Project = rawInitial.Project + } else { + canonicalDesired.Project = rawDesired.Project + } + return canonicalDesired, nil +} + +func canonicalizeReleaseNewState(c *Client, rawNew, rawDesired *Release) (*Release, error) { + + if dcl.IsEmptyValueIndirect(rawNew.Name) && dcl.IsEmptyValueIndirect(rawDesired.Name) { + rawNew.Name = rawDesired.Name + } else { + if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawNew.Name) { + rawNew.Name = rawDesired.Name + } + } + + if dcl.IsEmptyValueIndirect(rawNew.RulesetName) && dcl.IsEmptyValueIndirect(rawDesired.RulesetName) { + rawNew.RulesetName = rawDesired.RulesetName + } else { + if 
dcl.PartialSelfLinkToSelfLink(rawDesired.RulesetName, rawNew.RulesetName) { + rawNew.RulesetName = rawDesired.RulesetName + } + } + + if dcl.IsEmptyValueIndirect(rawNew.CreateTime) && dcl.IsEmptyValueIndirect(rawDesired.CreateTime) { + rawNew.CreateTime = rawDesired.CreateTime + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.UpdateTime) && dcl.IsEmptyValueIndirect(rawDesired.UpdateTime) { + rawNew.UpdateTime = rawDesired.UpdateTime + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.Disabled) && dcl.IsEmptyValueIndirect(rawDesired.Disabled) { + rawNew.Disabled = rawDesired.Disabled + } else { + if dcl.BoolCanonicalize(rawDesired.Disabled, rawNew.Disabled) { + rawNew.Disabled = rawDesired.Disabled + } + } + + rawNew.Project = rawDesired.Project + + return rawNew, nil +} + +// The differ returns a list of diffs, along with a list of operations that should be taken +// to remedy them. Right now, it does not attempt to consolidate operations - if several +// fields can be fixed with a patch update, it will perform the patch several times. +// Diffs on some fields will be ignored if the `desired` state has an empty (nil) +// value. This empty value indicates that the user does not care about the state for +// the field. Empty fields on the actual object will cause diffs. +// TODO(magic-modules-eng): for efficiency in some resources, add batching. +func diffRelease(c *Client, desired, actual *Release, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) { + if desired == nil || actual == nil { + return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual) + } + + c.Config.Logger.Infof("Diff function called with desired state: %v", desired) + c.Config.Logger.Infof("Diff function called with actual state: %v", actual) + + var fn dcl.FieldName + var newDiffs []*dcl.FieldDiff + // New style diffs. 
+ if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.RulesetName, actual.RulesetName, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("RulesetName")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.CreateTime, actual.CreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Disabled, actual.Disabled, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Disabled")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if len(newDiffs) > 0 { + c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs) + } + return newDiffs, nil +} + +// urlNormalized returns a copy of the resource struct with values normalized +// for URL substitutions. For instance, it converts long-form self-links to +// short-form so they can be substituted in. 
+func (r *Release) urlNormalized() *Release { + normalized := dcl.Copy(*r).(Release) + normalized.Name = r.Name + normalized.RulesetName = dcl.SelfLinkToName(r.RulesetName) + normalized.Project = dcl.SelfLinkToName(r.Project) + return &normalized +} + +func (r *Release) updateURL(userBasePath, updateName string) (string, error) { + return "", fmt.Errorf("unknown update name: %s", updateName) +} + +// marshal encodes the Release resource into JSON for a Create request, and +// performs transformations from the resource schema to the API schema if +// necessary. +func (r *Release) marshal(c *Client) ([]byte, error) { + m, err := expandRelease(c, r) + if err != nil { + return nil, fmt.Errorf("error marshalling Release: %w", err) + } + + return json.Marshal(m) +} + +// unmarshalRelease decodes JSON responses into the Release resource schema. +func unmarshalRelease(b []byte, c *Client, res *Release) (*Release, error) { + var m map[string]interface{} + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + return unmarshalMapRelease(m, c, res) +} + +func unmarshalMapRelease(m map[string]interface{}, c *Client, res *Release) (*Release, error) { + + flattened := flattenRelease(c, m, res) + if flattened == nil { + return nil, fmt.Errorf("attempted to flatten empty json object") + } + return flattened, nil +} + +// expandRelease expands Release into a JSON request object. 
+func expandRelease(c *Client, f *Release) (map[string]interface{}, error) { + m := make(map[string]interface{}) + res := f + _ = res + if v, err := dcl.DeriveFromPattern("projects/%s/releases/%s", f.Name, dcl.SelfLinkToName(f.Project), f.Name); err != nil { + return nil, fmt.Errorf("error expanding Name into name: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["name"] = v + } + if v, err := dcl.DeriveField("projects/%s/rulesets/%s", f.RulesetName, dcl.SelfLinkToName(f.Project), dcl.SelfLinkToName(f.RulesetName)); err != nil { + return nil, fmt.Errorf("error expanding RulesetName into rulesetName: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["rulesetName"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Project into project: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["project"] = v + } + + return m, nil +} + +// flattenRelease flattens Release from a JSON request object into the +// Release type. +func flattenRelease(c *Client, i interface{}, res *Release) *Release { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + if len(m) == 0 { + return nil + } + + resultRes := &Release{} + resultRes.Name = dcl.FlattenString(m["name"]) + resultRes.RulesetName = dcl.FlattenString(m["rulesetName"]) + resultRes.CreateTime = dcl.FlattenString(m["createTime"]) + resultRes.UpdateTime = dcl.FlattenString(m["updateTime"]) + resultRes.Disabled = dcl.FlattenBool(m["disabled"]) + resultRes.Project = dcl.FlattenString(m["project"]) + + return resultRes +} + +// This function returns a matcher that checks whether a serialized resource matches this resource +// in its parameters (as defined by the fields in a Get, which definitionally define resource +// identity). This is useful in extracting the element from a List call. 
+func (r *Release) matcher(c *Client) func([]byte) bool { + return func(b []byte) bool { + cr, err := unmarshalRelease(b, c, r) + if err != nil { + c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.") + return false + } + nr := r.urlNormalized() + ncr := cr.urlNormalized() + c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr) + + if nr.Project == nil && ncr.Project == nil { + c.Config.Logger.Info("Both Project fields null - considering equal.") + } else if nr.Project == nil || ncr.Project == nil { + c.Config.Logger.Info("Only one Project field is null - considering unequal.") + return false + } else if *nr.Project != *ncr.Project { + return false + } + if nr.Name == nil && ncr.Name == nil { + c.Config.Logger.Info("Both Name fields null - considering equal.") + } else if nr.Name == nil || ncr.Name == nil { + c.Config.Logger.Info("Only one Name field is null - considering unequal.") + return false + } else if *nr.Name != *ncr.Name { + return false + } + return true + } +} + +type releaseDiff struct { + // The diff should include one or the other of RequiresRecreate or UpdateOp. + RequiresRecreate bool + UpdateOp releaseApiOperation + FieldName string // used for error logging +} + +func convertFieldDiffsToReleaseDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]releaseDiff, error) { + opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff) + // Map each operation name to the field diffs associated with it. + for _, fd := range fds { + for _, ro := range fd.ResultingOperation { + if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok { + fieldDiffs = append(fieldDiffs, fd) + opNamesToFieldDiffs[ro] = fieldDiffs + } else { + config.Logger.Infof("%s required due to diff: %v", ro, fd) + opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd} + } + } + } + var diffs []releaseDiff + // For each operation name, create a releaseDiff which contains the operation. 
+ for opName, fieldDiffs := range opNamesToFieldDiffs { + // Use the first field diff's field name for logging required recreate error. + diff := releaseDiff{FieldName: fieldDiffs[0].FieldName} + if opName == "Recreate" { + diff.RequiresRecreate = true + } else { + apiOp, err := convertOpNameToReleaseApiOperation(opName, fieldDiffs, opts...) + if err != nil { + return diffs, err + } + diff.UpdateOp = apiOp + } + diffs = append(diffs, diff) + } + return diffs, nil +} + +func convertOpNameToReleaseApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (releaseApiOperation, error) { + switch opName { + + default: + return nil, fmt.Errorf("no such operation with name: %v", opName) + } +} + +func extractReleaseFields(r *Release) error { + return nil +} + +func postReadExtractReleaseFields(r *Release) error { + return nil +} diff --git a/mmv1/third_party/terraform/services/firebaserules/release_schema.go.tmpl b/mmv1/third_party/terraform/services/firebaserules/release_schema.go.tmpl new file mode 100644 index 000000000000..0a0f821c228d --- /dev/null +++ b/mmv1/third_party/terraform/services/firebaserules/release_schema.go.tmpl @@ -0,0 +1,158 @@ +package firebaserules + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +func DCLReleaseSchema() *dcl.Schema { + return &dcl.Schema{ + Info: &dcl.Info{ + Title: "Firebaserules/Release", + StructName: "Release", + Reference: &dcl.Link{ + Text: "Firebase Rules API Documentation", + URL: "https://firebase.google.com/docs/reference/rules/rest#rest-resource:-v1.projects.releases", + }, + Guides: []*dcl.Link{ + &dcl.Link{ + Text: "Get started with Firebase Security Rules", + URL: "https://firebase.google.com/docs/rules/get-started", + }, + }, + }, + Paths: &dcl.Paths{ + Get: &dcl.Path{ + Description: "The function used to get information about a Release", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "release", + Required: true, + Description: "A 
full instance of a Release", + }, + }, + }, + Apply: &dcl.Path{ + Description: "The function used to apply information about a Release", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "release", + Required: true, + Description: "A full instance of a Release", + }, + }, + }, + Delete: &dcl.Path{ + Description: "The function used to delete a Release", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "release", + Required: true, + Description: "A full instance of a Release", + }, + }, + }, + DeleteAll: &dcl.Path{ + Description: "The function used to delete all Release", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "project", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + }, + }, + List: &dcl.Path{ + Description: "The function used to list information about many Release", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "project", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + }, + }, + }, + Components: &dcl.Components{ + Schemas: map[string]*dcl.Component{ + "Release": &dcl.Component{ + Title: "Release", + ID: "projects/{{ "{{" }}project{{ "}}" }}/releases/{{ "{{" }}name{{ "}}" }}", + ParentContainer: "project", + HasCreate: true, + SchemaProperty: dcl.Property{ + Type: "object", + Required: []string{ + "name", + "rulesetName", + "project", + }, + Properties: map[string]*dcl.Property{ + "createTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "CreateTime", + ReadOnly: true, + Description: "Output only. Time the release was created.", + Immutable: true, + }, + "disabled": &dcl.Property{ + Type: "boolean", + GoName: "Disabled", + ReadOnly: true, + Description: "Disable the release to keep it from being served. 
The response code of NOT_FOUND will be given for executables generated from this Release.", + Immutable: true, + }, + "name": &dcl.Property{ + Type: "string", + GoName: "Name", + Description: "Format: `projects/{project_id}/releases/{release_id}`\\Firestore Rules Releases will **always** have the name 'cloud.firestore'", + Immutable: true, + ForwardSlashAllowed: true, + HasLongForm: true, + }, + "project": &dcl.Property{ + Type: "string", + GoName: "Project", + Description: "The project for the resource", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Cloudresourcemanager/Project", + Field: "name", + Parent: true, + }, + }, + Parameter: true, + }, + "rulesetName": &dcl.Property{ + Type: "string", + GoName: "RulesetName", + Description: "Name of the `Ruleset` referred to by this `Release`. The `Ruleset` must exist for the `Release` to be created.", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Firebaserules/Ruleset", + Field: "name", + }, + }, + HasLongForm: true, + }, + "updateTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "UpdateTime", + ReadOnly: true, + Description: "Output only. 
Time the release was updated.", + Immutable: true, + }, + }, + }, + }, + }, + }, + } +} diff --git a/mmv1/third_party/terraform/services/firebaserules/release_utils.go.tmpl b/mmv1/third_party/terraform/services/firebaserules/release_utils.go.tmpl new file mode 100644 index 000000000000..3e85646f1d5d --- /dev/null +++ b/mmv1/third_party/terraform/services/firebaserules/release_utils.go.tmpl @@ -0,0 +1,13 @@ +package firebaserules + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +// EncodeReleaseUpdateRequest encapsulates fields in a release {} block, as expected +// by https://firebase.google.com/docs/reference/rules/rest/v1/projects.releases/patch +func EncodeReleaseUpdateRequest(m map[string]interface{}) map[string]interface{} { + req := make(map[string]interface{}) + dcl.PutMapEntry(req, []string{"release"}, m) + return req +} diff --git a/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_release.go b/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_release.go new file mode 100644 index 000000000000..2da190a1def1 --- /dev/null +++ b/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_release.go @@ -0,0 +1,245 @@ +package firebaserules + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceFirebaserulesRelease() *schema.Resource { + return &schema.Resource{ + Create: resourceFirebaserulesReleaseCreate, + Read: resourceFirebaserulesReleaseRead, + Delete: resourceFirebaserulesReleaseDelete, + + Importer: 
&schema.ResourceImporter{ + State: resourceFirebaserulesReleaseImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + ), + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Format: `projects/{project_id}/releases/{release_id}`\\Firestore Rules Releases will **always** have the name 'cloud.firestore'", + }, + + "ruleset_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Name of the `Ruleset` referred to by this `Release`. The `Ruleset` must exist for the `Release` to be created.", + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The project for the resource", + }, + + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. Time the release was created.", + }, + + "disabled": { + Type: schema.TypeBool, + Computed: true, + Description: "Disable the release to keep it from being served. The response code of NOT_FOUND will be given for executables generated from this Release.", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. 
Time the release was updated.", + }, + }, + } +} + +func resourceFirebaserulesReleaseCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Release{ + Name: dcl.String(d.Get("name").(string)), + RulesetName: dcl.String(d.Get("ruleset_name").(string)), + Project: dcl.String(project), + } + + id, err := obj.ID() + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLFirebaserulesClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyRelease(context.Background(), obj, directive...) 
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating Release: %s", err) + } + + log.Printf("[DEBUG] Finished creating Release %q: %#v", d.Id(), res) + + return resourceFirebaserulesReleaseRead(d, meta) +} + +func resourceFirebaserulesReleaseRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Release{ + Name: dcl.String(d.Get("name").(string)), + RulesetName: dcl.String(d.Get("ruleset_name").(string)), + Project: dcl.String(project), + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLFirebaserulesClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetRelease(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("FirebaserulesRelease %q", d.Id()) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + if err = d.Set("ruleset_name", res.RulesetName); err != nil { + return fmt.Errorf("error setting ruleset_name in state: %s", err) + } + if err = d.Set("project", res.Project); err != nil { + return fmt.Errorf("error setting project in state: %s", err) + } + if 
err = d.Set("create_time", res.CreateTime); err != nil { + return fmt.Errorf("error setting create_time in state: %s", err) + } + if err = d.Set("disabled", res.Disabled); err != nil { + return fmt.Errorf("error setting disabled in state: %s", err) + } + if err = d.Set("update_time", res.UpdateTime); err != nil { + return fmt.Errorf("error setting update_time in state: %s", err) + } + + return nil +} + +func resourceFirebaserulesReleaseDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Release{ + Name: dcl.String(d.Get("name").(string)), + RulesetName: dcl.String(d.Get("ruleset_name").(string)), + Project: dcl.String(project), + } + + log.Printf("[DEBUG] Deleting Release %q", d.Id()) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLFirebaserulesClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + if err := client.DeleteRelease(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting Release: %s", err) + } + + log.Printf("[DEBUG] Finished deleting Release %q", d.Id()) + return nil +} + +func resourceFirebaserulesReleaseImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + if err := tpgresource.ParseImportId([]string{ + "projects/(?P.+)/releases/(?P.+)", + }, d, config); err != nil { + return nil, err + } + + // Replace 
import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/releases/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} diff --git a/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_release_generated_test.go.tmpl b/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_release_generated_test.go.tmpl new file mode 100644 index 000000000000..430e90a47060 --- /dev/null +++ b/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_release_generated_test.go.tmpl @@ -0,0 +1,182 @@ +package firebaserules_test + +import ( + "context" + "fmt" + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" +{{- if eq $.TargetVersionName "ga" }} + firebaserules "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/firebaserules" +{{- else }} + firebaserules "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/firebaserules/beta" +{{- end }} + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func TestAccFirebaserulesRelease_FirestoreReleaseAdditionalHandWritten(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": envvar.GetTestProjectFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckFirebaserulesReleaseDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: 
testAccFirebaserulesRelease_FirestoreReleaseAdditionalHandWritten(context), + }, + { + ResourceName: "google_firebaserules_release.primary", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} +{{- if ne $.TargetVersionName "ga" }} +func TestAccFirebaserulesRelease_StorageReleaseHandWritten(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": envvar.GetTestProjectFromEnv(), + "region": envvar.GetTestRegionFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckFirebaserulesReleaseDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccFirebaserulesRelease_StorageReleaseHandWritten(context), + }, + { + ResourceName: "google_firebaserules_release.primary", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} +{{- end }} + +func testAccFirebaserulesRelease_FirestoreReleaseAdditionalHandWritten(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_firebaserules_release" "primary" { + name = "cloud.firestore/tf-test-database%{random_suffix}" + project = "%{project_name}" + ruleset_name = "projects/%{project_name}/rulesets/${google_firebaserules_ruleset.firestore.name}" +} + +resource "google_firebaserules_ruleset" "firestore" { + project = "%{project_name}" + + source { + files { + content = "service cloud.firestore {match /databases/{database}/documents { match /{document=**} { allow read, write: if false; } } }" + name = "firestore.rules" + } + } +{{- if ne $.TargetVersionName "ga" }} +} + +`, context) +} + +func testAccFirebaserulesRelease_StorageReleaseHandWritten(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_firebaserules_release" "primary" { + provider = google-beta + name = 
"firebase.storage/${google_storage_bucket.bucket.name}"
+  ruleset_name = "projects/%{project_name}/rulesets/${google_firebaserules_ruleset.storage.name}"
+  project      = "%{project_name}"
+
+  lifecycle {
+    replace_triggered_by = [
+      google_firebaserules_ruleset.storage
+    ]
+  }
+}
+
+# Provision a non-default Cloud Storage bucket.
+resource "google_storage_bucket" "bucket" {
+  provider = google-beta
+  project  = "%{project_name}"
+  name     = "tf-test-bucket%{random_suffix}"
+  location = "%{region}"
+}
+
+# Make the Storage bucket accessible for Firebase SDKs, authentication, and Firebase Security Rules.
+resource "google_firebase_storage_bucket" "bucket" {
+  provider  = google-beta
+  project   = "%{project_name}"
+  bucket_id = google_storage_bucket.bucket.name
+}
+
+# Create a ruleset of Firebase Security Rules from a local file.
+resource "google_firebaserules_ruleset" "storage" {
+  provider = google-beta
+  project  = "%{project_name}"
+  source {
+    files {
+      name    = "storage.rules"
+      content = "service firebase.storage {match /b/{bucket}/o {match /{allPaths=**} {allow read, write: if request.auth != null;}}}"
+    }
+  }
+
+  depends_on = [
+    google_firebase_storage_bucket.bucket
+  ]
+{{- end }}
+}
+
+`, context)
+}
+
+// testAccCheckFirebaserulesReleaseDestroyProducer verifies that every Release
+// in state has actually been destroyed by querying the API for each one.
+func testAccCheckFirebaserulesReleaseDestroyProducer(t *testing.T) func(s *terraform.State) error {
+	return func(s *terraform.State) error {
+		for name, rs := range s.RootModule().Resources {
+			// Compare against the exact Terraform resource type; the previous
+			// "rs."-prefixed value never matched, so the destroy check was a no-op.
+			if rs.Type != "google_firebaserules_release" {
+				continue
+			}
+			if strings.HasPrefix(name, "data.") {
+				continue
+			}
+
+			config := acctest.GoogleProviderConfig(t)
+
+			billingProject := ""
+			if config.BillingProject != "" {
+				billingProject = config.BillingProject
+			}
+
+			obj := &firebaserules.Release{
+				Name:        dcl.String(rs.Primary.Attributes["name"]),
+				RulesetName: dcl.String(rs.Primary.Attributes["ruleset_name"]),
+				Project:     dcl.StringOrNil(rs.Primary.Attributes["project"]),
+				CreateTime:  dcl.StringOrNil(rs.Primary.Attributes["create_time"]),
+				Disabled:    
dcl.Bool(rs.Primary.Attributes["disabled"] == "true"), + UpdateTime: dcl.StringOrNil(rs.Primary.Attributes["update_time"]), + } + + client := transport_tpg.NewDCLFirebaserulesClient(config, config.UserAgent, billingProject, 0) + _, err := client.GetRelease(context.Background(), obj) + if err == nil { + return fmt.Errorf("google_firebaserules_release still exists %v", obj) + } + } + return nil + } +} diff --git a/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_release_sweeper.go b/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_release_sweeper.go new file mode 100644 index 000000000000..a8c61a3ece0a --- /dev/null +++ b/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_release_sweeper.go @@ -0,0 +1,53 @@ +package firebaserules + +import ( + "context" + "log" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" +) + +func init() { + sweeper.AddTestSweepersLegacy("FirebaserulesRelease", testSweepFirebaserulesRelease) +} + +func testSweepFirebaserulesRelease(region string) error { + log.Print("[INFO][SWEEPER_LOG] Starting sweeper for FirebaserulesRelease") + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to be used for Delete arguments. 
+ d := map[string]string{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + } + + client := NewDCLFirebaserulesClient(config, config.UserAgent, "", 0) + err = client.DeleteAllRelease(context.Background(), d["project"], isDeletableFirebaserulesRelease) + if err != nil { + return err + } + return nil +} + +func isDeletableFirebaserulesRelease(r *Release) bool { + return sweeper.IsSweepableTestResource(*r.Name) +} diff --git a/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_ruleset.go b/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_ruleset.go new file mode 100644 index 000000000000..2059d81bca50 --- /dev/null +++ b/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_ruleset.go @@ -0,0 +1,409 @@ +package firebaserules + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceFirebaserulesRuleset() *schema.Resource { + return &schema.Resource{ + Create: resourceFirebaserulesRulesetCreate, + Read: resourceFirebaserulesRulesetRead, + Delete: resourceFirebaserulesRulesetDelete, + + Importer: &schema.ResourceImporter{ + State: resourceFirebaserulesRulesetImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + ), + + Schema: map[string]*schema.Schema{ + "source": { + Type: schema.TypeList, + Required: true, + ForceNew: 
true, + Description: "`Source` for the `Ruleset`.", + MaxItems: 1, + Elem: FirebaserulesRulesetSourceSchema(), + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The project for the resource", + }, + + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. Time the `Ruleset` was created.", + }, + + "metadata": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. The metadata for this ruleset.", + Elem: FirebaserulesRulesetMetadataSchema(), + }, + + "name": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. Name of the `Ruleset`. The ruleset_id is auto generated by the service. Format: `projects/{project_id}/rulesets/{ruleset_id}`", + }, + }, + } +} + +func FirebaserulesRulesetSourceSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "files": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "`File` set constituting the `Source` bundle.", + Elem: FirebaserulesRulesetSourceFilesSchema(), + }, + + "language": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "`Language` of the `Source` bundle. If unspecified, the language will default to `FIREBASE_RULES`. Possible values: LANGUAGE_UNSPECIFIED, FIREBASE_RULES, EVENT_FLOW_TRIGGERS", + }, + }, + } +} + +func FirebaserulesRulesetSourceFilesSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "content": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Textual Content.", + }, + + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "File name.", + }, + + "fingerprint": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Fingerprint (e.g. 
github sha) associated with the `File`.", + }, + }, + } +} + +func FirebaserulesRulesetMetadataSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "services": { + Type: schema.TypeList, + Computed: true, + Description: "Services that this ruleset has declarations for (e.g., \"cloud.firestore\"). There may be 0+ of these.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func resourceFirebaserulesRulesetCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Ruleset{ + Source: expandFirebaserulesRulesetSource(d.Get("source")), + Project: dcl.String(project), + } + + id, err := obj.ID() + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLFirebaserulesClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyRuleset(context.Background(), obj, directive...) 
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating Ruleset: %s", err) + } + + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + // ID has a server-generated value, set again after creation. + + id, err = res.ID() + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Ruleset %q: %#v", d.Id(), res) + + return resourceFirebaserulesRulesetRead(d, meta) +} + +func resourceFirebaserulesRulesetRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Ruleset{ + Source: expandFirebaserulesRulesetSource(d.Get("source")), + Project: dcl.String(project), + Name: dcl.StringOrNil(d.Get("name").(string)), + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLFirebaserulesClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetRuleset(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("FirebaserulesRuleset %q", d.Id()) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("source", flattenFirebaserulesRulesetSource(res.Source)); err 
!= nil { + return fmt.Errorf("error setting source in state: %s", err) + } + if err = d.Set("project", res.Project); err != nil { + return fmt.Errorf("error setting project in state: %s", err) + } + if err = d.Set("create_time", res.CreateTime); err != nil { + return fmt.Errorf("error setting create_time in state: %s", err) + } + if err = d.Set("metadata", flattenFirebaserulesRulesetMetadata(res.Metadata)); err != nil { + return fmt.Errorf("error setting metadata in state: %s", err) + } + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + + return nil +} + +func resourceFirebaserulesRulesetDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Ruleset{ + Source: expandFirebaserulesRulesetSource(d.Get("source")), + Project: dcl.String(project), + Name: dcl.StringOrNil(d.Get("name").(string)), + } + + log.Printf("[DEBUG] Deleting Ruleset %q", d.Id()) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLFirebaserulesClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + if err := client.DeleteRuleset(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting Ruleset: %s", err) + } + + log.Printf("[DEBUG] Finished deleting Ruleset %q", d.Id()) + return nil +} + +func resourceFirebaserulesRulesetImport(d *schema.ResourceData, meta interface{}) 
([]*schema.ResourceData, error) {
+	config := meta.(*transport_tpg.Config)
+
+	// Named capture groups are required: (?P without a name is an invalid
+	// Go regexp, and ParseImportId maps group names to schema fields.
+	if err := tpgresource.ParseImportId([]string{
+		"projects/(?P<project>[^/]+)/rulesets/(?P<name>[^/]+)",
+		"(?P<project>[^/]+)/(?P<name>[^/]+)",
+		"(?P<name>[^/]+)",
+	}, d, config); err != nil {
+		return nil, err
+	}
+
+	// Replace import id for the resource id
+	id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/rulesets/{{name}}")
+	if err != nil {
+		return nil, fmt.Errorf("Error constructing id: %s", err)
+	}
+	d.SetId(id)
+
+	return []*schema.ResourceData{d}, nil
+}
+
+// expandFirebaserulesRulesetSource converts the schema representation of the
+// source block into the DCL RulesetSource type.
+func expandFirebaserulesRulesetSource(o interface{}) *RulesetSource {
+	if o == nil {
+		return EmptyRulesetSource
+	}
+	objArr := o.([]interface{})
+	if len(objArr) == 0 || objArr[0] == nil {
+		return EmptyRulesetSource
+	}
+	obj := objArr[0].(map[string]interface{})
+	return &RulesetSource{
+		Files:    expandFirebaserulesRulesetSourceFilesArray(obj["files"]),
+		Language: RulesetSourceLanguageEnumRef(obj["language"].(string)),
+	}
+}
+
+// flattenFirebaserulesRulesetSource converts the DCL RulesetSource type back
+// into its schema representation (a single-element list).
+func flattenFirebaserulesRulesetSource(obj *RulesetSource) interface{} {
+	if obj == nil || obj.Empty() {
+		return nil
+	}
+	transformed := map[string]interface{}{
+		"files":    flattenFirebaserulesRulesetSourceFilesArray(obj.Files),
+		"language": obj.Language,
+	}
+
+	return []interface{}{transformed}
+
+}
+func expandFirebaserulesRulesetSourceFilesArray(o interface{}) []RulesetSourceFiles {
+	if o == nil {
+		return make([]RulesetSourceFiles, 0)
+	}
+
+	objs := o.([]interface{})
+	if len(objs) == 0 || objs[0] == nil {
+		return make([]RulesetSourceFiles, 0)
+	}
+
+	items := make([]RulesetSourceFiles, 0, len(objs))
+	for _, item := range objs {
+		i := expandFirebaserulesRulesetSourceFiles(item)
+		items = append(items, *i)
+	}
+
+	return items
+}
+
+func expandFirebaserulesRulesetSourceFiles(o interface{}) *RulesetSourceFiles {
+	if o == nil {
+		return EmptyRulesetSourceFiles
+	}
+
+	obj := o.(map[string]interface{})
+	return &RulesetSourceFiles{
+		Content:     dcl.String(obj["content"].(string)),
+		Name:        
dcl.String(obj["name"].(string)), + Fingerprint: dcl.String(obj["fingerprint"].(string)), + } +} + +func flattenFirebaserulesRulesetSourceFilesArray(objs []RulesetSourceFiles) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenFirebaserulesRulesetSourceFiles(&item) + items = append(items, i) + } + + return items +} + +func flattenFirebaserulesRulesetSourceFiles(obj *RulesetSourceFiles) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "content": obj.Content, + "name": obj.Name, + "fingerprint": obj.Fingerprint, + } + + return transformed + +} + +func flattenFirebaserulesRulesetMetadata(obj *RulesetMetadata) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "services": obj.Services, + } + + return []interface{}{transformed} + +} diff --git a/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_ruleset_generated_test.go.tmpl b/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_ruleset_generated_test.go.tmpl new file mode 100644 index 000000000000..ee37076f9796 --- /dev/null +++ b/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_ruleset_generated_test.go.tmpl @@ -0,0 +1,139 @@ +package firebaserules_test + +import ( + "context" + "fmt" + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" +{{- if eq $.TargetVersionName "ga" }} + firebaserules "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/firebaserules" +{{- else }} + firebaserules "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/firebaserules/beta" +{{- end }} + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "strings" + "testing" + + 
"github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func TestAccFirebaserulesRuleset_BasicRuleset(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": envvar.GetTestProjectFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckFirebaserulesRulesetDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccFirebaserulesRuleset_BasicRuleset(context), + }, + { + ResourceName: "google_firebaserules_ruleset.primary", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} +func TestAccFirebaserulesRuleset_MinimalRuleset(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": envvar.GetTestProjectFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckFirebaserulesRulesetDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccFirebaserulesRuleset_MinimalRuleset(context), + }, + { + ResourceName: "google_firebaserules_ruleset.primary", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccFirebaserulesRuleset_BasicRuleset(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_firebaserules_ruleset" "primary" { + source { + files { + content = "service cloud.firestore {match /databases/{database}/documents { match /{document=**} { allow read, write: if false; } } }" + name = "firestore.rules" + fingerprint = "" + } + + language = "" + } + + project = "%{project_name}" +} + + 
+`, context)
+}
+
+func testAccFirebaserulesRuleset_MinimalRuleset(context map[string]interface{}) string {
+	return acctest.Nprintf(`
+resource "google_firebaserules_ruleset" "primary" {
+  source {
+    files {
+      content = "service cloud.firestore {match /databases/{database}/documents { match /{document=**} { allow read, write: if false; } } }"
+      name    = "firestore.rules"
+    }
+  }
+
+  project = "%{project_name}"
+}
+
+
+`, context)
+}
+
+// testAccCheckFirebaserulesRulesetDestroyProducer verifies that every Ruleset
+// in state has actually been destroyed by querying the API for each one.
+func testAccCheckFirebaserulesRulesetDestroyProducer(t *testing.T) func(s *terraform.State) error {
+	return func(s *terraform.State) error {
+		for name, rs := range s.RootModule().Resources {
+			// Compare against the exact Terraform resource type; the previous
+			// "rs."-prefixed value never matched, so the destroy check was a no-op.
+			if rs.Type != "google_firebaserules_ruleset" {
+				continue
+			}
+			if strings.HasPrefix(name, "data.") {
+				continue
+			}
+
+			config := acctest.GoogleProviderConfig(t)
+
+			billingProject := ""
+			if config.BillingProject != "" {
+				billingProject = config.BillingProject
+			}
+
+			obj := &firebaserules.Ruleset{
+				Project:    dcl.StringOrNil(rs.Primary.Attributes["project"]),
+				CreateTime: dcl.StringOrNil(rs.Primary.Attributes["create_time"]),
+				Name:       dcl.StringOrNil(rs.Primary.Attributes["name"]),
+			}
+
+			client := transport_tpg.NewDCLFirebaserulesClient(config, config.UserAgent, billingProject, 0)
+			_, err := client.GetRuleset(context.Background(), obj)
+			if err == nil {
+				return fmt.Errorf("google_firebaserules_ruleset still exists %v", obj)
+			}
+		}
+		return nil
+	}
+}
diff --git a/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_ruleset_sweeper.go b/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_ruleset_sweeper.go
new file mode 100644
index 000000000000..56b32ba3f279
--- /dev/null
+++ b/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_ruleset_sweeper.go
@@ -0,0 +1,53 @@
+package firebaserules
+
+import (
+	"context"
+	"log"
+	"testing"
+
+	"github.com/hashicorp/terraform-provider-google/google/envvar"
+	"github.com/hashicorp/terraform-provider-google/google/sweeper"
+)
+
+func 
init() { + sweeper.AddTestSweepersLegacy("FirebaserulesRuleset", testSweepFirebaserulesRuleset) +} + +func testSweepFirebaserulesRuleset(region string) error { + log.Print("[INFO][SWEEPER_LOG] Starting sweeper for FirebaserulesRuleset") + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to be used for Delete arguments. + d := map[string]string{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + } + + client := NewDCLFirebaserulesClient(config, config.UserAgent, "", 0) + err = client.DeleteAllRuleset(context.Background(), d["project"], isDeletableFirebaserulesRuleset) + if err != nil { + return err + } + return nil +} + +func isDeletableFirebaserulesRuleset(r *Ruleset) bool { + return sweeper.IsSweepableTestResource(*r.Name) +} diff --git a/mmv1/third_party/terraform/services/firebaserules/ruleset.go.tmpl b/mmv1/third_party/terraform/services/firebaserules/ruleset.go.tmpl new file mode 100644 index 000000000000..36b1055d12f4 --- /dev/null +++ b/mmv1/third_party/terraform/services/firebaserules/ruleset.go.tmpl @@ -0,0 +1,539 @@ +package firebaserules + +import ( + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "time" + +{{- if ne $.TargetVersionName "ga" }} + "google.golang.org/api/googleapi" +{{- end }} + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +{{- if eq $.TargetVersionName "ga" }} + "google.golang.org/api/googleapi" +{{- end }} +) + +type Ruleset struct { + Name *string `json:"name"` + Source *RulesetSource `json:"source"` + CreateTime *string `json:"createTime"` + Metadata 
*RulesetMetadata `json:"metadata"`
+	Project    *string          `json:"project"`
+}
+
+func (r *Ruleset) String() string {
+	return dcl.SprintResource(r)
+}
+
+// The enum RulesetSourceLanguageEnum.
+type RulesetSourceLanguageEnum string
+
+// RulesetSourceLanguageEnumRef returns a *RulesetSourceLanguageEnum with the value of string s.
+// Note: a non-nil pointer is always returned, even for the empty string; Validate
+// treats the empty enum value as valid.
+func RulesetSourceLanguageEnumRef(s string) *RulesetSourceLanguageEnum {
+	v := RulesetSourceLanguageEnum(s)
+	return &v
+}
+
+// Validate reports an error unless v is empty or one of the enum's known values.
+func (v RulesetSourceLanguageEnum) Validate() error {
+	if string(v) == "" {
+		// Empty enum is okay.
+		return nil
+	}
+	for _, s := range []string{"LANGUAGE_UNSPECIFIED", "FIREBASE_RULES", "EVENT_FLOW_TRIGGERS"} {
+		if string(v) == s {
+			return nil
+		}
+	}
+	return &dcl.EnumInvalidError{
+		Enum:  "RulesetSourceLanguageEnum",
+		Value: string(v),
+		Valid: []string{},
+	}
+}
+
+type RulesetSource struct {
+	empty    bool                       `json:"-"`
+	Files    []RulesetSourceFiles       `json:"files"`
+	Language *RulesetSourceLanguageEnum `json:"language"`
+}
+
+type jsonRulesetSource RulesetSource
+
+// UnmarshalJSON decodes a RulesetSource; an empty JSON object is mapped to the
+// canonical EmptyRulesetSource sentinel so Empty() reports true for it.
+func (r *RulesetSource) UnmarshalJSON(data []byte) error {
+	var res jsonRulesetSource
+	if err := json.Unmarshal(data, &res); err != nil {
+		return err
+	}
+
+	var m map[string]interface{}
+	json.Unmarshal(data, &m)
+
+	if len(m) == 0 {
+		*r = *EmptyRulesetSource
+	} else {
+
+		r.Files = res.Files
+
+		r.Language = res.Language
+
+	}
+	return nil
+}
+
+// This object is used to assert a desired state where this RulesetSource is
+// empty. Go lacks global const objects, but this object should be treated
+// as one. Modifying this object will have undesirable results.
+var EmptyRulesetSource *RulesetSource = &RulesetSource{empty: true} + +func (r *RulesetSource) Empty() bool { + return r.empty +} + +func (r *RulesetSource) String() string { + return dcl.SprintResource(r) +} + +func (r *RulesetSource) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type RulesetSourceFiles struct { + empty bool `json:"-"` + Content *string `json:"content"` + Name *string `json:"name"` + Fingerprint *string `json:"fingerprint"` +} + +type jsonRulesetSourceFiles RulesetSourceFiles + +func (r *RulesetSourceFiles) UnmarshalJSON(data []byte) error { + var res jsonRulesetSourceFiles + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyRulesetSourceFiles + } else { + + r.Content = res.Content + + r.Name = res.Name + + r.Fingerprint = res.Fingerprint + + } + return nil +} + +// This object is used to assert a desired state where this RulesetSourceFiles is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyRulesetSourceFiles *RulesetSourceFiles = &RulesetSourceFiles{empty: true} + +func (r *RulesetSourceFiles) Empty() bool { + return r.empty +} + +func (r *RulesetSourceFiles) String() string { + return dcl.SprintResource(r) +} + +func (r *RulesetSourceFiles) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type RulesetMetadata struct { + empty bool `json:"-"` + Services []string `json:"services"` +} + +type jsonRulesetMetadata RulesetMetadata + +func (r *RulesetMetadata) UnmarshalJSON(data []byte) error { + var res jsonRulesetMetadata + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyRulesetMetadata + } else { + + r.Services = res.Services + + } + return nil +} + +// This object is used to assert a desired state where this RulesetMetadata is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyRulesetMetadata *RulesetMetadata = &RulesetMetadata{empty: true} + +func (r *RulesetMetadata) Empty() bool { + return r.empty +} + +func (r *RulesetMetadata) String() string { + return dcl.SprintResource(r) +} + +func (r *RulesetMetadata) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +// Describe returns a simple description of this resource to ensure that automated tools +// can identify it. 
+func (r *Ruleset) Describe() dcl.ServiceTypeVersion { + return dcl.ServiceTypeVersion{ + Service: "firebaserules", + Type: "Ruleset", +{{- if ne $.TargetVersionName "ga" }} + Version: "beta", +{{- else }} + Version: "firebaserules", +{{- end }} + } +} + +func (r *Ruleset) ID() (string, error) { + if err := extractRulesetFields(r); err != nil { + return "", err + } + nr := r.urlNormalized() + params := map[string]interface{}{ + "name": dcl.ValueOrEmptyString(nr.Name), + "source": dcl.ValueOrEmptyString(nr.Source), + "create_time": dcl.ValueOrEmptyString(nr.CreateTime), + "metadata": dcl.ValueOrEmptyString(nr.Metadata), + "project": dcl.ValueOrEmptyString(nr.Project), + } + return dcl.Nprintf("projects/{{ "{{" }}project{{ "}}" }}/rulesets/{{ "{{" }}name{{ "}}" }}", params), nil +} + +const RulesetMaxPage = -1 + +type RulesetList struct { + Items []*Ruleset + + nextToken string + + pageSize int32 + + resource *Ruleset +} + +func (l *RulesetList) HasNext() bool { + return l.nextToken != "" +} + +func (l *RulesetList) Next(ctx context.Context, c *Client) error { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if !l.HasNext() { + return fmt.Errorf("no next page") + } + items, token, err := c.listRuleset(ctx, l.resource, l.nextToken, l.pageSize) + if err != nil { + return err + } + l.Items = items + l.nextToken = token + return err +} + +func (c *Client) ListRuleset(ctx context.Context, project string) (*RulesetList, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + return c.ListRulesetWithMaxResults(ctx, project, RulesetMaxPage) + +} + +func (c *Client) ListRulesetWithMaxResults(ctx context.Context, project string, pageSize int32) (*RulesetList, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // Create a resource object so that we can use proper url normalization 
methods. + r := &Ruleset{ + Project: &project, + } + items, token, err := c.listRuleset(ctx, r, "", pageSize) + if err != nil { + return nil, err + } + return &RulesetList{ + Items: items, + nextToken: token, + pageSize: pageSize, + resource: r, + }, nil +} + +func (c *Client) GetRuleset(ctx context.Context, r *Ruleset) (*Ruleset, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // This is *purposefully* supressing errors. + // This function is used with url-normalized values + not URL normalized values. + // URL Normalized values will throw unintentional errors, since those values are not of the proper parent form. + extractRulesetFields(r) + + b, err := c.getRulesetRaw(ctx, r) + if err != nil { + if dcl.IsNotFoundOrCode(err, 400) { + return nil, &googleapi.Error{ + Code: 404, + Message: err.Error(), + } + } + return nil, err + } + result, err := unmarshalRuleset(b, c, r) + if err != nil { + return nil, err + } + result.Project = r.Project + result.Name = r.Name + + c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) + result, err = canonicalizeRulesetNewState(c, result, r) + if err != nil { + return nil, err + } + if err := postReadExtractRulesetFields(result); err != nil { + return result, err + } + c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result) + + return result, nil +} + +func (c *Client) DeleteRuleset(ctx context.Context, r *Ruleset) error { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if r == nil { + return fmt.Errorf("Ruleset resource is nil") + } + c.Config.Logger.InfoWithContext(ctx, "Deleting Ruleset...") + deleteOp := deleteRulesetOperation{} + return deleteOp.do(ctx, r, c) +} + +// DeleteAllRuleset deletes all resources that the 
filter functions returns true on. +func (c *Client) DeleteAllRuleset(ctx context.Context, project string, filter func(*Ruleset) bool) error { + listObj, err := c.ListRuleset(ctx, project) + if err != nil { + return err + } + + err = c.deleteAllRuleset(ctx, filter, listObj.Items) + if err != nil { + return err + } + for listObj.HasNext() { + err = listObj.Next(ctx, c) + if err != nil { + return nil + } + err = c.deleteAllRuleset(ctx, filter, listObj.Items) + if err != nil { + return err + } + } + return nil +} + +func (c *Client) ApplyRuleset(ctx context.Context, rawDesired *Ruleset, opts ...dcl.ApplyOption) (*Ruleset, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + ctx = dcl.ContextWithRequestID(ctx) + var resultNewState *Ruleset + err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + newState, err := applyRulesetHelper(c, ctx, rawDesired, opts...) + resultNewState = newState + if err != nil { + // If the error is 409, there is conflict in resource update. + // Here we want to apply changes based on latest state. + if dcl.IsConflictError(err) { + return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} + } + return nil, err + } + return nil, nil + }, c.Config.RetryProvider) + return resultNewState, err +} + +func applyRulesetHelper(c *Client, ctx context.Context, rawDesired *Ruleset, opts ...dcl.ApplyOption) (*Ruleset, error) { + c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyRuleset...") + c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) + + // 1.1: Validation of user-specified fields in desired state. + if err := rawDesired.validate(); err != nil { + return nil, err + } + + if err := extractRulesetFields(rawDesired); err != nil { + return nil, err + } + + initial, desired, fieldDiffs, err := c.rulesetDiffsForRawDesired(ctx, rawDesired, opts...) 
+ if err != nil { + return nil, fmt.Errorf("failed to create a diff: %w", err) + } + + diffs, err := convertFieldDiffsToRulesetDiffs(c.Config, fieldDiffs, opts) + if err != nil { + return nil, err + } + + // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). + + // 2.3: Lifecycle Directive Check + var create bool + lp := dcl.FetchLifecycleParams(opts) + if initial == nil { + if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} + } + create = true + } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), + } + } else { + for _, d := range diffs { + if d.RequiresRecreate { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), + } + } + if dcl.HasLifecycleParam(lp, dcl.BlockModification) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} + } + } + } + + // 2.4 Imperative Request Planning + var ops []rulesetApiOperation + if create { + ops = append(ops, &createRulesetOperation{}) + } else { + for _, d := range diffs { + ops = append(ops, d.UpdateOp) + } + } + c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) + + // 2.5 Request Actuation + for _, op := range ops { + c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) + if err := op.do(ctx, desired, c); err != nil { + c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", op, op, err) + return nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) + } + return applyRulesetDiff(c, ctx, desired, rawDesired, ops, opts...) 
+} + +func applyRulesetDiff(c *Client, ctx context.Context, desired *Ruleset, rawDesired *Ruleset, ops []rulesetApiOperation, opts ...dcl.ApplyOption) (*Ruleset, error) { + // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") + rawNew, err := c.GetRuleset(ctx, desired) + if err != nil { + return nil, err + } + // Get additional values from the first response. + // These values should be merged into the newState above. + if len(ops) > 0 { + lastOp := ops[len(ops)-1] + if o, ok := lastOp.(*createRulesetOperation); ok { + if r, hasR := o.FirstResponse(); hasR { + + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") + + fullResp, err := unmarshalMapRuleset(r, c, rawDesired) + if err != nil { + return nil, err + } + + rawNew, err = canonicalizeRulesetNewState(c, rawNew, fullResp) + if err != nil { + return nil, err + } + } + } + } + + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) + // 3.2b Canonicalization of raw new state using raw desired state + newState, err := canonicalizeRulesetNewState(c, rawNew, rawDesired) + if err != nil { + return rawNew, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) + // 3.3 Comparison of the new state and raw desired state. + // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE + newDesired, err := canonicalizeRulesetDesiredState(rawDesired, newState) + if err != nil { + return newState, err + } + + if err := postReadExtractRulesetFields(newState); err != nil { + return newState, err + } + + // Need to ensure any transformations made here match acceptably in differ. 
+ if err := postReadExtractRulesetFields(newDesired); err != nil { + return newState, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) + newDiffs, err := diffRuleset(c, newDesired, newState) + if err != nil { + return newState, err + } + + if len(newDiffs) == 0 { + c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") + } else { + c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) + diffMessages := make([]string, len(newDiffs)) + for i, d := range newDiffs { + diffMessages[i] = fmt.Sprintf("%v", d) + } + return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} + } + c.Config.Logger.InfoWithContext(ctx, "Done Apply.") + return newState, nil +} diff --git a/mmv1/third_party/terraform/services/firebaserules/ruleset_internal.go.tmpl b/mmv1/third_party/terraform/services/firebaserules/ruleset_internal.go.tmpl new file mode 100644 index 000000000000..a0cf13b109b8 --- /dev/null +++ b/mmv1/third_party/terraform/services/firebaserules/ruleset_internal.go.tmpl @@ -0,0 +1,1577 @@ +package firebaserules + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "strings" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +func (r *Ruleset) validate() error { + + if err := dcl.Required(r, "source"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Project, "Project"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.Source) { + if err := r.Source.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.Metadata) { + if err := r.Metadata.validate(); err != nil { + return err + } + } + return nil +} +func (r *RulesetSource) validate() error { + if err := dcl.Required(r, "files"); err != nil { + return err + } + return nil +} +func (r *RulesetSourceFiles) validate() error { + if err := dcl.Required(r, "content"); err != nil { + return err + } + if err := dcl.Required(r, 
"name"); err != nil { + return err + } + return nil +} +func (r *RulesetMetadata) validate() error { + return nil +} +func (r *Ruleset) basePath() string { + params := map[string]interface{}{} + return dcl.Nprintf("https://firebaserules.googleapis.com/v1/", params) +} + +func (r *Ruleset) getURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/rulesets/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +func (r *Ruleset) listURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/rulesets", nr.basePath(), userBasePath, params), nil + +} + +func (r *Ruleset) createURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/rulesets", nr.basePath(), userBasePath, params), nil + +} + +func (r *Ruleset) deleteURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/rulesets/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +// rulesetApiOperation represents a mutable operation in the underlying REST +// API such as Create, Update, or Delete. 
+type rulesetApiOperation interface { + do(context.Context, *Ruleset, *Client) error +} + +func (c *Client) listRulesetRaw(ctx context.Context, r *Ruleset, pageToken string, pageSize int32) ([]byte, error) { + u, err := r.urlNormalized().listURL(c.Config.BasePath) + if err != nil { + return nil, err + } + + m := make(map[string]string) + if pageToken != "" { + m["pageToken"] = pageToken + } + + if pageSize != RulesetMaxPage { + m["pageSize"] = fmt.Sprintf("%v", pageSize) + } + + u, err = dcl.AddQueryParams(u, m) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + return ioutil.ReadAll(resp.Response.Body) +} + +type listRulesetOperation struct { + Rulesets []map[string]interface{} `json:"rulesets"` + Token string `json:"nextPageToken"` +} + +func (c *Client) listRuleset(ctx context.Context, r *Ruleset, pageToken string, pageSize int32) ([]*Ruleset, string, error) { + b, err := c.listRulesetRaw(ctx, r, pageToken, pageSize) + if err != nil { + return nil, "", err + } + + var m listRulesetOperation + if err := json.Unmarshal(b, &m); err != nil { + return nil, "", err + } + + var l []*Ruleset + for _, v := range m.Rulesets { + res, err := unmarshalMapRuleset(v, c, r) + if err != nil { + return nil, m.Token, err + } + res.Project = r.Project + l = append(l, res) + } + + return l, m.Token, nil +} + +func (c *Client) deleteAllRuleset(ctx context.Context, f func(*Ruleset) bool, resources []*Ruleset) error { + var errors []string + for _, res := range resources { + if f(res) { + // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. 
+ err := c.DeleteRuleset(ctx, res) + if err != nil { + errors = append(errors, err.Error()) + } + } + } + if len(errors) > 0 { + return fmt.Errorf("%v", strings.Join(errors, "\n")) + } else { + return nil + } +} + +type deleteRulesetOperation struct{} + +func (op *deleteRulesetOperation) do(ctx context.Context, r *Ruleset, c *Client) error { + r, err := c.GetRuleset(ctx, r) + if err != nil { + if dcl.IsNotFoundOrCode(err, 400) { + c.Config.Logger.InfoWithContextf(ctx, "Ruleset not found, returning. Original error: %v", err) + return nil + } + c.Config.Logger.WarningWithContextf(ctx, "GetRuleset checking for existence. error: %v", err) + return err + } + + u, err := r.deleteURL(c.Config.BasePath) + if err != nil { + return err + } + + // Delete should never have a body + body := &bytes.Buffer{} + _, err = dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider) + if err != nil { + return fmt.Errorf("failed to delete Ruleset: %w", err) + } + return nil +} + +// Create operations are similar to Update operations, although they do not have +// specific request objects. The Create request object is the json encoding of +// the resource, which is modified by res.marshal to form the base request body. +type createRulesetOperation struct { + response map[string]interface{} +} + +func (op *createRulesetOperation) FirstResponse() (map[string]interface{}, bool) { + return op.response, len(op.response) > 0 +} + +func (op *createRulesetOperation) do(ctx context.Context, r *Ruleset, c *Client) error { + c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r) + u, err := r.createURL(c.Config.BasePath) + if err != nil { + return err + } + + req, err := r.marshal(c) + if err != nil { + return err + } + if r.Name != nil { + // Allowing creation to continue with Name set could result in a Ruleset with the wrong Name. 
+ return fmt.Errorf("server-generated parameter Name was specified by user as %v, should be unspecified", dcl.ValueOrEmptyString(r.Name)) + } + resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider) + if err != nil { + return err + } + + o, err := dcl.ResponseBodyAsJSON(resp) + if err != nil { + return fmt.Errorf("error decoding response body into JSON: %w", err) + } + op.response = o + + // Include Name in URL substitution for initial GET request. + m := op.response + r.Name = dcl.SelfLinkToName(dcl.FlattenString(m["name"])) + + if _, err := c.GetRuleset(ctx, r); err != nil { + c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) + return err + } + + return nil +} + +func (c *Client) getRulesetRaw(ctx context.Context, r *Ruleset) ([]byte, error) { + + u, err := r.getURL(c.Config.BasePath) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + b, err := ioutil.ReadAll(resp.Response.Body) + if err != nil { + return nil, err + } + + return b, nil +} + +func (c *Client) rulesetDiffsForRawDesired(ctx context.Context, rawDesired *Ruleset, opts ...dcl.ApplyOption) (initial, desired *Ruleset, diffs []*dcl.FieldDiff, err error) { + c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") + // First, let us see if the user provided a state hint. If they did, we will start fetching based on that. + var fetchState *Ruleset + if sh := dcl.FetchStateHint(opts); sh != nil { + if r, ok := sh.(*Ruleset); !ok { + c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected Ruleset, got %T", sh) + } else { + fetchState = r + } + } + if fetchState == nil { + fetchState = rawDesired + } + + if fetchState.Name == nil { + // We cannot perform a get because of lack of information. 
We have to assume + // that this is being created for the first time. + desired, err := canonicalizeRulesetDesiredState(rawDesired, nil) + return nil, desired, nil, err + } + // 1.2: Retrieval of raw initial state from API + rawInitial, err := c.GetRuleset(ctx, fetchState) + if rawInitial == nil { + if !dcl.IsNotFoundOrCode(err, 400) { + c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a Ruleset resource already exists: %s", err) + return nil, nil, nil, fmt.Errorf("failed to retrieve Ruleset resource: %v", err) + } + c.Config.Logger.InfoWithContext(ctx, "Found that Ruleset resource did not exist.") + // Perform canonicalization to pick up defaults. + desired, err = canonicalizeRulesetDesiredState(rawDesired, rawInitial) + return nil, desired, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Found initial state for Ruleset: %v", rawInitial) + c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for Ruleset: %v", rawDesired) + + // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. + if err := extractRulesetFields(rawInitial); err != nil { + return nil, nil, nil, err + } + + // 1.3: Canonicalize raw initial state into initial state. + initial, err = canonicalizeRulesetInitialState(rawInitial, rawDesired) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for Ruleset: %v", initial) + + // 1.4: Canonicalize raw desired state into desired state. + desired, err = canonicalizeRulesetDesiredState(rawDesired, rawInitial, opts...) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for Ruleset: %v", desired) + + // 2.1: Comparison of initial and desired state. + diffs, err = diffRuleset(c, desired, initial, opts...) 
+ return initial, desired, diffs, err +} + +func canonicalizeRulesetInitialState(rawInitial, rawDesired *Ruleset) (*Ruleset, error) { + // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. + return rawInitial, nil +} + +/* +* Canonicalizers +* +* These are responsible for converting either a user-specified config or a +* GCP API response to a standard format that can be used for difference checking. +* */ + +func canonicalizeRulesetDesiredState(rawDesired, rawInitial *Ruleset, opts ...dcl.ApplyOption) (*Ruleset, error) { + + if rawInitial == nil { + // Since the initial state is empty, the desired state is all we have. + // We canonicalize the remaining nested objects with nil to pick up defaults. + rawDesired.Source = canonicalizeRulesetSource(rawDesired.Source, nil, opts...) + rawDesired.Metadata = canonicalizeRulesetMetadata(rawDesired.Metadata, nil, opts...) + + return rawDesired, nil + } + canonicalDesired := &Ruleset{} + if dcl.IsZeroValue(rawDesired.Name) || (dcl.IsEmptyValueIndirect(rawDesired.Name) && dcl.IsEmptyValueIndirect(rawInitial.Name)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + canonicalDesired.Name = rawInitial.Name + } else { + canonicalDesired.Name = rawDesired.Name + } + canonicalDesired.Source = canonicalizeRulesetSource(rawDesired.Source, rawInitial.Source, opts...) 
+ if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) { + canonicalDesired.Project = rawInitial.Project + } else { + canonicalDesired.Project = rawDesired.Project + } + return canonicalDesired, nil +} + +func canonicalizeRulesetNewState(c *Client, rawNew, rawDesired *Ruleset) (*Ruleset, error) { + + if dcl.IsEmptyValueIndirect(rawNew.Name) && dcl.IsEmptyValueIndirect(rawDesired.Name) { + rawNew.Name = rawDesired.Name + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.Source) && dcl.IsEmptyValueIndirect(rawDesired.Source) { + rawNew.Source = rawDesired.Source + } else { + rawNew.Source = canonicalizeNewRulesetSource(c, rawDesired.Source, rawNew.Source) + } + + if dcl.IsEmptyValueIndirect(rawNew.CreateTime) && dcl.IsEmptyValueIndirect(rawDesired.CreateTime) { + rawNew.CreateTime = rawDesired.CreateTime + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.Metadata) && dcl.IsEmptyValueIndirect(rawDesired.Metadata) { + rawNew.Metadata = rawDesired.Metadata + } else { + rawNew.Metadata = canonicalizeNewRulesetMetadata(c, rawDesired.Metadata, rawNew.Metadata) + } + + rawNew.Project = rawDesired.Project + + return rawNew, nil +} + +func canonicalizeRulesetSource(des, initial *RulesetSource, opts ...dcl.ApplyOption) *RulesetSource { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &RulesetSource{} + + cDes.Files = canonicalizeRulesetSourceFilesSlice(des.Files, initial.Files, opts...) + if dcl.IsZeroValue(des.Language) || (dcl.IsEmptyValueIndirect(des.Language) && dcl.IsEmptyValueIndirect(initial.Language)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.Language = initial.Language + } else { + cDes.Language = des.Language + } + + return cDes +} + +func canonicalizeRulesetSourceSlice(des, initial []RulesetSource, opts ...dcl.ApplyOption) []RulesetSource { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]RulesetSource, 0, len(des)) + for _, d := range des { + cd := canonicalizeRulesetSource(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]RulesetSource, 0, len(des)) + for i, d := range des { + cd := canonicalizeRulesetSource(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewRulesetSource(c *Client, des, nw *RulesetSource) *RulesetSource { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for RulesetSource while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + nw.Files = canonicalizeNewRulesetSourceFilesSlice(c, des.Files, nw.Files) + + return nw +} + +func canonicalizeNewRulesetSourceSet(c *Client, des, nw []RulesetSource) []RulesetSource { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []RulesetSource + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareRulesetSourceNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewRulesetSource(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) 
+ + return items +} + +func canonicalizeNewRulesetSourceSlice(c *Client, des, nw []RulesetSource) []RulesetSource { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []RulesetSource + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewRulesetSource(c, &d, &n)) + } + + return items +} + +func canonicalizeRulesetSourceFiles(des, initial *RulesetSourceFiles, opts ...dcl.ApplyOption) *RulesetSourceFiles { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &RulesetSourceFiles{} + + if dcl.StringCanonicalize(des.Content, initial.Content) || dcl.IsZeroValue(des.Content) { + cDes.Content = initial.Content + } else { + cDes.Content = des.Content + } + if dcl.StringCanonicalize(des.Name, initial.Name) || dcl.IsZeroValue(des.Name) { + cDes.Name = initial.Name + } else { + cDes.Name = des.Name + } + if dcl.StringCanonicalize(des.Fingerprint, initial.Fingerprint) || dcl.IsZeroValue(des.Fingerprint) { + cDes.Fingerprint = initial.Fingerprint + } else { + cDes.Fingerprint = des.Fingerprint + } + + return cDes +} + +func canonicalizeRulesetSourceFilesSlice(des, initial []RulesetSourceFiles, opts ...dcl.ApplyOption) []RulesetSourceFiles { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]RulesetSourceFiles, 0, len(des)) + for _, d := range des { + cd := canonicalizeRulesetSourceFiles(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]RulesetSourceFiles, 0, len(des)) + for i, d := range des { + cd := canonicalizeRulesetSourceFiles(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewRulesetSourceFiles(c *Client, des, nw *RulesetSourceFiles) *RulesetSourceFiles { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for RulesetSourceFiles while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Content, nw.Content) { + nw.Content = des.Content + } + if dcl.StringCanonicalize(des.Name, nw.Name) { + nw.Name = des.Name + } + if dcl.StringCanonicalize(des.Fingerprint, nw.Fingerprint) { + nw.Fingerprint = des.Fingerprint + } + + return nw +} + +func canonicalizeNewRulesetSourceFilesSet(c *Client, des, nw []RulesetSourceFiles) []RulesetSourceFiles { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []RulesetSourceFiles + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareRulesetSourceFilesNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewRulesetSourceFiles(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewRulesetSourceFilesSlice(c *Client, des, nw []RulesetSourceFiles) []RulesetSourceFiles { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []RulesetSourceFiles + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewRulesetSourceFiles(c, &d, &n)) + } + + return items +} + +func canonicalizeRulesetMetadata(des, initial *RulesetMetadata, opts ...dcl.ApplyOption) *RulesetMetadata { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &RulesetMetadata{} + + if dcl.StringArrayCanonicalize(des.Services, initial.Services) { + cDes.Services = initial.Services + } else { + cDes.Services = des.Services + } + + return cDes +} + +func canonicalizeRulesetMetadataSlice(des, initial []RulesetMetadata, opts ...dcl.ApplyOption) []RulesetMetadata { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]RulesetMetadata, 0, len(des)) + for _, d := range des { + cd := canonicalizeRulesetMetadata(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]RulesetMetadata, 0, len(des)) + for i, d := range des { + cd := canonicalizeRulesetMetadata(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewRulesetMetadata(c *Client, des, nw *RulesetMetadata) *RulesetMetadata { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for RulesetMetadata while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringArrayCanonicalize(des.Services, nw.Services) { + nw.Services = des.Services + } + + return nw +} + +func canonicalizeNewRulesetMetadataSet(c *Client, des, nw []RulesetMetadata) []RulesetMetadata { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []RulesetMetadata + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareRulesetMetadataNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewRulesetMetadata(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewRulesetMetadataSlice(c *Client, des, nw []RulesetMetadata) []RulesetMetadata { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []RulesetMetadata + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewRulesetMetadata(c, &d, &n)) + } + + return items +} + +// The differ returns a list of diffs, along with a list of operations that should be taken +// to remedy them. Right now, it does not attempt to consolidate operations - if several +// fields can be fixed with a patch update, it will perform the patch several times. +// Diffs on some fields will be ignored if the `desired` state has an empty (nil) +// value. This empty value indicates that the user does not care about the state for +// the field. Empty fields on the actual object will cause diffs. +// TODO(magic-modules-eng): for efficiency in some resources, add batching. 
+func diffRuleset(c *Client, desired, actual *Ruleset, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) { + if desired == nil || actual == nil { + return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual) + } + + c.Config.Logger.Infof("Diff function called with desired state: %v", desired) + c.Config.Logger.Infof("Diff function called with actual state: %v", actual) + + var fn dcl.FieldName + var newDiffs []*dcl.FieldDiff + // New style diffs. + if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Source, actual.Source, dcl.DiffInfo{ObjectFunction: compareRulesetSourceNewStyle, EmptyObject: EmptyRulesetSource, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Source")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.CreateTime, actual.CreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Metadata, actual.Metadata, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareRulesetMetadataNewStyle, EmptyObject: EmptyRulesetMetadata, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Metadata")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if len(newDiffs) > 0 { + c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs) + } + return newDiffs, nil +} +func compareRulesetSourceNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*RulesetSource) + if !ok { + desiredNotPointer, ok := d.(RulesetSource) + if !ok { + return nil, fmt.Errorf("obj %v is not a RulesetSource or *RulesetSource", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*RulesetSource) + if !ok { + actualNotPointer, ok := a.(RulesetSource) + if !ok { + return nil, fmt.Errorf("obj %v is not a RulesetSource", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Files, actual.Files, dcl.DiffInfo{ObjectFunction: compareRulesetSourceFilesNewStyle, EmptyObject: EmptyRulesetSourceFiles, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Files")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Language, actual.Language, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Language")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareRulesetSourceFilesNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*RulesetSourceFiles) + if !ok { + desiredNotPointer, ok := d.(RulesetSourceFiles) + if !ok { + return nil, fmt.Errorf("obj %v is not a RulesetSourceFiles or *RulesetSourceFiles", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*RulesetSourceFiles) + if !ok { + actualNotPointer, ok := a.(RulesetSourceFiles) + if !ok { + return nil, fmt.Errorf("obj %v is not a RulesetSourceFiles", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Content, actual.Content, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Content")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Fingerprint, actual.Fingerprint, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Fingerprint")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareRulesetMetadataNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*RulesetMetadata) + if !ok { + desiredNotPointer, ok := d.(RulesetMetadata) + if !ok { + return nil, fmt.Errorf("obj %v is not a RulesetMetadata or *RulesetMetadata", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*RulesetMetadata) + if !ok { + actualNotPointer, ok := a.(RulesetMetadata) + if !ok { + return nil, fmt.Errorf("obj %v is not a RulesetMetadata", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Services, actual.Services, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Services")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +// urlNormalized returns a copy of the resource struct with values normalized +// for URL substitutions. For instance, it converts long-form self-links to +// short-form so they can be substituted in. +func (r *Ruleset) urlNormalized() *Ruleset { + normalized := dcl.Copy(*r).(Ruleset) + normalized.Name = dcl.SelfLinkToName(r.Name) + normalized.Project = dcl.SelfLinkToName(r.Project) + return &normalized +} + +func (r *Ruleset) updateURL(userBasePath, updateName string) (string, error) { + return "", fmt.Errorf("unknown update name: %s", updateName) +} + +// marshal encodes the Ruleset resource into JSON for a Create request, and +// performs transformations from the resource schema to the API schema if +// necessary. +func (r *Ruleset) marshal(c *Client) ([]byte, error) { + m, err := expandRuleset(c, r) + if err != nil { + return nil, fmt.Errorf("error marshalling Ruleset: %w", err) + } + + return json.Marshal(m) +} + +// unmarshalRuleset decodes JSON responses into the Ruleset resource schema. 
+func unmarshalRuleset(b []byte, c *Client, res *Ruleset) (*Ruleset, error) { + var m map[string]interface{} + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + return unmarshalMapRuleset(m, c, res) +} + +func unmarshalMapRuleset(m map[string]interface{}, c *Client, res *Ruleset) (*Ruleset, error) { + + flattened := flattenRuleset(c, m, res) + if flattened == nil { + return nil, fmt.Errorf("attempted to flatten empty json object") + } + return flattened, nil +} + +// expandRuleset expands Ruleset into a JSON request object. +func expandRuleset(c *Client, f *Ruleset) (map[string]interface{}, error) { + m := make(map[string]interface{}) + res := f + _ = res + if v, err := dcl.DeriveField("projects/%s/rulesets/%s", f.Name, dcl.SelfLinkToName(f.Project), dcl.SelfLinkToName(f.Name)); err != nil { + return nil, fmt.Errorf("error expanding Name into name: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["name"] = v + } + if v, err := expandRulesetSource(c, f.Source, res); err != nil { + return nil, fmt.Errorf("error expanding Source into source: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["source"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Project into project: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["project"] = v + } + + return m, nil +} + +// flattenRuleset flattens Ruleset from a JSON request object into the +// Ruleset type. 
+func flattenRuleset(c *Client, i interface{}, res *Ruleset) *Ruleset { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + if len(m) == 0 { + return nil + } + + resultRes := &Ruleset{} + resultRes.Name = dcl.SelfLinkToName(dcl.FlattenString(m["name"])) + resultRes.Source = flattenRulesetSource(c, m["source"], res) + resultRes.CreateTime = dcl.FlattenString(m["createTime"]) + resultRes.Metadata = flattenRulesetMetadata(c, m["metadata"], res) + resultRes.Project = dcl.FlattenString(m["project"]) + + return resultRes +} + +// expandRulesetSourceMap expands the contents of RulesetSource into a JSON +// request object. +func expandRulesetSourceMap(c *Client, f map[string]RulesetSource, res *Ruleset) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandRulesetSource(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandRulesetSourceSlice expands the contents of RulesetSource into a JSON +// request object. +func expandRulesetSourceSlice(c *Client, f []RulesetSource, res *Ruleset) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandRulesetSource(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenRulesetSourceMap flattens the contents of RulesetSource from a JSON +// response object. 
+func flattenRulesetSourceMap(c *Client, i interface{}, res *Ruleset) map[string]RulesetSource { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]RulesetSource{} + } + + if len(a) == 0 { + return map[string]RulesetSource{} + } + + items := make(map[string]RulesetSource) + for k, item := range a { + items[k] = *flattenRulesetSource(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenRulesetSourceSlice flattens the contents of RulesetSource from a JSON +// response object. +func flattenRulesetSourceSlice(c *Client, i interface{}, res *Ruleset) []RulesetSource { + a, ok := i.([]interface{}) + if !ok { + return []RulesetSource{} + } + + if len(a) == 0 { + return []RulesetSource{} + } + + items := make([]RulesetSource, 0, len(a)) + for _, item := range a { + items = append(items, *flattenRulesetSource(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandRulesetSource expands an instance of RulesetSource into a JSON +// request object. +func expandRulesetSource(c *Client, f *RulesetSource, res *Ruleset) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v, err := expandRulesetSourceFilesSlice(c, f.Files, res); err != nil { + return nil, fmt.Errorf("error expanding Files into files: %w", err) + } else if v != nil { + m["files"] = v + } + if v := f.Language; !dcl.IsEmptyValueIndirect(v) { + m["language"] = v + } + + return m, nil +} + +// flattenRulesetSource flattens an instance of RulesetSource from a JSON +// response object. 
+func flattenRulesetSource(c *Client, i interface{}, res *Ruleset) *RulesetSource { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &RulesetSource{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyRulesetSource + } + r.Files = flattenRulesetSourceFilesSlice(c, m["files"], res) + r.Language = flattenRulesetSourceLanguageEnum(m["language"]) + + return r +} + +// expandRulesetSourceFilesMap expands the contents of RulesetSourceFiles into a JSON +// request object. +func expandRulesetSourceFilesMap(c *Client, f map[string]RulesetSourceFiles, res *Ruleset) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandRulesetSourceFiles(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandRulesetSourceFilesSlice expands the contents of RulesetSourceFiles into a JSON +// request object. +func expandRulesetSourceFilesSlice(c *Client, f []RulesetSourceFiles, res *Ruleset) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandRulesetSourceFiles(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenRulesetSourceFilesMap flattens the contents of RulesetSourceFiles from a JSON +// response object. 
+func flattenRulesetSourceFilesMap(c *Client, i interface{}, res *Ruleset) map[string]RulesetSourceFiles { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]RulesetSourceFiles{} + } + + if len(a) == 0 { + return map[string]RulesetSourceFiles{} + } + + items := make(map[string]RulesetSourceFiles) + for k, item := range a { + items[k] = *flattenRulesetSourceFiles(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenRulesetSourceFilesSlice flattens the contents of RulesetSourceFiles from a JSON +// response object. +func flattenRulesetSourceFilesSlice(c *Client, i interface{}, res *Ruleset) []RulesetSourceFiles { + a, ok := i.([]interface{}) + if !ok { + return []RulesetSourceFiles{} + } + + if len(a) == 0 { + return []RulesetSourceFiles{} + } + + items := make([]RulesetSourceFiles, 0, len(a)) + for _, item := range a { + items = append(items, *flattenRulesetSourceFiles(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandRulesetSourceFiles expands an instance of RulesetSourceFiles into a JSON +// request object. +func expandRulesetSourceFiles(c *Client, f *RulesetSourceFiles, res *Ruleset) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Content; !dcl.IsEmptyValueIndirect(v) { + m["content"] = v + } + if v := f.Name; !dcl.IsEmptyValueIndirect(v) { + m["name"] = v + } + if v := f.Fingerprint; !dcl.IsEmptyValueIndirect(v) { + m["fingerprint"] = v + } + + return m, nil +} + +// flattenRulesetSourceFiles flattens an instance of RulesetSourceFiles from a JSON +// response object. 
+func flattenRulesetSourceFiles(c *Client, i interface{}, res *Ruleset) *RulesetSourceFiles { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &RulesetSourceFiles{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyRulesetSourceFiles + } + r.Content = dcl.FlattenString(m["content"]) + r.Name = dcl.FlattenString(m["name"]) + r.Fingerprint = dcl.FlattenString(m["fingerprint"]) + + return r +} + +// expandRulesetMetadataMap expands the contents of RulesetMetadata into a JSON +// request object. +func expandRulesetMetadataMap(c *Client, f map[string]RulesetMetadata, res *Ruleset) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandRulesetMetadata(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandRulesetMetadataSlice expands the contents of RulesetMetadata into a JSON +// request object. +func expandRulesetMetadataSlice(c *Client, f []RulesetMetadata, res *Ruleset) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandRulesetMetadata(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenRulesetMetadataMap flattens the contents of RulesetMetadata from a JSON +// response object. 
+func flattenRulesetMetadataMap(c *Client, i interface{}, res *Ruleset) map[string]RulesetMetadata { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]RulesetMetadata{} + } + + if len(a) == 0 { + return map[string]RulesetMetadata{} + } + + items := make(map[string]RulesetMetadata) + for k, item := range a { + items[k] = *flattenRulesetMetadata(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenRulesetMetadataSlice flattens the contents of RulesetMetadata from a JSON +// response object. +func flattenRulesetMetadataSlice(c *Client, i interface{}, res *Ruleset) []RulesetMetadata { + a, ok := i.([]interface{}) + if !ok { + return []RulesetMetadata{} + } + + if len(a) == 0 { + return []RulesetMetadata{} + } + + items := make([]RulesetMetadata, 0, len(a)) + for _, item := range a { + items = append(items, *flattenRulesetMetadata(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandRulesetMetadata expands an instance of RulesetMetadata into a JSON +// request object. +func expandRulesetMetadata(c *Client, f *RulesetMetadata, res *Ruleset) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Services; v != nil { + m["services"] = v + } + + return m, nil +} + +// flattenRulesetMetadata flattens an instance of RulesetMetadata from a JSON +// response object. +func flattenRulesetMetadata(c *Client, i interface{}, res *Ruleset) *RulesetMetadata { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &RulesetMetadata{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyRulesetMetadata + } + r.Services = dcl.FlattenStringSlice(m["services"]) + + return r +} + +// flattenRulesetSourceLanguageEnumMap flattens the contents of RulesetSourceLanguageEnum from a JSON +// response object. 
+func flattenRulesetSourceLanguageEnumMap(c *Client, i interface{}, res *Ruleset) map[string]RulesetSourceLanguageEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]RulesetSourceLanguageEnum{} + } + + if len(a) == 0 { + return map[string]RulesetSourceLanguageEnum{} + } + + items := make(map[string]RulesetSourceLanguageEnum) + for k, item := range a { + items[k] = *flattenRulesetSourceLanguageEnum(item.(interface{})) + } + + return items +} + +// flattenRulesetSourceLanguageEnumSlice flattens the contents of RulesetSourceLanguageEnum from a JSON +// response object. +func flattenRulesetSourceLanguageEnumSlice(c *Client, i interface{}, res *Ruleset) []RulesetSourceLanguageEnum { + a, ok := i.([]interface{}) + if !ok { + return []RulesetSourceLanguageEnum{} + } + + if len(a) == 0 { + return []RulesetSourceLanguageEnum{} + } + + items := make([]RulesetSourceLanguageEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenRulesetSourceLanguageEnum(item.(interface{}))) + } + + return items +} + +// flattenRulesetSourceLanguageEnum asserts that an interface is a string, and returns a +// pointer to a *RulesetSourceLanguageEnum with the same value as that string. +func flattenRulesetSourceLanguageEnum(i interface{}) *RulesetSourceLanguageEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return RulesetSourceLanguageEnumRef(s) +} + +// This function returns a matcher that checks whether a serialized resource matches this resource +// in its parameters (as defined by the fields in a Get, which definitionally define resource +// identity). This is useful in extracting the element from a List call. 
+func (r *Ruleset) matcher(c *Client) func([]byte) bool { + return func(b []byte) bool { + cr, err := unmarshalRuleset(b, c, r) + if err != nil { + c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.") + return false + } + nr := r.urlNormalized() + ncr := cr.urlNormalized() + c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr) + + if nr.Project == nil && ncr.Project == nil { + c.Config.Logger.Info("Both Project fields null - considering equal.") + } else if nr.Project == nil || ncr.Project == nil { + c.Config.Logger.Info("Only one Project field is null - considering unequal.") + return false + } else if *nr.Project != *ncr.Project { + return false + } + if nr.Name == nil && ncr.Name == nil { + c.Config.Logger.Info("Both Name fields null - considering equal.") + } else if nr.Name == nil || ncr.Name == nil { + c.Config.Logger.Info("Only one Name field is null - considering unequal.") + return false + } else if *nr.Name != *ncr.Name { + return false + } + return true + } +} + +type rulesetDiff struct { + // The diff should include one or the other of RequiresRecreate or UpdateOp. + RequiresRecreate bool + UpdateOp rulesetApiOperation + FieldName string // used for error logging +} + +func convertFieldDiffsToRulesetDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]rulesetDiff, error) { + opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff) + // Map each operation name to the field diffs associated with it. + for _, fd := range fds { + for _, ro := range fd.ResultingOperation { + if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok { + fieldDiffs = append(fieldDiffs, fd) + opNamesToFieldDiffs[ro] = fieldDiffs + } else { + config.Logger.Infof("%s required due to diff: %v", ro, fd) + opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd} + } + } + } + var diffs []rulesetDiff + // For each operation name, create a rulesetDiff which contains the operation. 
+ for opName, fieldDiffs := range opNamesToFieldDiffs { + // Use the first field diff's field name for logging required recreate error. + diff := rulesetDiff{FieldName: fieldDiffs[0].FieldName} + if opName == "Recreate" { + diff.RequiresRecreate = true + } else { + apiOp, err := convertOpNameToRulesetApiOperation(opName, fieldDiffs, opts...) + if err != nil { + return diffs, err + } + diff.UpdateOp = apiOp + } + diffs = append(diffs, diff) + } + return diffs, nil +} + +func convertOpNameToRulesetApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (rulesetApiOperation, error) { + switch opName { + + default: + return nil, fmt.Errorf("no such operation with name: %v", opName) + } +} + +func extractRulesetFields(r *Ruleset) error { + vSource := r.Source + if vSource == nil { + // note: explicitly not the empty object. + vSource = &RulesetSource{} + } + if err := extractRulesetSourceFields(r, vSource); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSource) { + r.Source = vSource + } + vMetadata := r.Metadata + if vMetadata == nil { + // note: explicitly not the empty object. + vMetadata = &RulesetMetadata{} + } + if err := extractRulesetMetadataFields(r, vMetadata); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMetadata) { + r.Metadata = vMetadata + } + return nil +} +func extractRulesetSourceFields(r *Ruleset, o *RulesetSource) error { + return nil +} +func extractRulesetSourceFilesFields(r *Ruleset, o *RulesetSourceFiles) error { + return nil +} +func extractRulesetMetadataFields(r *Ruleset, o *RulesetMetadata) error { + return nil +} + +func postReadExtractRulesetFields(r *Ruleset) error { + vSource := r.Source + if vSource == nil { + // note: explicitly not the empty object. 
+ vSource = &RulesetSource{} + } + if err := postReadExtractRulesetSourceFields(r, vSource); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSource) { + r.Source = vSource + } + vMetadata := r.Metadata + if vMetadata == nil { + // note: explicitly not the empty object. + vMetadata = &RulesetMetadata{} + } + if err := postReadExtractRulesetMetadataFields(r, vMetadata); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMetadata) { + r.Metadata = vMetadata + } + return nil +} +func postReadExtractRulesetSourceFields(r *Ruleset, o *RulesetSource) error { + return nil +} +func postReadExtractRulesetSourceFilesFields(r *Ruleset, o *RulesetSourceFiles) error { + return nil +} +func postReadExtractRulesetMetadataFields(r *Ruleset, o *RulesetMetadata) error { + return nil +} diff --git a/mmv1/third_party/terraform/services/firebaserules/ruleset_schema.go.tmpl b/mmv1/third_party/terraform/services/firebaserules/ruleset_schema.go.tmpl new file mode 100644 index 000000000000..7e0d38fc0244 --- /dev/null +++ b/mmv1/third_party/terraform/services/firebaserules/ruleset_schema.go.tmpl @@ -0,0 +1,211 @@ +package firebaserules + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +func DCLRulesetSchema() *dcl.Schema { + return &dcl.Schema{ + Info: &dcl.Info{ + Title: "Firebaserules/Ruleset", + StructName: "Ruleset", + Reference: &dcl.Link{ + Text: "Firebase Ruleset API Documentation", + URL: "https://firebase.google.com/docs/reference/rules/rest#rest-resource:-v1.projects.rulesets", + }, + Guides: []*dcl.Link{ + &dcl.Link{ + Text: "Get started with Firebase Security Rules", + URL: "https://firebase.google.com/docs/rules/get-started", + }, + }, + }, + Paths: &dcl.Paths{ + Get: &dcl.Path{ + Description: "The function used to get information about a Ruleset", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "ruleset", + Required: true, + Description: "A full instance of a Ruleset", + }, + }, + }, + Apply: 
&dcl.Path{ + Description: "The function used to apply information about a Ruleset", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "ruleset", + Required: true, + Description: "A full instance of a Ruleset", + }, + }, + }, + Delete: &dcl.Path{ + Description: "The function used to delete a Ruleset", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "ruleset", + Required: true, + Description: "A full instance of a Ruleset", + }, + }, + }, + DeleteAll: &dcl.Path{ + Description: "The function used to delete all Ruleset", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "project", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + }, + }, + List: &dcl.Path{ + Description: "The function used to list information about many Ruleset", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "project", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + }, + }, + }, + Components: &dcl.Components{ + Schemas: map[string]*dcl.Component{ + "Ruleset": &dcl.Component{ + Title: "Ruleset", + ID: "projects/{{ "{{" }}project{{ "}}" }}/rulesets/{{ "{{" }}name{{ "}}" }}", + ParentContainer: "project", + HasCreate: true, + SchemaProperty: dcl.Property{ + Type: "object", + Required: []string{ + "source", + "project", + }, + Properties: map[string]*dcl.Property{ + "createTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "CreateTime", + ReadOnly: true, + Description: "Output only. Time the `Ruleset` was created.", + Immutable: true, + }, + "metadata": &dcl.Property{ + Type: "object", + GoName: "Metadata", + GoType: "RulesetMetadata", + ReadOnly: true, + Description: "Output only. The metadata for this ruleset.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "services": &dcl.Property{ + Type: "array", + GoName: "Services", + Description: "Services that this ruleset has declarations for (e.g., \"cloud.firestore\"). 
There may be 0+ of these.", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + }, + }, + "name": &dcl.Property{ + Type: "string", + GoName: "Name", + Description: "Output only. Name of the `Ruleset`. The ruleset_id is auto generated by the service. Format: `projects/{project_id}/rulesets/{ruleset_id}`", + Immutable: true, + ServerGeneratedParameter: true, + HasLongForm: true, + }, + "project": &dcl.Property{ + Type: "string", + GoName: "Project", + Description: "The project for the resource", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Cloudresourcemanager/Project", + Field: "name", + Parent: true, + }, + }, + Parameter: true, + }, + "source": &dcl.Property{ + Type: "object", + GoName: "Source", + GoType: "RulesetSource", + Description: "`Source` for the `Ruleset`.", + Immutable: true, + Required: []string{ + "files", + }, + Properties: map[string]*dcl.Property{ + "files": &dcl.Property{ + Type: "array", + GoName: "Files", + Description: "`File` set constituting the `Source` bundle.", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "object", + GoType: "RulesetSourceFiles", + Required: []string{ + "content", + "name", + }, + Properties: map[string]*dcl.Property{ + "content": &dcl.Property{ + Type: "string", + GoName: "Content", + Description: "Textual Content.", + Immutable: true, + }, + "fingerprint": &dcl.Property{ + Type: "string", + GoName: "Fingerprint", + Description: "Fingerprint (e.g. github sha) associated with the `File`.", + Immutable: true, + }, + "name": &dcl.Property{ + Type: "string", + GoName: "Name", + Description: "File name.", + Immutable: true, + }, + }, + }, + }, + "language": &dcl.Property{ + Type: "string", + GoName: "Language", + GoType: "RulesetSourceLanguageEnum", + Description: "`Language` of the `Source` bundle. 
If unspecified, the language will default to `FIREBASE_RULES`. Possible values: LANGUAGE_UNSPECIFIED, FIREBASE_RULES, EVENT_FLOW_TRIGGERS", + Immutable: true, + Enum: []string{ + "LANGUAGE_UNSPECIFIED", + "FIREBASE_RULES", + "EVENT_FLOW_TRIGGERS", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } +} diff --git a/mmv1/third_party/terraform/services/gkehub/client.go.tmpl b/mmv1/third_party/terraform/services/gkehub/client.go.tmpl new file mode 100644 index 000000000000..a29d06089889 --- /dev/null +++ b/mmv1/third_party/terraform/services/gkehub/client.go.tmpl @@ -0,0 +1,18 @@ +package gkehub + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +// The Client is the base struct of all operations. This will receive the +// Get, Delete, List, and Apply operations on all resources. +type Client struct { + Config *dcl.Config +} + +// NewClient creates a client that retries all operations a few times each. +func NewClient(c *dcl.Config) *Client { + return &Client{ + Config: c, + } +} diff --git a/mmv1/third_party/terraform/services/gkehub/feature.go.tmpl b/mmv1/third_party/terraform/services/gkehub/feature.go.tmpl new file mode 100644 index 000000000000..3b1fb2c99e6b --- /dev/null +++ b/mmv1/third_party/terraform/services/gkehub/feature.go.tmpl @@ -0,0 +1,905 @@ +package gkehub + +import ( + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "time" + + "google.golang.org/api/googleapi" + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +type Feature struct { + Name *string `json:"name"` + Labels map[string]string `json:"labels"` + ResourceState *FeatureResourceState `json:"resourceState"` + Spec *FeatureSpec `json:"spec"` + State *FeatureState `json:"state"` + CreateTime *string `json:"createTime"` + UpdateTime *string `json:"updateTime"` + DeleteTime *string `json:"deleteTime"` + Project *string `json:"project"` + Location *string `json:"location"` +} + +func (r *Feature) String() string { + 
return dcl.SprintResource(r) +} + +// The enum FeatureResourceStateStateEnum. +type FeatureResourceStateStateEnum string + +// FeatureResourceStateStateEnumRef returns a *FeatureResourceStateStateEnum with the value of string s +// If the empty string is provided, nil is returned. +func FeatureResourceStateStateEnumRef(s string) *FeatureResourceStateStateEnum { + v := FeatureResourceStateStateEnum(s) + return &v +} + +func (v FeatureResourceStateStateEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"STATE_UNSPECIFIED", "ENABLING", "ACTIVE", "DISABLING", "UPDATING", "SERVICE_UPDATING"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "FeatureResourceStateStateEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum FeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum. +type FeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum string + +// FeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnumRef returns a *FeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum with the value of string s +// If the empty string is provided, nil is returned. +func FeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnumRef(s string) *FeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum { + v := FeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum(s) + return &v +} + +func (v FeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"MODE_UNSPECIFIED", "COPY", "MOVE"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "FeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum. 
+type FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum string + +// FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnumRef returns a *FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum with the value of string s +// If the empty string is provided, nil is returned. +func FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnumRef(s string) *FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum { + v := FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum(s) + return &v +} + +func (v FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"MODE_UNSPECIFIED", "COPY", "MOVE"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum FeatureStateStateCodeEnum. +type FeatureStateStateCodeEnum string + +// FeatureStateStateCodeEnumRef returns a *FeatureStateStateCodeEnum with the value of string s +// If the empty string is provided, nil is returned. +func FeatureStateStateCodeEnumRef(s string) *FeatureStateStateCodeEnum { + v := FeatureStateStateCodeEnum(s) + return &v +} + +func (v FeatureStateStateCodeEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. 
+ return nil + } + for _, s := range []string{"CODE_UNSPECIFIED", "OK", "WARNING", "ERROR"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "FeatureStateStateCodeEnum", + Value: string(v), + Valid: []string{}, + } +} + +type FeatureResourceState struct { + empty bool `json:"-"` + State *FeatureResourceStateStateEnum `json:"state"` + HasResources *bool `json:"hasResources"` +} + +type jsonFeatureResourceState FeatureResourceState + +func (r *FeatureResourceState) UnmarshalJSON(data []byte) error { + var res jsonFeatureResourceState + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyFeatureResourceState + } else { + + r.State = res.State + + r.HasResources = res.HasResources + + } + return nil +} + +// This object is used to assert a desired state where this FeatureResourceState is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyFeatureResourceState *FeatureResourceState = &FeatureResourceState{empty: true} + +func (r *FeatureResourceState) Empty() bool { + return r.empty +} + +func (r *FeatureResourceState) String() string { + return dcl.SprintResource(r) +} + +func (r *FeatureResourceState) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type FeatureSpec struct { + empty bool `json:"-"` + Multiclusteringress *FeatureSpecMulticlusteringress `json:"multiclusteringress"` + Fleetobservability *FeatureSpecFleetobservability `json:"fleetobservability"` +} + +type jsonFeatureSpec FeatureSpec + +func (r *FeatureSpec) UnmarshalJSON(data []byte) error { + var res jsonFeatureSpec + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyFeatureSpec + } else { + + r.Multiclusteringress = res.Multiclusteringress + + r.Fleetobservability = res.Fleetobservability + + } + return nil +} + +// This object is used to assert a desired state where this FeatureSpec is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyFeatureSpec *FeatureSpec = &FeatureSpec{empty: true} + +func (r *FeatureSpec) Empty() bool { + return r.empty +} + +func (r *FeatureSpec) String() string { + return dcl.SprintResource(r) +} + +func (r *FeatureSpec) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type FeatureSpecMulticlusteringress struct { + empty bool `json:"-"` + ConfigMembership *string `json:"configMembership"` +} + +type jsonFeatureSpecMulticlusteringress FeatureSpecMulticlusteringress + +func (r *FeatureSpecMulticlusteringress) UnmarshalJSON(data []byte) error { + var res jsonFeatureSpecMulticlusteringress + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyFeatureSpecMulticlusteringress + } else { + + r.ConfigMembership = res.ConfigMembership + + } + return nil +} + +// This object is used to assert a desired state where this FeatureSpecMulticlusteringress is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyFeatureSpecMulticlusteringress *FeatureSpecMulticlusteringress = &FeatureSpecMulticlusteringress{empty: true} + +func (r *FeatureSpecMulticlusteringress) Empty() bool { + return r.empty +} + +func (r *FeatureSpecMulticlusteringress) String() string { + return dcl.SprintResource(r) +} + +func (r *FeatureSpecMulticlusteringress) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type FeatureSpecFleetobservability struct { + empty bool `json:"-"` + LoggingConfig *FeatureSpecFleetobservabilityLoggingConfig `json:"loggingConfig"` +} + +type jsonFeatureSpecFleetobservability FeatureSpecFleetobservability + +func (r *FeatureSpecFleetobservability) UnmarshalJSON(data []byte) error { + var res jsonFeatureSpecFleetobservability + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyFeatureSpecFleetobservability + } else { + + r.LoggingConfig = res.LoggingConfig + + } + return nil +} + +// This object is used to assert a desired state where this FeatureSpecFleetobservability is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyFeatureSpecFleetobservability *FeatureSpecFleetobservability = &FeatureSpecFleetobservability{empty: true} + +func (r *FeatureSpecFleetobservability) Empty() bool { + return r.empty +} + +func (r *FeatureSpecFleetobservability) String() string { + return dcl.SprintResource(r) +} + +func (r *FeatureSpecFleetobservability) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type FeatureSpecFleetobservabilityLoggingConfig struct { + empty bool `json:"-"` + DefaultConfig *FeatureSpecFleetobservabilityLoggingConfigDefaultConfig `json:"defaultConfig"` + FleetScopeLogsConfig *FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig `json:"fleetScopeLogsConfig"` +} + +type jsonFeatureSpecFleetobservabilityLoggingConfig FeatureSpecFleetobservabilityLoggingConfig + +func (r *FeatureSpecFleetobservabilityLoggingConfig) UnmarshalJSON(data []byte) error { + var res jsonFeatureSpecFleetobservabilityLoggingConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyFeatureSpecFleetobservabilityLoggingConfig + } else { + + r.DefaultConfig = res.DefaultConfig + + r.FleetScopeLogsConfig = res.FleetScopeLogsConfig + + } + return nil +} + +// This object is used to assert a desired state where this FeatureSpecFleetobservabilityLoggingConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyFeatureSpecFleetobservabilityLoggingConfig *FeatureSpecFleetobservabilityLoggingConfig = &FeatureSpecFleetobservabilityLoggingConfig{empty: true} + +func (r *FeatureSpecFleetobservabilityLoggingConfig) Empty() bool { + return r.empty +} + +func (r *FeatureSpecFleetobservabilityLoggingConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *FeatureSpecFleetobservabilityLoggingConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type FeatureSpecFleetobservabilityLoggingConfigDefaultConfig struct { + empty bool `json:"-"` + Mode *FeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum `json:"mode"` +} + +type jsonFeatureSpecFleetobservabilityLoggingConfigDefaultConfig FeatureSpecFleetobservabilityLoggingConfigDefaultConfig + +func (r *FeatureSpecFleetobservabilityLoggingConfigDefaultConfig) UnmarshalJSON(data []byte) error { + var res jsonFeatureSpecFleetobservabilityLoggingConfigDefaultConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyFeatureSpecFleetobservabilityLoggingConfigDefaultConfig + } else { + + r.Mode = res.Mode + + } + return nil +} + +// This object is used to assert a desired state where this FeatureSpecFleetobservabilityLoggingConfigDefaultConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyFeatureSpecFleetobservabilityLoggingConfigDefaultConfig *FeatureSpecFleetobservabilityLoggingConfigDefaultConfig = &FeatureSpecFleetobservabilityLoggingConfigDefaultConfig{empty: true} + +func (r *FeatureSpecFleetobservabilityLoggingConfigDefaultConfig) Empty() bool { + return r.empty +} + +func (r *FeatureSpecFleetobservabilityLoggingConfigDefaultConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *FeatureSpecFleetobservabilityLoggingConfigDefaultConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig struct { + empty bool `json:"-"` + Mode *FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum `json:"mode"` +} + +type jsonFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig + +func (r *FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig) UnmarshalJSON(data []byte) error { + var res jsonFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig + } else { + + r.Mode = res.Mode + + } + return nil +} + +// This object is used to assert a desired state where this FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig *FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig = &FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig{empty: true} + +func (r *FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig) Empty() bool { + return r.empty +} + +func (r *FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type FeatureState struct { + empty bool `json:"-"` + State *FeatureStateState `json:"state"` +} + +type jsonFeatureState FeatureState + +func (r *FeatureState) UnmarshalJSON(data []byte) error { + var res jsonFeatureState + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyFeatureState + } else { + + r.State = res.State + + } + return nil +} + +// This object is used to assert a desired state where this FeatureState is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyFeatureState *FeatureState = &FeatureState{empty: true} + +func (r *FeatureState) Empty() bool { + return r.empty +} + +func (r *FeatureState) String() string { + return dcl.SprintResource(r) +} + +func (r *FeatureState) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type FeatureStateState struct { + empty bool `json:"-"` + Code *FeatureStateStateCodeEnum `json:"code"` + Description *string `json:"description"` + UpdateTime *string `json:"updateTime"` +} + +type jsonFeatureStateState FeatureStateState + +func (r *FeatureStateState) UnmarshalJSON(data []byte) error { + var res jsonFeatureStateState + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyFeatureStateState + } else { + + r.Code = res.Code + + r.Description = res.Description + + r.UpdateTime = res.UpdateTime + + } + return nil +} + +// This object is used to assert a desired state where this FeatureStateState is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyFeatureStateState *FeatureStateState = &FeatureStateState{empty: true} + +func (r *FeatureStateState) Empty() bool { + return r.empty +} + +func (r *FeatureStateState) String() string { + return dcl.SprintResource(r) +} + +func (r *FeatureStateState) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +// Describe returns a simple description of this resource to ensure that automated tools +// can identify it. 
+func (r *Feature) Describe() dcl.ServiceTypeVersion { + return dcl.ServiceTypeVersion{ + Service: "gke_hub", + Type: "Feature", + Version: "beta", + } +} + +func (r *Feature) ID() (string, error) { + if err := extractFeatureFields(r); err != nil { + return "", err + } + nr := r.urlNormalized() + params := map[string]interface{}{ + "name": dcl.ValueOrEmptyString(nr.Name), + "labels": dcl.ValueOrEmptyString(nr.Labels), + "resource_state": dcl.ValueOrEmptyString(nr.ResourceState), + "spec": dcl.ValueOrEmptyString(nr.Spec), + "state": dcl.ValueOrEmptyString(nr.State), + "create_time": dcl.ValueOrEmptyString(nr.CreateTime), + "update_time": dcl.ValueOrEmptyString(nr.UpdateTime), + "delete_time": dcl.ValueOrEmptyString(nr.DeleteTime), + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + } + return dcl.Nprintf("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/features/{{ "{{" }}name{{ "}}" }}", params), nil +} + +const FeatureMaxPage = -1 + +type FeatureList struct { + Items []*Feature + + nextToken string + + pageSize int32 + + resource *Feature +} + +func (l *FeatureList) HasNext() bool { + return l.nextToken != "" +} + +func (l *FeatureList) Next(ctx context.Context, c *Client) error { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if !l.HasNext() { + return fmt.Errorf("no next page") + } + items, token, err := c.listFeature(ctx, l.resource, l.nextToken, l.pageSize) + if err != nil { + return err + } + l.Items = items + l.nextToken = token + return err +} + +func (c *Client) ListFeature(ctx context.Context, project, location string) (*FeatureList, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + return c.ListFeatureWithMaxResults(ctx, project, location, FeatureMaxPage) + +} + +func (c *Client) ListFeatureWithMaxResults(ctx context.Context, project, 
location string, pageSize int32) (*FeatureList, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // Create a resource object so that we can use proper url normalization methods. + r := &Feature{ + Project: &project, + Location: &location, + } + items, token, err := c.listFeature(ctx, r, "", pageSize) + if err != nil { + return nil, err + } + return &FeatureList{ + Items: items, + nextToken: token, + pageSize: pageSize, + resource: r, + }, nil +} + +func (c *Client) GetFeature(ctx context.Context, r *Feature) (*Feature, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // This is *purposefully* supressing errors. + // This function is used with url-normalized values + not URL normalized values. + // URL Normalized values will throw unintentional errors, since those values are not of the proper parent form. + extractFeatureFields(r) + + b, err := c.getFeatureRaw(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + return nil, &googleapi.Error{ + Code: 404, + Message: err.Error(), + } + } + return nil, err + } + result, err := unmarshalFeature(b, c, r) + if err != nil { + return nil, err + } + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name + + c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) + result, err = canonicalizeFeatureNewState(c, result, r) + if err != nil { + return nil, err + } + if err := postReadExtractFeatureFields(result); err != nil { + return result, err + } + c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result) + + return result, nil +} + +func (c *Client) DeleteFeature(ctx context.Context, r *Feature) error { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer 
cancel() + + if r == nil { + return fmt.Errorf("Feature resource is nil") + } + c.Config.Logger.InfoWithContext(ctx, "Deleting Feature...") + deleteOp := deleteFeatureOperation{} + return deleteOp.do(ctx, r, c) +} + +// DeleteAllFeature deletes all resources that the filter functions returns true on. +func (c *Client) DeleteAllFeature(ctx context.Context, project, location string, filter func(*Feature) bool) error { + listObj, err := c.ListFeature(ctx, project, location) + if err != nil { + return err + } + + err = c.deleteAllFeature(ctx, filter, listObj.Items) + if err != nil { + return err + } + for listObj.HasNext() { + err = listObj.Next(ctx, c) + if err != nil { + return nil + } + err = c.deleteAllFeature(ctx, filter, listObj.Items) + if err != nil { + return err + } + } + return nil +} + +func (c *Client) ApplyFeature(ctx context.Context, rawDesired *Feature, opts ...dcl.ApplyOption) (*Feature, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + ctx = dcl.ContextWithRequestID(ctx) + var resultNewState *Feature + err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + newState, err := applyFeatureHelper(c, ctx, rawDesired, opts...) + resultNewState = newState + if err != nil { + // If the error is 409, there is conflict in resource update. + // Here we want to apply changes based on latest state. + if dcl.IsConflictError(err) { + return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} + } + return nil, err + } + return nil, nil + }, c.Config.RetryProvider) + return resultNewState, err +} + +func applyFeatureHelper(c *Client, ctx context.Context, rawDesired *Feature, opts ...dcl.ApplyOption) (*Feature, error) { + c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyFeature...") + c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) + + // 1.1: Validation of user-specified fields in desired state. 
+ if err := rawDesired.validate(); err != nil { + return nil, err + } + + if err := extractFeatureFields(rawDesired); err != nil { + return nil, err + } + + initial, desired, fieldDiffs, err := c.featureDiffsForRawDesired(ctx, rawDesired, opts...) + if err != nil { + return nil, fmt.Errorf("failed to create a diff: %w", err) + } + + diffs, err := convertFieldDiffsToFeatureDiffs(c.Config, fieldDiffs, opts) + if err != nil { + return nil, err + } + + // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). + + // 2.3: Lifecycle Directive Check + var create bool + lp := dcl.FetchLifecycleParams(opts) + if initial == nil { + if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} + } + create = true + } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), + } + } else { + for _, d := range diffs { + if d.RequiresRecreate { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), + } + } + if dcl.HasLifecycleParam(lp, dcl.BlockModification) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} + } + } + } + + // 2.4 Imperative Request Planning + var ops []featureApiOperation + if create { + ops = append(ops, &createFeatureOperation{}) + } else { + for _, d := range diffs { + ops = append(ops, d.UpdateOp) + } + } + c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) + + // 2.5 Request Actuation + for _, op := range ops { + c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) + if err := op.do(ctx, desired, c); err != nil { + c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", op, op, err) + return nil, err + } + 
c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) + } + return applyFeatureDiff(c, ctx, desired, rawDesired, ops, opts...) +} + +func applyFeatureDiff(c *Client, ctx context.Context, desired *Feature, rawDesired *Feature, ops []featureApiOperation, opts ...dcl.ApplyOption) (*Feature, error) { + // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") + rawNew, err := c.GetFeature(ctx, desired) + if err != nil { + return nil, err + } + // Get additional values from the first response. + // These values should be merged into the newState above. + if len(ops) > 0 { + lastOp := ops[len(ops)-1] + if o, ok := lastOp.(*createFeatureOperation); ok { + if r, hasR := o.FirstResponse(); hasR { + + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") + + fullResp, err := unmarshalMapFeature(r, c, rawDesired) + if err != nil { + return nil, err + } + + rawNew, err = canonicalizeFeatureNewState(c, rawNew, fullResp) + if err != nil { + return nil, err + } + } + } + } + + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) + // 3.2b Canonicalization of raw new state using raw desired state + newState, err := canonicalizeFeatureNewState(c, rawNew, rawDesired) + if err != nil { + return rawNew, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) + // 3.3 Comparison of the new state and raw desired state. + // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE + newDesired, err := canonicalizeFeatureDesiredState(rawDesired, newState) + if err != nil { + return newState, err + } + + if err := postReadExtractFeatureFields(newState); err != nil { + return newState, err + } + + // Need to ensure any transformations made here match acceptably in differ. 
+ if err := postReadExtractFeatureFields(newDesired); err != nil { + return newState, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) + newDiffs, err := diffFeature(c, newDesired, newState) + if err != nil { + return newState, err + } + + if len(newDiffs) == 0 { + c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") + } else { + c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) + diffMessages := make([]string, len(newDiffs)) + for i, d := range newDiffs { + diffMessages[i] = fmt.Sprintf("%v", d) + } + return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} + } + c.Config.Logger.InfoWithContext(ctx, "Done Apply.") + return newState, nil +} diff --git a/mmv1/third_party/terraform/services/gkehub/feature_internal.go.tmpl b/mmv1/third_party/terraform/services/gkehub/feature_internal.go.tmpl new file mode 100644 index 000000000000..4807c97be7a2 --- /dev/null +++ b/mmv1/third_party/terraform/services/gkehub/feature_internal.go.tmpl @@ -0,0 +1,3616 @@ +package gkehub + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "strings" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource/operations" +) + +func (r *Feature) validate() error { + + if err := dcl.RequiredParameter(r.Project, "Project"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Location, "Location"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.ResourceState) { + if err := r.ResourceState.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.Spec) { + if err := r.Spec.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.State) { + if err := r.State.validate(); err != nil { + return err + } + } + return nil +} +func (r *FeatureResourceState) validate() error { + return nil +} +func (r *FeatureSpec) 
validate() error { + if !dcl.IsEmptyValueIndirect(r.Multiclusteringress) { + if err := r.Multiclusteringress.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.Fleetobservability) { + if err := r.Fleetobservability.validate(); err != nil { + return err + } + } + return nil +} +func (r *FeatureSpecMulticlusteringress) validate() error { + if err := dcl.Required(r, "configMembership"); err != nil { + return err + } + return nil +} +func (r *FeatureSpecFleetobservability) validate() error { + if !dcl.IsEmptyValueIndirect(r.LoggingConfig) { + if err := r.LoggingConfig.validate(); err != nil { + return err + } + } + return nil +} +func (r *FeatureSpecFleetobservabilityLoggingConfig) validate() error { + if !dcl.IsEmptyValueIndirect(r.DefaultConfig) { + if err := r.DefaultConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.FleetScopeLogsConfig) { + if err := r.FleetScopeLogsConfig.validate(); err != nil { + return err + } + } + return nil +} +func (r *FeatureSpecFleetobservabilityLoggingConfigDefaultConfig) validate() error { + return nil +} +func (r *FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig) validate() error { + return nil +} +func (r *FeatureState) validate() error { + if !dcl.IsEmptyValueIndirect(r.State) { + if err := r.State.validate(); err != nil { + return err + } + } + return nil +} +func (r *FeatureStateState) validate() error { + return nil +} +func (r *Feature) basePath() string { + params := map[string]interface{}{} + return dcl.Nprintf("https://gkehub.googleapis.com/v1beta1/", params) +} + +// featureApiOperation represents a mutable operation in the underlying REST +// API such as Create, Update, or Delete. 
+type featureApiOperation interface { + do(context.Context, *Feature, *Client) error +} + +// newUpdateFeatureUpdateFeatureRequest creates a request for an +// Feature resource's UpdateFeature update type by filling in the update +// fields based on the intended state of the resource. +func newUpdateFeatureUpdateFeatureRequest(ctx context.Context, f *Feature, c *Client) (map[string]interface{}, error) { + req := map[string]interface{}{} + res := f + _ = res + + if v := f.Labels; !dcl.IsEmptyValueIndirect(v) { + req["labels"] = v + } + if v, err := expandFeatureSpec(c, f.Spec, res); err != nil { + return nil, fmt.Errorf("error expanding Spec into spec: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["spec"] = v + } + return req, nil +} + +// marshalUpdateFeatureUpdateFeatureRequest converts the update into +// the final JSON request body. +func marshalUpdateFeatureUpdateFeatureRequest(c *Client, m map[string]interface{}) ([]byte, error) { + + return json.Marshal(m) +} + +type updateFeatureUpdateFeatureOperation struct { + // If the update operation has the REQUIRES_APPLY_OPTIONS trait, this will be populated. + // Usually it will be nil - this is to prevent us from accidentally depending on apply + // options, which should usually be unnecessary. + ApplyOptions []dcl.ApplyOption + FieldDiffs []*dcl.FieldDiff +} + +// do creates a request and sends it to the appropriate URL. In most operations, +// do will transcribe a subset of the resource into a request object and send a +// PUT request to a single URL. 
+ +func (c *Client) listFeatureRaw(ctx context.Context, r *Feature, pageToken string, pageSize int32) ([]byte, error) { + u, err := r.urlNormalized().listURL(c.Config.BasePath) + if err != nil { + return nil, err + } + + m := make(map[string]string) + if pageToken != "" { + m["pageToken"] = pageToken + } + + if pageSize != FeatureMaxPage { + m["pageSize"] = fmt.Sprintf("%v", pageSize) + } + + u, err = dcl.AddQueryParams(u, m) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + return ioutil.ReadAll(resp.Response.Body) +} + +type listFeatureOperation struct { + Resources []map[string]interface{} `json:"resources"` + Token string `json:"nextPageToken"` +} + +func (c *Client) listFeature(ctx context.Context, r *Feature, pageToken string, pageSize int32) ([]*Feature, string, error) { + b, err := c.listFeatureRaw(ctx, r, pageToken, pageSize) + if err != nil { + return nil, "", err + } + + var m listFeatureOperation + if err := json.Unmarshal(b, &m); err != nil { + return nil, "", err + } + + var l []*Feature + for _, v := range m.Resources { + res, err := unmarshalMapFeature(v, c, r) + if err != nil { + return nil, m.Token, err + } + res.Project = r.Project + res.Location = r.Location + l = append(l, res) + } + + return l, m.Token, nil +} + +func (c *Client) deleteAllFeature(ctx context.Context, f func(*Feature) bool, resources []*Feature) error { + var errors []string + for _, res := range resources { + if f(res) { + // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. 
+ err := c.DeleteFeature(ctx, res) + if err != nil { + errors = append(errors, err.Error()) + } + } + } + if len(errors) > 0 { + return fmt.Errorf("%v", strings.Join(errors, "\n")) + } else { + return nil + } +} + +type deleteFeatureOperation struct{} + +func (op *deleteFeatureOperation) do(ctx context.Context, r *Feature, c *Client) error { + r, err := c.GetFeature(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + c.Config.Logger.InfoWithContextf(ctx, "Feature not found, returning. Original error: %v", err) + return nil + } + c.Config.Logger.WarningWithContextf(ctx, "GetFeature checking for existence. error: %v", err) + return err + } + + u, err := r.deleteURL(c.Config.BasePath) + if err != nil { + return err + } + + // Delete should never have a body + body := &bytes.Buffer{} + resp, err := dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider) + if err != nil { + return err + } + + // wait for object to be deleted. + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { + return err + } + + // We saw a race condition where for some successful delete operation, the Get calls returned resources for a short duration. + // This is the reason we are adding retry to handle that case. + retriesRemaining := 10 + dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + _, err := c.GetFeature(ctx, r) + if dcl.IsNotFound(err) { + return nil, nil + } + if retriesRemaining > 0 { + retriesRemaining-- + return &dcl.RetryDetails{}, dcl.OperationNotDone{} + } + return nil, dcl.NotDeletedError{ExistingResource: r} + }, c.Config.RetryProvider) + return nil +} + +// Create operations are similar to Update operations, although they do not have +// specific request objects. 
The Create request object is the json encoding of +// the resource, which is modified by res.marshal to form the base request body. +type createFeatureOperation struct { + response map[string]interface{} +} + +func (op *createFeatureOperation) FirstResponse() (map[string]interface{}, bool) { + return op.response, len(op.response) > 0 +} + +func (op *createFeatureOperation) do(ctx context.Context, r *Feature, c *Client) error { + c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r) + u, err := r.createURL(c.Config.BasePath) + if err != nil { + return err + } + + req, err := r.marshal(c) + if err != nil { + return err + } + resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider) + if err != nil { + return err + } + // wait for object to be created. + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { + c.Config.Logger.Warningf("Creation failed after waiting for operation: %v", err) + return err + } + c.Config.Logger.InfoWithContextf(ctx, "Successfully waited for operation") + op.response, _ = o.FirstResponse() + + if _, err := c.GetFeature(ctx, r); err != nil { + c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) + return err + } + + return nil +} + +func (c *Client) getFeatureRaw(ctx context.Context, r *Feature) ([]byte, error) { + + u, err := r.getURL(c.Config.BasePath) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + b, err := ioutil.ReadAll(resp.Response.Body) + if err != nil { + return nil, err + } + + return b, nil +} + +func (c *Client) featureDiffsForRawDesired(ctx context.Context, rawDesired *Feature, opts ...dcl.ApplyOption) 
(initial, desired *Feature, diffs []*dcl.FieldDiff, err error) { + c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") + // First, let us see if the user provided a state hint. If they did, we will start fetching based on that. + var fetchState *Feature + if sh := dcl.FetchStateHint(opts); sh != nil { + if r, ok := sh.(*Feature); !ok { + c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected Feature, got %T", sh) + } else { + fetchState = r + } + } + if fetchState == nil { + fetchState = rawDesired + } + + // 1.2: Retrieval of raw initial state from API + rawInitial, err := c.GetFeature(ctx, fetchState) + if rawInitial == nil { + if !dcl.IsNotFound(err) { + c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a Feature resource already exists: %s", err) + return nil, nil, nil, fmt.Errorf("failed to retrieve Feature resource: %v", err) + } + c.Config.Logger.InfoWithContext(ctx, "Found that Feature resource did not exist.") + // Perform canonicalization to pick up defaults. + desired, err = canonicalizeFeatureDesiredState(rawDesired, rawInitial) + return nil, desired, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Found initial state for Feature: %v", rawInitial) + c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for Feature: %v", rawDesired) + + // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. + if err := extractFeatureFields(rawInitial); err != nil { + return nil, nil, nil, err + } + + // 1.3: Canonicalize raw initial state into initial state. + initial, err = canonicalizeFeatureInitialState(rawInitial, rawDesired) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for Feature: %v", initial) + + // 1.4: Canonicalize raw desired state into desired state. + desired, err = canonicalizeFeatureDesiredState(rawDesired, rawInitial, opts...) 
+ if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for Feature: %v", desired) + + // 2.1: Comparison of initial and desired state. + diffs, err = diffFeature(c, desired, initial, opts...) + return initial, desired, diffs, err +} + +func canonicalizeFeatureInitialState(rawInitial, rawDesired *Feature) (*Feature, error) { + // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. + return rawInitial, nil +} + +/* +* Canonicalizers +* +* These are responsible for converting either a user-specified config or a +* GCP API response to a standard format that can be used for difference checking. +* */ + +func canonicalizeFeatureDesiredState(rawDesired, rawInitial *Feature, opts ...dcl.ApplyOption) (*Feature, error) { + + if rawInitial == nil { + // Since the initial state is empty, the desired state is all we have. + // We canonicalize the remaining nested objects with nil to pick up defaults. + rawDesired.ResourceState = canonicalizeFeatureResourceState(rawDesired.ResourceState, nil, opts...) + rawDesired.Spec = canonicalizeFeatureSpec(rawDesired.Spec, nil, opts...) + rawDesired.State = canonicalizeFeatureState(rawDesired.State, nil, opts...) + + return rawDesired, nil + } + canonicalDesired := &Feature{} + if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawInitial.Name) { + canonicalDesired.Name = rawInitial.Name + } else { + canonicalDesired.Name = rawDesired.Name + } + if dcl.IsZeroValue(rawDesired.Labels) || (dcl.IsEmptyValueIndirect(rawDesired.Labels) && dcl.IsEmptyValueIndirect(rawInitial.Labels)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + canonicalDesired.Labels = rawInitial.Labels + } else { + canonicalDesired.Labels = rawDesired.Labels + } + canonicalDesired.Spec = canonicalizeFeatureSpec(rawDesired.Spec, rawInitial.Spec, opts...) 
+ if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) { + canonicalDesired.Project = rawInitial.Project + } else { + canonicalDesired.Project = rawDesired.Project + } + if dcl.NameToSelfLink(rawDesired.Location, rawInitial.Location) { + canonicalDesired.Location = rawInitial.Location + } else { + canonicalDesired.Location = rawDesired.Location + } + return canonicalDesired, nil +} + +func canonicalizeFeatureNewState(c *Client, rawNew, rawDesired *Feature) (*Feature, error) { + + if dcl.IsEmptyValueIndirect(rawNew.Name) && dcl.IsEmptyValueIndirect(rawDesired.Name) { + rawNew.Name = rawDesired.Name + } else { + if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawNew.Name) { + rawNew.Name = rawDesired.Name + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Labels) && dcl.IsEmptyValueIndirect(rawDesired.Labels) { + rawNew.Labels = rawDesired.Labels + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.ResourceState) && dcl.IsEmptyValueIndirect(rawDesired.ResourceState) { + rawNew.ResourceState = rawDesired.ResourceState + } else { + rawNew.ResourceState = canonicalizeNewFeatureResourceState(c, rawDesired.ResourceState, rawNew.ResourceState) + } + + if dcl.IsEmptyValueIndirect(rawNew.Spec) && dcl.IsEmptyValueIndirect(rawDesired.Spec) { + rawNew.Spec = rawDesired.Spec + } else { + rawNew.Spec = canonicalizeNewFeatureSpec(c, rawDesired.Spec, rawNew.Spec) + } + + if dcl.IsEmptyValueIndirect(rawNew.State) && dcl.IsEmptyValueIndirect(rawDesired.State) { + rawNew.State = rawDesired.State + } else { + rawNew.State = canonicalizeNewFeatureState(c, rawDesired.State, rawNew.State) + } + + if dcl.IsEmptyValueIndirect(rawNew.CreateTime) && dcl.IsEmptyValueIndirect(rawDesired.CreateTime) { + rawNew.CreateTime = rawDesired.CreateTime + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.UpdateTime) && dcl.IsEmptyValueIndirect(rawDesired.UpdateTime) { + rawNew.UpdateTime = rawDesired.UpdateTime + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.DeleteTime) && 
dcl.IsEmptyValueIndirect(rawDesired.DeleteTime) { + rawNew.DeleteTime = rawDesired.DeleteTime + } else { + } + + rawNew.Project = rawDesired.Project + + rawNew.Location = rawDesired.Location + + return rawNew, nil +} + +func canonicalizeFeatureResourceState(des, initial *FeatureResourceState, opts ...dcl.ApplyOption) *FeatureResourceState { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &FeatureResourceState{} + + return cDes +} + +func canonicalizeFeatureResourceStateSlice(des, initial []FeatureResourceState, opts ...dcl.ApplyOption) []FeatureResourceState { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]FeatureResourceState, 0, len(des)) + for _, d := range des { + cd := canonicalizeFeatureResourceState(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]FeatureResourceState, 0, len(des)) + for i, d := range des { + cd := canonicalizeFeatureResourceState(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewFeatureResourceState(c *Client, des, nw *FeatureResourceState) *FeatureResourceState { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for FeatureResourceState while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.BoolCanonicalize(des.HasResources, nw.HasResources) { + nw.HasResources = des.HasResources + } + + return nw +} + +func canonicalizeNewFeatureResourceStateSet(c *Client, des, nw []FeatureResourceState) []FeatureResourceState { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []FeatureResourceState + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareFeatureResourceStateNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewFeatureResourceState(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewFeatureResourceStateSlice(c *Client, des, nw []FeatureResourceState) []FeatureResourceState { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []FeatureResourceState + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewFeatureResourceState(c, &d, &n)) + } + + return items +} + +func canonicalizeFeatureSpec(des, initial *FeatureSpec, opts ...dcl.ApplyOption) *FeatureSpec { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &FeatureSpec{} + + cDes.Multiclusteringress = canonicalizeFeatureSpecMulticlusteringress(des.Multiclusteringress, initial.Multiclusteringress, opts...) + cDes.Fleetobservability = canonicalizeFeatureSpecFleetobservability(des.Fleetobservability, initial.Fleetobservability, opts...) + + return cDes +} + +func canonicalizeFeatureSpecSlice(des, initial []FeatureSpec, opts ...dcl.ApplyOption) []FeatureSpec { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]FeatureSpec, 0, len(des)) + for _, d := range des { + cd := canonicalizeFeatureSpec(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]FeatureSpec, 0, len(des)) + for i, d := range des { + cd := canonicalizeFeatureSpec(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewFeatureSpec(c *Client, des, nw *FeatureSpec) *FeatureSpec { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for FeatureSpec while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + nw.Multiclusteringress = canonicalizeNewFeatureSpecMulticlusteringress(c, des.Multiclusteringress, nw.Multiclusteringress) + nw.Fleetobservability = canonicalizeNewFeatureSpecFleetobservability(c, des.Fleetobservability, nw.Fleetobservability) + + return nw +} + +func canonicalizeNewFeatureSpecSet(c *Client, des, nw []FeatureSpec) []FeatureSpec { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []FeatureSpec + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareFeatureSpecNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewFeatureSpec(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewFeatureSpecSlice(c *Client, des, nw []FeatureSpec) []FeatureSpec { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []FeatureSpec + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewFeatureSpec(c, &d, &n)) + } + + return items +} + +func canonicalizeFeatureSpecMulticlusteringress(des, initial *FeatureSpecMulticlusteringress, opts ...dcl.ApplyOption) *FeatureSpecMulticlusteringress { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &FeatureSpecMulticlusteringress{} + + if dcl.IsZeroValue(des.ConfigMembership) || (dcl.IsEmptyValueIndirect(des.ConfigMembership) && dcl.IsEmptyValueIndirect(initial.ConfigMembership)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.ConfigMembership = initial.ConfigMembership + } else { + cDes.ConfigMembership = des.ConfigMembership + } + + return cDes +} + +func canonicalizeFeatureSpecMulticlusteringressSlice(des, initial []FeatureSpecMulticlusteringress, opts ...dcl.ApplyOption) []FeatureSpecMulticlusteringress { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]FeatureSpecMulticlusteringress, 0, len(des)) + for _, d := range des { + cd := canonicalizeFeatureSpecMulticlusteringress(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]FeatureSpecMulticlusteringress, 0, len(des)) + for i, d := range des { + cd := canonicalizeFeatureSpecMulticlusteringress(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewFeatureSpecMulticlusteringress(c *Client, des, nw *FeatureSpecMulticlusteringress) *FeatureSpecMulticlusteringress { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for FeatureSpecMulticlusteringress while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewFeatureSpecMulticlusteringressSet(c *Client, des, nw []FeatureSpecMulticlusteringress) []FeatureSpecMulticlusteringress { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []FeatureSpecMulticlusteringress + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareFeatureSpecMulticlusteringressNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewFeatureSpecMulticlusteringress(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewFeatureSpecMulticlusteringressSlice(c *Client, des, nw []FeatureSpecMulticlusteringress) []FeatureSpecMulticlusteringress { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []FeatureSpecMulticlusteringress + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewFeatureSpecMulticlusteringress(c, &d, &n)) + } + + return items +} + +func canonicalizeFeatureSpecFleetobservability(des, initial *FeatureSpecFleetobservability, opts ...dcl.ApplyOption) *FeatureSpecFleetobservability { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &FeatureSpecFleetobservability{} + + cDes.LoggingConfig = canonicalizeFeatureSpecFleetobservabilityLoggingConfig(des.LoggingConfig, initial.LoggingConfig, opts...) 
+ + return cDes +} + +func canonicalizeFeatureSpecFleetobservabilitySlice(des, initial []FeatureSpecFleetobservability, opts ...dcl.ApplyOption) []FeatureSpecFleetobservability { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]FeatureSpecFleetobservability, 0, len(des)) + for _, d := range des { + cd := canonicalizeFeatureSpecFleetobservability(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]FeatureSpecFleetobservability, 0, len(des)) + for i, d := range des { + cd := canonicalizeFeatureSpecFleetobservability(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewFeatureSpecFleetobservability(c *Client, des, nw *FeatureSpecFleetobservability) *FeatureSpecFleetobservability { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for FeatureSpecFleetobservability while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + nw.LoggingConfig = canonicalizeNewFeatureSpecFleetobservabilityLoggingConfig(c, des.LoggingConfig, nw.LoggingConfig) + + return nw +} + +func canonicalizeNewFeatureSpecFleetobservabilitySet(c *Client, des, nw []FeatureSpecFleetobservability) []FeatureSpecFleetobservability { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []FeatureSpecFleetobservability + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareFeatureSpecFleetobservabilityNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewFeatureSpecFleetobservability(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewFeatureSpecFleetobservabilitySlice(c *Client, des, nw []FeatureSpecFleetobservability) []FeatureSpecFleetobservability { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []FeatureSpecFleetobservability + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewFeatureSpecFleetobservability(c, &d, &n)) + } + + return items +} + +func canonicalizeFeatureSpecFleetobservabilityLoggingConfig(des, initial *FeatureSpecFleetobservabilityLoggingConfig, opts ...dcl.ApplyOption) *FeatureSpecFleetobservabilityLoggingConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &FeatureSpecFleetobservabilityLoggingConfig{} + + cDes.DefaultConfig = canonicalizeFeatureSpecFleetobservabilityLoggingConfigDefaultConfig(des.DefaultConfig, initial.DefaultConfig, opts...) + cDes.FleetScopeLogsConfig = canonicalizeFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig(des.FleetScopeLogsConfig, initial.FleetScopeLogsConfig, opts...) 
+ + return cDes +} + +func canonicalizeFeatureSpecFleetobservabilityLoggingConfigSlice(des, initial []FeatureSpecFleetobservabilityLoggingConfig, opts ...dcl.ApplyOption) []FeatureSpecFleetobservabilityLoggingConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]FeatureSpecFleetobservabilityLoggingConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeFeatureSpecFleetobservabilityLoggingConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]FeatureSpecFleetobservabilityLoggingConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeFeatureSpecFleetobservabilityLoggingConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewFeatureSpecFleetobservabilityLoggingConfig(c *Client, des, nw *FeatureSpecFleetobservabilityLoggingConfig) *FeatureSpecFleetobservabilityLoggingConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for FeatureSpecFleetobservabilityLoggingConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + nw.DefaultConfig = canonicalizeNewFeatureSpecFleetobservabilityLoggingConfigDefaultConfig(c, des.DefaultConfig, nw.DefaultConfig) + nw.FleetScopeLogsConfig = canonicalizeNewFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig(c, des.FleetScopeLogsConfig, nw.FleetScopeLogsConfig) + + return nw +} + +func canonicalizeNewFeatureSpecFleetobservabilityLoggingConfigSet(c *Client, des, nw []FeatureSpecFleetobservabilityLoggingConfig) []FeatureSpecFleetobservabilityLoggingConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []FeatureSpecFleetobservabilityLoggingConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareFeatureSpecFleetobservabilityLoggingConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewFeatureSpecFleetobservabilityLoggingConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewFeatureSpecFleetobservabilityLoggingConfigSlice(c *Client, des, nw []FeatureSpecFleetobservabilityLoggingConfig) []FeatureSpecFleetobservabilityLoggingConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []FeatureSpecFleetobservabilityLoggingConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewFeatureSpecFleetobservabilityLoggingConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeFeatureSpecFleetobservabilityLoggingConfigDefaultConfig(des, initial *FeatureSpecFleetobservabilityLoggingConfigDefaultConfig, opts ...dcl.ApplyOption) *FeatureSpecFleetobservabilityLoggingConfigDefaultConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &FeatureSpecFleetobservabilityLoggingConfigDefaultConfig{} + + if dcl.IsZeroValue(des.Mode) || (dcl.IsEmptyValueIndirect(des.Mode) && dcl.IsEmptyValueIndirect(initial.Mode)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.Mode = initial.Mode + } else { + cDes.Mode = des.Mode + } + + return cDes +} + +func canonicalizeFeatureSpecFleetobservabilityLoggingConfigDefaultConfigSlice(des, initial []FeatureSpecFleetobservabilityLoggingConfigDefaultConfig, opts ...dcl.ApplyOption) []FeatureSpecFleetobservabilityLoggingConfigDefaultConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]FeatureSpecFleetobservabilityLoggingConfigDefaultConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeFeatureSpecFleetobservabilityLoggingConfigDefaultConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]FeatureSpecFleetobservabilityLoggingConfigDefaultConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeFeatureSpecFleetobservabilityLoggingConfigDefaultConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewFeatureSpecFleetobservabilityLoggingConfigDefaultConfig(c *Client, des, nw *FeatureSpecFleetobservabilityLoggingConfigDefaultConfig) *FeatureSpecFleetobservabilityLoggingConfigDefaultConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for FeatureSpecFleetobservabilityLoggingConfigDefaultConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewFeatureSpecFleetobservabilityLoggingConfigDefaultConfigSet(c *Client, des, nw []FeatureSpecFleetobservabilityLoggingConfigDefaultConfig) []FeatureSpecFleetobservabilityLoggingConfigDefaultConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []FeatureSpecFleetobservabilityLoggingConfigDefaultConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareFeatureSpecFleetobservabilityLoggingConfigDefaultConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewFeatureSpecFleetobservabilityLoggingConfigDefaultConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewFeatureSpecFleetobservabilityLoggingConfigDefaultConfigSlice(c *Client, des, nw []FeatureSpecFleetobservabilityLoggingConfigDefaultConfig) []FeatureSpecFleetobservabilityLoggingConfigDefaultConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []FeatureSpecFleetobservabilityLoggingConfigDefaultConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewFeatureSpecFleetobservabilityLoggingConfigDefaultConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig(des, initial *FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig, opts ...dcl.ApplyOption) *FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig{} + + if dcl.IsZeroValue(des.Mode) || (dcl.IsEmptyValueIndirect(des.Mode) && dcl.IsEmptyValueIndirect(initial.Mode)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+		cDes.Mode = initial.Mode
+	} else {
+		cDes.Mode = des.Mode
+	}
+
+	return cDes
+}
+
+// canonicalizeFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigSlice canonicalizes each
+// desired element against the matching initial element when the slice lengths agree; on a length
+// mismatch each desired element is canonicalized alone (no initial to merge from).
+func canonicalizeFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigSlice(des, initial []FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig, opts ...dcl.ApplyOption) []FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig {
+	if dcl.IsEmptyValueIndirect(des) {
+		return initial
+	}
+
+	if len(des) != len(initial) {
+
+		items := make([]FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig, 0, len(des))
+		for _, d := range des {
+			cd := canonicalizeFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig(&d, nil, opts...)
+			if cd != nil {
+				items = append(items, *cd)
+			}
+		}
+		return items
+	}
+
+	items := make([]FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig, 0, len(des))
+	for i, d := range des {
+		cd := canonicalizeFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig(&d, &initial[i], opts...)
+		if cd != nil {
+			items = append(items, *cd)
+		}
+	}
+	return items
+
+}
+
+// canonicalizeNewFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig reconciles the desired
+// nested object (des) with the value the API returned (nw). This object has no server-normalized
+// fields to copy back, so a non-nil nw is returned unchanged.
+func canonicalizeNewFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig(c *Client, des, nw *FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig) *FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig {
+
+	if des == nil {
+		return nw
+	}
+
+	if nw == nil {
+		if dcl.IsEmptyValueIndirect(des) {
+			c.Config.Logger.Info("Found explicitly empty value for FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig while comparing non-nil desired to nil actual. Returning desired object.")
+			return des
+		}
+		return nil
+	}
+
+	return nw
+}
+
+// canonicalizeNewFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigSet treats des and nw as
+// unordered sets: elements of des that diff-match an element of nw are canonicalized against it, and
+// any unmatched API-side elements are appended to the result.
+func canonicalizeNewFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigSet(c *Client, des, nw []FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig) []FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig {
+	if des == nil {
+		return nw
+	}
+
+	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
+	var items []FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig
+	for _, d := range des {
+		matchedIndex := -1
+		for i, n := range nw {
+			if diffs, _ := compareFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
+				matchedIndex = i
+				break
+			}
+		}
+		if matchedIndex != -1 {
+			items = append(items, *canonicalizeNewFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig(c, &d, &nw[matchedIndex]))
+			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
+		}
+	}
+	// Also include elements in nw that are not matched in des.
+	items = append(items, nw...)
+
+	return items
+}
+
+// canonicalizeNewFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigSlice canonicalizes
+// element-wise only when both slices have the same length; a length mismatch is left for the differ.
+func canonicalizeNewFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigSlice(c *Client, des, nw []FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig) []FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig {
+	if des == nil {
+		return nw
+	}
+
+	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
+	// Return the original array.
+	if len(des) != len(nw) {
+		return nw
+	}
+
+	var items []FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig
+	for i, d := range des {
+		n := nw[i]
+		items = append(items, *canonicalizeNewFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig(c, &d, &n))
+	}
+
+	return items
+}
+
+// canonicalizeFeatureState merges desired and initial values. The canonical desired object carries no
+// fields (all FeatureState fields are set by the server), so an empty struct is returned unless des is
+// nil/explicitly empty.
+func canonicalizeFeatureState(des, initial *FeatureState, opts ...dcl.ApplyOption) *FeatureState {
+	if des == nil {
+		return initial
+	}
+	if des.empty {
+		return des
+	}
+
+	if initial == nil {
+		return des
+	}
+
+	cDes := &FeatureState{}
+
+	return cDes
+}
+
+// canonicalizeFeatureStateSlice canonicalizes each desired element against the matching initial
+// element when lengths agree; otherwise each element is canonicalized alone.
+func canonicalizeFeatureStateSlice(des, initial []FeatureState, opts ...dcl.ApplyOption) []FeatureState {
+	if dcl.IsEmptyValueIndirect(des) {
+		return initial
+	}
+
+	if len(des) != len(initial) {
+
+		items := make([]FeatureState, 0, len(des))
+		for _, d := range des {
+			cd := canonicalizeFeatureState(&d, nil, opts...)
+			if cd != nil {
+				items = append(items, *cd)
+			}
+		}
+		return items
+	}
+
+	items := make([]FeatureState, 0, len(des))
+	for i, d := range des {
+		cd := canonicalizeFeatureState(&d, &initial[i], opts...)
+		if cd != nil {
+			items = append(items, *cd)
+		}
+	}
+	return items
+
+}
+
+// canonicalizeNewFeatureState reconciles the desired state block with the API's returned value,
+// recursively canonicalizing the nested State field.
+func canonicalizeNewFeatureState(c *Client, des, nw *FeatureState) *FeatureState {
+
+	if des == nil {
+		return nw
+	}
+
+	if nw == nil {
+		if dcl.IsEmptyValueIndirect(des) {
+			c.Config.Logger.Info("Found explicitly empty value for FeatureState while comparing non-nil desired to nil actual. Returning desired object.")
+			return des
+		}
+		return nil
+	}
+
+	nw.State = canonicalizeNewFeatureStateState(c, des.State, nw.State)
+
+	return nw
+}
+
+// canonicalizeNewFeatureStateSet treats des and nw as unordered sets: diff-matched elements are
+// canonicalized pairwise, and unmatched API-side elements are kept.
+func canonicalizeNewFeatureStateSet(c *Client, des, nw []FeatureState) []FeatureState {
+	if des == nil {
+		return nw
+	}
+
+	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
+	var items []FeatureState
+	for _, d := range des {
+		matchedIndex := -1
+		for i, n := range nw {
+			if diffs, _ := compareFeatureStateNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
+				matchedIndex = i
+				break
+			}
+		}
+		if matchedIndex != -1 {
+			items = append(items, *canonicalizeNewFeatureState(c, &d, &nw[matchedIndex]))
+			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
+		}
+	}
+	// Also include elements in nw that are not matched in des.
+	items = append(items, nw...)
+
+	return items
+}
+
+// canonicalizeNewFeatureStateSlice canonicalizes element-wise only when lengths match; a length
+// mismatch is left for the differ.
+func canonicalizeNewFeatureStateSlice(c *Client, des, nw []FeatureState) []FeatureState {
+	if des == nil {
+		return nw
+	}
+
+	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
+	// Return the original array.
+	if len(des) != len(nw) {
+		return nw
+	}
+
+	var items []FeatureState
+	for i, d := range des {
+		n := nw[i]
+		items = append(items, *canonicalizeNewFeatureState(c, &d, &n))
+	}
+
+	return items
+}
+
+// canonicalizeFeatureStateState merges desired and initial values. All FeatureStateState fields are
+// server-set, so the canonical desired object is empty unless des is nil/explicitly empty.
+func canonicalizeFeatureStateState(des, initial *FeatureStateState, opts ...dcl.ApplyOption) *FeatureStateState {
+	if des == nil {
+		return initial
+	}
+	if des.empty {
+		return des
+	}
+
+	if initial == nil {
+		return des
+	}
+
+	cDes := &FeatureStateState{}
+
+	return cDes
+}
+
+// canonicalizeFeatureStateStateSlice canonicalizes each desired element against the matching initial
+// element when lengths agree; otherwise each element is canonicalized alone.
+func canonicalizeFeatureStateStateSlice(des, initial []FeatureStateState, opts ...dcl.ApplyOption) []FeatureStateState {
+	if dcl.IsEmptyValueIndirect(des) {
+		return initial
+	}
+
+	if len(des) != len(initial) {
+
+		items := make([]FeatureStateState, 0, len(des))
+		for _, d := range des {
+			cd := canonicalizeFeatureStateState(&d, nil, opts...)
+			if cd != nil {
+				items = append(items, *cd)
+			}
+		}
+		return items
+	}
+
+	items := make([]FeatureStateState, 0, len(des))
+	for i, d := range des {
+		cd := canonicalizeFeatureStateState(&d, &initial[i], opts...)
+		if cd != nil {
+			items = append(items, *cd)
+		}
+	}
+	return items
+
+}
+
+// canonicalizeNewFeatureStateState reconciles the desired nested state with the API's returned value,
+// preferring the desired string forms for Description and UpdateTime when they are equivalent.
+func canonicalizeNewFeatureStateState(c *Client, des, nw *FeatureStateState) *FeatureStateState {
+
+	if des == nil {
+		return nw
+	}
+
+	if nw == nil {
+		if dcl.IsEmptyValueIndirect(des) {
+			c.Config.Logger.Info("Found explicitly empty value for FeatureStateState while comparing non-nil desired to nil actual. Returning desired object.")
+			return des
+		}
+		return nil
+	}
+
+	if dcl.StringCanonicalize(des.Description, nw.Description) {
+		nw.Description = des.Description
+	}
+	if dcl.StringCanonicalize(des.UpdateTime, nw.UpdateTime) {
+		nw.UpdateTime = des.UpdateTime
+	}
+
+	return nw
+}
+
+// canonicalizeNewFeatureStateStateSet treats des and nw as unordered sets: diff-matched elements are
+// canonicalized pairwise, and unmatched API-side elements are kept.
+func canonicalizeNewFeatureStateStateSet(c *Client, des, nw []FeatureStateState) []FeatureStateState {
+	if des == nil {
+		return nw
+	}
+
+	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
+	var items []FeatureStateState
+	for _, d := range des {
+		matchedIndex := -1
+		for i, n := range nw {
+			if diffs, _ := compareFeatureStateStateNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
+				matchedIndex = i
+				break
+			}
+		}
+		if matchedIndex != -1 {
+			items = append(items, *canonicalizeNewFeatureStateState(c, &d, &nw[matchedIndex]))
+			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
+		}
+	}
+	// Also include elements in nw that are not matched in des.
+	items = append(items, nw...)
+
+	return items
+}
+
+// canonicalizeNewFeatureStateStateSlice canonicalizes element-wise only when lengths match; a length
+// mismatch is left for the differ.
+func canonicalizeNewFeatureStateStateSlice(c *Client, des, nw []FeatureStateState) []FeatureStateState {
+	if des == nil {
+		return nw
+	}
+
+	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
+	// Return the original array.
+	if len(des) != len(nw) {
+		return nw
+	}
+
+	var items []FeatureStateState
+	for i, d := range des {
+		n := nw[i]
+		items = append(items, *canonicalizeNewFeatureStateState(c, &d, &n))
+	}
+
+	return items
+}
+
+// The differ returns a list of diffs, along with a list of operations that should be taken
+// to remedy them. Right now, it does not attempt to consolidate operations - if several
+// fields can be fixed with a patch update, it will perform the patch several times.
+// Diffs on some fields will be ignored if the `desired` state has an empty (nil)
+// value. This empty value indicates that the user does not care about the state for
+// the field. Empty fields on the actual object will cause diffs.
+// TODO(magic-modules-eng): for efficiency in some resources, add batching.
+// diffFeature compares desired and actual Feature resources field by field and returns the
+// accumulated dcl.FieldDiffs. Each field's DiffInfo names the remediation: RequiresRecreate for
+// immutable/output-only fields, TriggersOperation("updateFeatureUpdateFeatureOperation") for
+// patchable ones (Labels, Spec).
+func diffFeature(c *Client, desired, actual *Feature, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) {
+	if desired == nil || actual == nil {
+		return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual)
+	}
+
+	c.Config.Logger.Infof("Diff function called with desired state: %v", desired)
+	c.Config.Logger.Infof("Diff function called with actual state: %v", actual)
+
+	var fn dcl.FieldName
+	var newDiffs []*dcl.FieldDiff
+	// New style diffs.
+	if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.Labels, actual.Labels, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateFeatureUpdateFeatureOperation")}, fn.AddNest("Labels")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.ResourceState, actual.ResourceState, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareFeatureResourceStateNewStyle, EmptyObject: EmptyFeatureResourceState, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ResourceState")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.Spec, actual.Spec, dcl.DiffInfo{ObjectFunction: compareFeatureSpecNewStyle, EmptyObject: EmptyFeatureSpec, OperationSelector: dcl.TriggersOperation("updateFeatureUpdateFeatureOperation")}, fn.AddNest("Spec")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.State, actual.State, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareFeatureStateNewStyle, EmptyObject: EmptyFeatureState, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("State")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.CreateTime, actual.CreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreateTime")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.DeleteTime, actual.DeleteTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DeleteTime")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.Location, actual.Location, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Location")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if len(newDiffs) > 0 {
+		c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs)
+	}
+	return newDiffs, nil
+}
+
+// compareFeatureResourceStateNewStyle diffs two FeatureResourceState values (accepted as pointer or
+// value) field by field, returning a FieldDiff per mismatch.
+func compareFeatureResourceStateNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
+	var diffs []*dcl.FieldDiff
+
+	desired, ok := d.(*FeatureResourceState)
+	if !ok {
+		desiredNotPointer, ok := d.(FeatureResourceState)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a FeatureResourceState or *FeatureResourceState", d)
+		}
+		desired = &desiredNotPointer
+	}
+	actual, ok := a.(*FeatureResourceState)
+	if !ok {
+		actualNotPointer, ok := a.(FeatureResourceState)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a FeatureResourceState", a)
+		}
+		actual = &actualNotPointer
+	}
+
+	if ds, err := dcl.Diff(desired.State, actual.State, dcl.DiffInfo{OutputOnly: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("State")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.HasResources, actual.HasResources, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("HasResources")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+	return diffs, nil
+}
+
+// compareFeatureSpecNewStyle diffs two FeatureSpec values (pointer or value form), delegating each
+// nested message to its own compare function.
+func compareFeatureSpecNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
+	var diffs []*dcl.FieldDiff
+
+	desired, ok := d.(*FeatureSpec)
+	if !ok {
+		desiredNotPointer, ok := d.(FeatureSpec)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a FeatureSpec or *FeatureSpec", d)
+		}
+		desired = &desiredNotPointer
+	}
+	actual, ok := a.(*FeatureSpec)
+	if !ok {
+		actualNotPointer, ok := a.(FeatureSpec)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a FeatureSpec", a)
+		}
+		actual = &actualNotPointer
+	}
+
+	if ds, err := dcl.Diff(desired.Multiclusteringress, actual.Multiclusteringress, dcl.DiffInfo{ObjectFunction: compareFeatureSpecMulticlusteringressNewStyle, EmptyObject: EmptyFeatureSpecMulticlusteringress, OperationSelector: dcl.TriggersOperation("updateFeatureUpdateFeatureOperation")}, fn.AddNest("Multiclusteringress")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.Fleetobservability, actual.Fleetobservability, dcl.DiffInfo{ObjectFunction: compareFeatureSpecFleetobservabilityNewStyle, EmptyObject: EmptyFeatureSpecFleetobservability, OperationSelector: dcl.TriggersOperation("updateFeatureUpdateFeatureOperation")}, fn.AddNest("Fleetobservability")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+	return diffs, nil
+}
+
+// compareFeatureSpecMulticlusteringressNewStyle diffs two FeatureSpecMulticlusteringress values
+// (pointer or value form); ConfigMembership is compared as a resource reference.
+func compareFeatureSpecMulticlusteringressNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
+	var diffs []*dcl.FieldDiff
+
+	desired, ok := d.(*FeatureSpecMulticlusteringress)
+	if !ok {
+		desiredNotPointer, ok := d.(FeatureSpecMulticlusteringress)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a FeatureSpecMulticlusteringress or *FeatureSpecMulticlusteringress", d)
+		}
+		desired = &desiredNotPointer
+	}
+	actual, ok := a.(*FeatureSpecMulticlusteringress)
+	if !ok {
+		actualNotPointer, ok := a.(FeatureSpecMulticlusteringress)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a FeatureSpecMulticlusteringress", a)
+		}
+		actual = &actualNotPointer
+	}
+
+	if ds, err := dcl.Diff(desired.ConfigMembership, actual.ConfigMembership, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.TriggersOperation("updateFeatureUpdateFeatureOperation")}, fn.AddNest("ConfigMembership")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+	return diffs, nil
+}
+
+// compareFeatureSpecFleetobservabilityNewStyle diffs two FeatureSpecFleetobservability values
+// (pointer or value form), delegating LoggingConfig to its nested compare function.
+func compareFeatureSpecFleetobservabilityNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
+	var diffs []*dcl.FieldDiff
+
+	desired, ok := d.(*FeatureSpecFleetobservability)
+	if !ok {
+		desiredNotPointer, ok := d.(FeatureSpecFleetobservability)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a FeatureSpecFleetobservability or *FeatureSpecFleetobservability", d)
+		}
+		desired = &desiredNotPointer
+	}
+	actual, ok := a.(*FeatureSpecFleetobservability)
+	if !ok {
+		actualNotPointer, ok := a.(FeatureSpecFleetobservability)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a FeatureSpecFleetobservability", a)
+		}
+		actual = &actualNotPointer
+	}
+
+	if ds, err := dcl.Diff(desired.LoggingConfig, actual.LoggingConfig, dcl.DiffInfo{ObjectFunction: compareFeatureSpecFleetobservabilityLoggingConfigNewStyle, EmptyObject: EmptyFeatureSpecFleetobservabilityLoggingConfig, OperationSelector: dcl.TriggersOperation("updateFeatureUpdateFeatureOperation")}, fn.AddNest("LoggingConfig")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+	return diffs, nil
+}
+
+// compareFeatureSpecFleetobservabilityLoggingConfigNewStyle diffs two logging-config values
+// (pointer or value form), delegating DefaultConfig and FleetScopeLogsConfig to nested compares.
+func compareFeatureSpecFleetobservabilityLoggingConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
+	var diffs []*dcl.FieldDiff
+
+	desired, ok := d.(*FeatureSpecFleetobservabilityLoggingConfig)
+	if !ok {
+		desiredNotPointer, ok := d.(FeatureSpecFleetobservabilityLoggingConfig)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a FeatureSpecFleetobservabilityLoggingConfig or *FeatureSpecFleetobservabilityLoggingConfig", d)
+		}
+		desired = &desiredNotPointer
+	}
+	actual, ok := a.(*FeatureSpecFleetobservabilityLoggingConfig)
+	if !ok {
+		actualNotPointer, ok := a.(FeatureSpecFleetobservabilityLoggingConfig)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a FeatureSpecFleetobservabilityLoggingConfig", a)
+		}
+		actual = &actualNotPointer
+	}
+
+	if ds, err := dcl.Diff(desired.DefaultConfig, actual.DefaultConfig, dcl.DiffInfo{ObjectFunction: compareFeatureSpecFleetobservabilityLoggingConfigDefaultConfigNewStyle, EmptyObject: EmptyFeatureSpecFleetobservabilityLoggingConfigDefaultConfig, OperationSelector: dcl.TriggersOperation("updateFeatureUpdateFeatureOperation")}, fn.AddNest("DefaultConfig")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.FleetScopeLogsConfig, actual.FleetScopeLogsConfig, dcl.DiffInfo{ObjectFunction: compareFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigNewStyle, EmptyObject: EmptyFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig, OperationSelector: dcl.TriggersOperation("updateFeatureUpdateFeatureOperation")}, fn.AddNest("FleetScopeLogsConfig")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+	return diffs, nil
+}
+
+// compareFeatureSpecFleetobservabilityLoggingConfigDefaultConfigNewStyle diffs two default-config
+// values (pointer or value form); Mode is compared as an enum.
+func compareFeatureSpecFleetobservabilityLoggingConfigDefaultConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
+	var diffs []*dcl.FieldDiff
+
+	desired, ok := d.(*FeatureSpecFleetobservabilityLoggingConfigDefaultConfig)
+	if !ok {
+		desiredNotPointer, ok := d.(FeatureSpecFleetobservabilityLoggingConfigDefaultConfig)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a FeatureSpecFleetobservabilityLoggingConfigDefaultConfig or *FeatureSpecFleetobservabilityLoggingConfigDefaultConfig", d)
+		}
+		desired = &desiredNotPointer
+	}
+	actual, ok := a.(*FeatureSpecFleetobservabilityLoggingConfigDefaultConfig)
+	if !ok {
+		actualNotPointer, ok := a.(FeatureSpecFleetobservabilityLoggingConfigDefaultConfig)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a FeatureSpecFleetobservabilityLoggingConfigDefaultConfig", a)
+		}
+		actual = &actualNotPointer
+	}
+
+	if ds, err := dcl.Diff(desired.Mode, actual.Mode, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateFeatureUpdateFeatureOperation")}, fn.AddNest("Mode")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+	return diffs, nil
+}
+
+// compareFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigNewStyle diffs two
+// fleet-scope-logs-config values (pointer or value form); Mode is compared as an enum.
+func compareFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
+	var diffs []*dcl.FieldDiff
+
+	desired, ok := d.(*FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig)
+	if !ok {
+		desiredNotPointer, ok := d.(FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig or *FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig", d)
+		}
+		desired = &desiredNotPointer
+	}
+	actual, ok := a.(*FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig)
+	if !ok {
+		actualNotPointer, ok := a.(FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig", a)
+		}
+		actual = &actualNotPointer
+	}
+
+	if ds, err := dcl.Diff(desired.Mode, actual.Mode, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateFeatureUpdateFeatureOperation")}, fn.AddNest("Mode")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+	return diffs, nil
+}
+
+// compareFeatureStateNewStyle diffs two FeatureState values (pointer or value form), delegating the
+// nested State message to its own compare function.
+func compareFeatureStateNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
+	var diffs []*dcl.FieldDiff
+
+	desired, ok := d.(*FeatureState)
+	if !ok {
+		desiredNotPointer, ok := d.(FeatureState)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a FeatureState or *FeatureState", d)
+		}
+		desired = &desiredNotPointer
+	}
+	actual, ok := a.(*FeatureState)
+	if !ok {
+		actualNotPointer, ok := a.(FeatureState)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a FeatureState", a)
+		}
+		actual = &actualNotPointer
+	}
+
+	if ds, err := dcl.Diff(desired.State, actual.State, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareFeatureStateStateNewStyle, EmptyObject: EmptyFeatureStateState, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("State")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+	return diffs, nil
+}
+
+// compareFeatureStateStateNewStyle diffs two FeatureStateState values (pointer or value form); all
+// three fields are output-only.
+func compareFeatureStateStateNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
+	var diffs []*dcl.FieldDiff
+
+	desired, ok := d.(*FeatureStateState)
+	if !ok {
+		desiredNotPointer, ok := d.(FeatureStateState)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a FeatureStateState or *FeatureStateState", d)
+		}
+		desired = &desiredNotPointer
+	}
+	actual, ok := a.(*FeatureStateState)
+	if !ok {
+		actualNotPointer, ok := a.(FeatureStateState)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a FeatureStateState", a)
+		}
+		actual = &actualNotPointer
+	}
+
+	if ds, err := dcl.Diff(desired.Code, actual.Code, dcl.DiffInfo{OutputOnly: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Code")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.Description, actual.Description, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Description")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+	return diffs, nil
+}
+
+// urlNormalized returns a copy of the resource struct with values normalized
+// for URL substitutions. For instance, it converts long-form self-links to
+// short-form so they can be substituted in.
+func (r *Feature) urlNormalized() *Feature {
+	normalized := dcl.Copy(*r).(Feature)
+	normalized.Name = dcl.SelfLinkToName(r.Name)
+	normalized.Project = dcl.SelfLinkToName(r.Project)
+	normalized.Location = dcl.SelfLinkToName(r.Location)
+	return &normalized
+}
+
+// updateURL builds the request URL for the named update operation. The {{ "{{" }}...{{ "}}" }}
+// sequences are Go-template escapes in this .tmpl file and render as literal {{...}} placeholders
+// for dcl.URL substitution.
+func (r *Feature) updateURL(userBasePath, updateName string) (string, error) {
+	nr := r.urlNormalized()
+	if updateName == "UpdateFeature" {
+		fields := map[string]interface{}{
+			"project":  dcl.ValueOrEmptyString(nr.Project),
+			"location": dcl.ValueOrEmptyString(nr.Location),
+			"name":     dcl.ValueOrEmptyString(nr.Name),
+		}
+		return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/features/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, fields), nil
+
+	}
+
+	return "", fmt.Errorf("unknown update name: %s", updateName)
+}
+
+// marshal encodes the Feature resource into JSON for a Create request, and
+// performs transformations from the resource schema to the API schema if
+// necessary.
+func (r *Feature) marshal(c *Client) ([]byte, error) {
+	m, err := expandFeature(c, r)
+	if err != nil {
+		return nil, fmt.Errorf("error marshalling Feature: %w", err)
+	}
+
+	return json.Marshal(m)
+}
+
+// unmarshalFeature decodes JSON responses into the Feature resource schema.
+func unmarshalFeature(b []byte, c *Client, res *Feature) (*Feature, error) {
+	var m map[string]interface{}
+	if err := json.Unmarshal(b, &m); err != nil {
+		return nil, err
+	}
+	return unmarshalMapFeature(m, c, res)
+}
+
+// unmarshalMapFeature flattens an already-decoded JSON map into a Feature,
+// erroring if the map flattens to nothing.
+func unmarshalMapFeature(m map[string]interface{}, c *Client, res *Feature) (*Feature, error) {
+
+	flattened := flattenFeature(c, m, res)
+	if flattened == nil {
+		return nil, fmt.Errorf("attempted to flatten empty json object")
+	}
+	return flattened, nil
+}
+
+// expandFeature expands Feature into a JSON request object.
+// Note: Project and Location expand via dcl.EmptyValue() — they are URL parameters,
+// not request-body fields, so they are intentionally omitted from the map.
+func expandFeature(c *Client, f *Feature) (map[string]interface{}, error) {
+	m := make(map[string]interface{})
+	res := f
+	_ = res
+	if v, err := dcl.DeriveField("projects/%s/locations/%s/features/%s", f.Name, dcl.SelfLinkToName(f.Project), dcl.SelfLinkToName(f.Location), dcl.SelfLinkToName(f.Name)); err != nil {
+		return nil, fmt.Errorf("error expanding Name into name: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["name"] = v
+	}
+	if v := f.Labels; dcl.ValueShouldBeSent(v) {
+		m["labels"] = v
+	}
+	if v, err := expandFeatureSpec(c, f.Spec, res); err != nil {
+		return nil, fmt.Errorf("error expanding Spec into spec: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["spec"] = v
+	}
+	if v, err := dcl.EmptyValue(); err != nil {
+		return nil, fmt.Errorf("error expanding Project into project: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["project"] = v
+	}
+	if v, err := dcl.EmptyValue(); err != nil {
+		return nil, fmt.Errorf("error expanding Location into location: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["location"] = v
+	}
+
+	return m, nil
+}
+
+// flattenFeature flattens Feature from a JSON request object into the
+// Feature type. Returns nil when the input is not a map or is empty.
+func flattenFeature(c *Client, i interface{}, res *Feature) *Feature {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+	if len(m) == 0 {
+		return nil
+	}
+
+	resultRes := &Feature{}
+	resultRes.Name = dcl.FlattenString(m["name"])
+	resultRes.Labels = dcl.FlattenKeyValuePairs(m["labels"])
+	resultRes.ResourceState = flattenFeatureResourceState(c, m["resourceState"], res)
+	resultRes.Spec = flattenFeatureSpec(c, m["spec"], res)
+	resultRes.State = flattenFeatureState(c, m["state"], res)
+	resultRes.CreateTime = dcl.FlattenString(m["createTime"])
+	resultRes.UpdateTime = dcl.FlattenString(m["updateTime"])
+	resultRes.DeleteTime = dcl.FlattenString(m["deleteTime"])
+	resultRes.Project = dcl.FlattenString(m["project"])
+	resultRes.Location = dcl.FlattenString(m["location"])
+
+	return resultRes
+}
+
+// expandFeatureResourceStateMap expands the contents of FeatureResourceState into a JSON
+// request object.
+func expandFeatureResourceStateMap(c *Client, f map[string]FeatureResourceState, res *Feature) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandFeatureResourceState(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandFeatureResourceStateSlice expands the contents of FeatureResourceState into a JSON
+// request object.
+func expandFeatureResourceStateSlice(c *Client, f []FeatureResourceState, res *Feature) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandFeatureResourceState(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenFeatureResourceStateMap flattens the contents of FeatureResourceState from a JSON
+// response object.
+func flattenFeatureResourceStateMap(c *Client, i interface{}, res *Feature) map[string]FeatureResourceState {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]FeatureResourceState{}
+	}
+
+	if len(a) == 0 {
+		return map[string]FeatureResourceState{}
+	}
+
+	items := make(map[string]FeatureResourceState)
+	for k, item := range a {
+		items[k] = *flattenFeatureResourceState(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenFeatureResourceStateSlice flattens the contents of FeatureResourceState from a JSON
+// response object.
+func flattenFeatureResourceStateSlice(c *Client, i interface{}, res *Feature) []FeatureResourceState {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []FeatureResourceState{}
+	}
+
+	if len(a) == 0 {
+		return []FeatureResourceState{}
+	}
+
+	items := make([]FeatureResourceState, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenFeatureResourceState(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandFeatureResourceState expands an instance of FeatureResourceState into a JSON
+// request object. All fields are output-only, so a non-empty input produces an empty map.
+func expandFeatureResourceState(c *Client, f *FeatureResourceState, res *Feature) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+
+	return m, nil
+}
+
+// flattenFeatureResourceState flattens an instance of FeatureResourceState from a JSON
+// response object.
+func flattenFeatureResourceState(c *Client, i interface{}, res *Feature) *FeatureResourceState {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &FeatureResourceState{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyFeatureResourceState
+	}
+	r.State = flattenFeatureResourceStateStateEnum(m["state"])
+	r.HasResources = dcl.FlattenBool(m["hasResources"])
+
+	return r
+}
+
+// expandFeatureSpecMap expands the contents of FeatureSpec into a JSON
+// request object.
+func expandFeatureSpecMap(c *Client, f map[string]FeatureSpec, res *Feature) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandFeatureSpec(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandFeatureSpecSlice expands the contents of FeatureSpec into a JSON
+// request object.
+func expandFeatureSpecSlice(c *Client, f []FeatureSpec, res *Feature) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandFeatureSpec(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenFeatureSpecMap flattens the contents of FeatureSpec from a JSON
+// response object.
+func flattenFeatureSpecMap(c *Client, i interface{}, res *Feature) map[string]FeatureSpec {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]FeatureSpec{}
+	}
+
+	if len(a) == 0 {
+		return map[string]FeatureSpec{}
+	}
+
+	items := make(map[string]FeatureSpec)
+	for k, item := range a {
+		items[k] = *flattenFeatureSpec(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenFeatureSpecSlice flattens the contents of FeatureSpec from a JSON
+// response object.
+func flattenFeatureSpecSlice(c *Client, i interface{}, res *Feature) []FeatureSpec {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []FeatureSpec{}
+	}
+
+	if len(a) == 0 {
+		return []FeatureSpec{}
+	}
+
+	items := make([]FeatureSpec, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenFeatureSpec(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandFeatureSpec expands an instance of FeatureSpec into a JSON
+// request object.
+func expandFeatureSpec(c *Client, f *FeatureSpec, res *Feature) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v, err := expandFeatureSpecMulticlusteringress(c, f.Multiclusteringress, res); err != nil {
+		return nil, fmt.Errorf("error expanding Multiclusteringress into multiclusteringress: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["multiclusteringress"] = v
+	}
+	if v, err := expandFeatureSpecFleetobservability(c, f.Fleetobservability, res); err != nil {
+		return nil, fmt.Errorf("error expanding Fleetobservability into fleetobservability: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["fleetobservability"] = v
+	}
+
+	return m, nil
+}
+
+// flattenFeatureSpec flattens an instance of FeatureSpec from a JSON
+// response object.
+func flattenFeatureSpec(c *Client, i interface{}, res *Feature) *FeatureSpec {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &FeatureSpec{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyFeatureSpec
+	}
+	r.Multiclusteringress = flattenFeatureSpecMulticlusteringress(c, m["multiclusteringress"], res)
+	r.Fleetobservability = flattenFeatureSpecFleetobservability(c, m["fleetobservability"], res)
+
+	return r
+}
+
+// expandFeatureSpecMulticlusteringressMap expands the contents of FeatureSpecMulticlusteringress into a JSON
+// request object.
+func expandFeatureSpecMulticlusteringressMap(c *Client, f map[string]FeatureSpecMulticlusteringress, res *Feature) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandFeatureSpecMulticlusteringress(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandFeatureSpecMulticlusteringressSlice expands the contents of FeatureSpecMulticlusteringress into a JSON
+// request object.
+func expandFeatureSpecMulticlusteringressSlice(c *Client, f []FeatureSpecMulticlusteringress, res *Feature) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandFeatureSpecMulticlusteringress(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenFeatureSpecMulticlusteringressMap flattens the contents of FeatureSpecMulticlusteringress from a JSON
+// response object.
+func flattenFeatureSpecMulticlusteringressMap(c *Client, i interface{}, res *Feature) map[string]FeatureSpecMulticlusteringress {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]FeatureSpecMulticlusteringress{}
+	}
+
+	if len(a) == 0 {
+		return map[string]FeatureSpecMulticlusteringress{}
+	}
+
+	items := make(map[string]FeatureSpecMulticlusteringress)
+	for k, item := range a {
+		items[k] = *flattenFeatureSpecMulticlusteringress(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenFeatureSpecMulticlusteringressSlice flattens the contents of FeatureSpecMulticlusteringress from a JSON
+// response object.
+func flattenFeatureSpecMulticlusteringressSlice(c *Client, i interface{}, res *Feature) []FeatureSpecMulticlusteringress {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []FeatureSpecMulticlusteringress{}
+	}
+
+	if len(a) == 0 {
+		return []FeatureSpecMulticlusteringress{}
+	}
+
+	items := make([]FeatureSpecMulticlusteringress, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenFeatureSpecMulticlusteringress(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandFeatureSpecMulticlusteringress expands an instance of FeatureSpecMulticlusteringress into a JSON
+// request object.
+func expandFeatureSpecMulticlusteringress(c *Client, f *FeatureSpecMulticlusteringress, res *Feature) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.ConfigMembership; !dcl.IsEmptyValueIndirect(v) {
+		m["configMembership"] = v
+	}
+
+	return m, nil
+}
+
+// flattenFeatureSpecMulticlusteringress flattens an instance of FeatureSpecMulticlusteringress from a JSON
+// response object.
+func flattenFeatureSpecMulticlusteringress(c *Client, i interface{}, res *Feature) *FeatureSpecMulticlusteringress {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &FeatureSpecMulticlusteringress{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyFeatureSpecMulticlusteringress
+	}
+	r.ConfigMembership = dcl.FlattenString(m["configMembership"])
+
+	return r
+}
+
+// expandFeatureSpecFleetobservabilityMap expands the contents of FeatureSpecFleetobservability into a JSON
+// request object.
+func expandFeatureSpecFleetobservabilityMap(c *Client, f map[string]FeatureSpecFleetobservability, res *Feature) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandFeatureSpecFleetobservability(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandFeatureSpecFleetobservabilitySlice expands the contents of FeatureSpecFleetobservability into a JSON
+// request object.
+func expandFeatureSpecFleetobservabilitySlice(c *Client, f []FeatureSpecFleetobservability, res *Feature) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandFeatureSpecFleetobservability(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenFeatureSpecFleetobservabilityMap flattens the contents of FeatureSpecFleetobservability from a JSON
+// response object.
+func flattenFeatureSpecFleetobservabilityMap(c *Client, i interface{}, res *Feature) map[string]FeatureSpecFleetobservability {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]FeatureSpecFleetobservability{}
+	}
+
+	if len(a) == 0 {
+		return map[string]FeatureSpecFleetobservability{}
+	}
+
+	items := make(map[string]FeatureSpecFleetobservability)
+	for k, item := range a {
+		items[k] = *flattenFeatureSpecFleetobservability(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenFeatureSpecFleetobservabilitySlice flattens the contents of FeatureSpecFleetobservability from a JSON
+// response object.
+func flattenFeatureSpecFleetobservabilitySlice(c *Client, i interface{}, res *Feature) []FeatureSpecFleetobservability {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []FeatureSpecFleetobservability{}
+	}
+
+	if len(a) == 0 {
+		return []FeatureSpecFleetobservability{}
+	}
+
+	items := make([]FeatureSpecFleetobservability, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenFeatureSpecFleetobservability(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandFeatureSpecFleetobservability expands an instance of FeatureSpecFleetobservability into a JSON
+// request object.
+func expandFeatureSpecFleetobservability(c *Client, f *FeatureSpecFleetobservability, res *Feature) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v, err := expandFeatureSpecFleetobservabilityLoggingConfig(c, f.LoggingConfig, res); err != nil { + return nil, fmt.Errorf("error expanding LoggingConfig into loggingConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["loggingConfig"] = v + } + + return m, nil +} + +// flattenFeatureSpecFleetobservability flattens an instance of FeatureSpecFleetobservability from a JSON +// response object. +func flattenFeatureSpecFleetobservability(c *Client, i interface{}, res *Feature) *FeatureSpecFleetobservability { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &FeatureSpecFleetobservability{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyFeatureSpecFleetobservability + } + r.LoggingConfig = flattenFeatureSpecFleetobservabilityLoggingConfig(c, m["loggingConfig"], res) + + return r +} + +// expandFeatureSpecFleetobservabilityLoggingConfigMap expands the contents of FeatureSpecFleetobservabilityLoggingConfig into a JSON +// request object. +func expandFeatureSpecFleetobservabilityLoggingConfigMap(c *Client, f map[string]FeatureSpecFleetobservabilityLoggingConfig, res *Feature) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandFeatureSpecFleetobservabilityLoggingConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandFeatureSpecFleetobservabilityLoggingConfigSlice expands the contents of FeatureSpecFleetobservabilityLoggingConfig into a JSON +// request object. 
+func expandFeatureSpecFleetobservabilityLoggingConfigSlice(c *Client, f []FeatureSpecFleetobservabilityLoggingConfig, res *Feature) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandFeatureSpecFleetobservabilityLoggingConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenFeatureSpecFleetobservabilityLoggingConfigMap flattens the contents of FeatureSpecFleetobservabilityLoggingConfig from a JSON +// response object. +func flattenFeatureSpecFleetobservabilityLoggingConfigMap(c *Client, i interface{}, res *Feature) map[string]FeatureSpecFleetobservabilityLoggingConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]FeatureSpecFleetobservabilityLoggingConfig{} + } + + if len(a) == 0 { + return map[string]FeatureSpecFleetobservabilityLoggingConfig{} + } + + items := make(map[string]FeatureSpecFleetobservabilityLoggingConfig) + for k, item := range a { + items[k] = *flattenFeatureSpecFleetobservabilityLoggingConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenFeatureSpecFleetobservabilityLoggingConfigSlice flattens the contents of FeatureSpecFleetobservabilityLoggingConfig from a JSON +// response object. 
+func flattenFeatureSpecFleetobservabilityLoggingConfigSlice(c *Client, i interface{}, res *Feature) []FeatureSpecFleetobservabilityLoggingConfig { + a, ok := i.([]interface{}) + if !ok { + return []FeatureSpecFleetobservabilityLoggingConfig{} + } + + if len(a) == 0 { + return []FeatureSpecFleetobservabilityLoggingConfig{} + } + + items := make([]FeatureSpecFleetobservabilityLoggingConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenFeatureSpecFleetobservabilityLoggingConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandFeatureSpecFleetobservabilityLoggingConfig expands an instance of FeatureSpecFleetobservabilityLoggingConfig into a JSON +// request object. +func expandFeatureSpecFleetobservabilityLoggingConfig(c *Client, f *FeatureSpecFleetobservabilityLoggingConfig, res *Feature) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v, err := expandFeatureSpecFleetobservabilityLoggingConfigDefaultConfig(c, f.DefaultConfig, res); err != nil { + return nil, fmt.Errorf("error expanding DefaultConfig into defaultConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["defaultConfig"] = v + } + if v, err := expandFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig(c, f.FleetScopeLogsConfig, res); err != nil { + return nil, fmt.Errorf("error expanding FleetScopeLogsConfig into fleetScopeLogsConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["fleetScopeLogsConfig"] = v + } + + return m, nil +} + +// flattenFeatureSpecFleetobservabilityLoggingConfig flattens an instance of FeatureSpecFleetobservabilityLoggingConfig from a JSON +// response object. 
+func flattenFeatureSpecFleetobservabilityLoggingConfig(c *Client, i interface{}, res *Feature) *FeatureSpecFleetobservabilityLoggingConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &FeatureSpecFleetobservabilityLoggingConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyFeatureSpecFleetobservabilityLoggingConfig + } + r.DefaultConfig = flattenFeatureSpecFleetobservabilityLoggingConfigDefaultConfig(c, m["defaultConfig"], res) + r.FleetScopeLogsConfig = flattenFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig(c, m["fleetScopeLogsConfig"], res) + + return r +} + +// expandFeatureSpecFleetobservabilityLoggingConfigDefaultConfigMap expands the contents of FeatureSpecFleetobservabilityLoggingConfigDefaultConfig into a JSON +// request object. +func expandFeatureSpecFleetobservabilityLoggingConfigDefaultConfigMap(c *Client, f map[string]FeatureSpecFleetobservabilityLoggingConfigDefaultConfig, res *Feature) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandFeatureSpecFleetobservabilityLoggingConfigDefaultConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandFeatureSpecFleetobservabilityLoggingConfigDefaultConfigSlice expands the contents of FeatureSpecFleetobservabilityLoggingConfigDefaultConfig into a JSON +// request object. 
+func expandFeatureSpecFleetobservabilityLoggingConfigDefaultConfigSlice(c *Client, f []FeatureSpecFleetobservabilityLoggingConfigDefaultConfig, res *Feature) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandFeatureSpecFleetobservabilityLoggingConfigDefaultConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenFeatureSpecFleetobservabilityLoggingConfigDefaultConfigMap flattens the contents of FeatureSpecFleetobservabilityLoggingConfigDefaultConfig from a JSON +// response object. +func flattenFeatureSpecFleetobservabilityLoggingConfigDefaultConfigMap(c *Client, i interface{}, res *Feature) map[string]FeatureSpecFleetobservabilityLoggingConfigDefaultConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]FeatureSpecFleetobservabilityLoggingConfigDefaultConfig{} + } + + if len(a) == 0 { + return map[string]FeatureSpecFleetobservabilityLoggingConfigDefaultConfig{} + } + + items := make(map[string]FeatureSpecFleetobservabilityLoggingConfigDefaultConfig) + for k, item := range a { + items[k] = *flattenFeatureSpecFleetobservabilityLoggingConfigDefaultConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenFeatureSpecFleetobservabilityLoggingConfigDefaultConfigSlice flattens the contents of FeatureSpecFleetobservabilityLoggingConfigDefaultConfig from a JSON +// response object. 
+func flattenFeatureSpecFleetobservabilityLoggingConfigDefaultConfigSlice(c *Client, i interface{}, res *Feature) []FeatureSpecFleetobservabilityLoggingConfigDefaultConfig { + a, ok := i.([]interface{}) + if !ok { + return []FeatureSpecFleetobservabilityLoggingConfigDefaultConfig{} + } + + if len(a) == 0 { + return []FeatureSpecFleetobservabilityLoggingConfigDefaultConfig{} + } + + items := make([]FeatureSpecFleetobservabilityLoggingConfigDefaultConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenFeatureSpecFleetobservabilityLoggingConfigDefaultConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandFeatureSpecFleetobservabilityLoggingConfigDefaultConfig expands an instance of FeatureSpecFleetobservabilityLoggingConfigDefaultConfig into a JSON +// request object. +func expandFeatureSpecFleetobservabilityLoggingConfigDefaultConfig(c *Client, f *FeatureSpecFleetobservabilityLoggingConfigDefaultConfig, res *Feature) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Mode; !dcl.IsEmptyValueIndirect(v) { + m["mode"] = v + } + + return m, nil +} + +// flattenFeatureSpecFleetobservabilityLoggingConfigDefaultConfig flattens an instance of FeatureSpecFleetobservabilityLoggingConfigDefaultConfig from a JSON +// response object. 
+func flattenFeatureSpecFleetobservabilityLoggingConfigDefaultConfig(c *Client, i interface{}, res *Feature) *FeatureSpecFleetobservabilityLoggingConfigDefaultConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &FeatureSpecFleetobservabilityLoggingConfigDefaultConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyFeatureSpecFleetobservabilityLoggingConfigDefaultConfig + } + r.Mode = flattenFeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum(m["mode"]) + + return r +} + +// expandFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigMap expands the contents of FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig into a JSON +// request object. +func expandFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigMap(c *Client, f map[string]FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig, res *Feature) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigSlice expands the contents of FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig into a JSON +// request object. 
+func expandFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigSlice(c *Client, f []FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig, res *Feature) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigMap flattens the contents of FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig from a JSON +// response object. +func flattenFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigMap(c *Client, i interface{}, res *Feature) map[string]FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig{} + } + + if len(a) == 0 { + return map[string]FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig{} + } + + items := make(map[string]FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig) + for k, item := range a { + items[k] = *flattenFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigSlice flattens the contents of FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig from a JSON +// response object. 
+func flattenFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigSlice(c *Client, i interface{}, res *Feature) []FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig { + a, ok := i.([]interface{}) + if !ok { + return []FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig{} + } + + if len(a) == 0 { + return []FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig{} + } + + items := make([]FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig expands an instance of FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig into a JSON +// request object. +func expandFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig(c *Client, f *FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig, res *Feature) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Mode; !dcl.IsEmptyValueIndirect(v) { + m["mode"] = v + } + + return m, nil +} + +// flattenFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig flattens an instance of FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig from a JSON +// response object. 
+func flattenFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig(c *Client, i interface{}, res *Feature) *FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig + } + r.Mode = flattenFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum(m["mode"]) + + return r +} + +// expandFeatureStateMap expands the contents of FeatureState into a JSON +// request object. +func expandFeatureStateMap(c *Client, f map[string]FeatureState, res *Feature) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandFeatureState(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandFeatureStateSlice expands the contents of FeatureState into a JSON +// request object. +func expandFeatureStateSlice(c *Client, f []FeatureState, res *Feature) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandFeatureState(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenFeatureStateMap flattens the contents of FeatureState from a JSON +// response object. 
+func flattenFeatureStateMap(c *Client, i interface{}, res *Feature) map[string]FeatureState { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]FeatureState{} + } + + if len(a) == 0 { + return map[string]FeatureState{} + } + + items := make(map[string]FeatureState) + for k, item := range a { + items[k] = *flattenFeatureState(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenFeatureStateSlice flattens the contents of FeatureState from a JSON +// response object. +func flattenFeatureStateSlice(c *Client, i interface{}, res *Feature) []FeatureState { + a, ok := i.([]interface{}) + if !ok { + return []FeatureState{} + } + + if len(a) == 0 { + return []FeatureState{} + } + + items := make([]FeatureState, 0, len(a)) + for _, item := range a { + items = append(items, *flattenFeatureState(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandFeatureState expands an instance of FeatureState into a JSON +// request object. +func expandFeatureState(c *Client, f *FeatureState, res *Feature) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + + return m, nil +} + +// flattenFeatureState flattens an instance of FeatureState from a JSON +// response object. +func flattenFeatureState(c *Client, i interface{}, res *Feature) *FeatureState { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &FeatureState{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyFeatureState + } + r.State = flattenFeatureStateState(c, m["state"], res) + + return r +} + +// expandFeatureStateStateMap expands the contents of FeatureStateState into a JSON +// request object. 
+func expandFeatureStateStateMap(c *Client, f map[string]FeatureStateState, res *Feature) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandFeatureStateState(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandFeatureStateStateSlice expands the contents of FeatureStateState into a JSON +// request object. +func expandFeatureStateStateSlice(c *Client, f []FeatureStateState, res *Feature) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandFeatureStateState(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenFeatureStateStateMap flattens the contents of FeatureStateState from a JSON +// response object. +func flattenFeatureStateStateMap(c *Client, i interface{}, res *Feature) map[string]FeatureStateState { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]FeatureStateState{} + } + + if len(a) == 0 { + return map[string]FeatureStateState{} + } + + items := make(map[string]FeatureStateState) + for k, item := range a { + items[k] = *flattenFeatureStateState(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenFeatureStateStateSlice flattens the contents of FeatureStateState from a JSON +// response object. 
+func flattenFeatureStateStateSlice(c *Client, i interface{}, res *Feature) []FeatureStateState { + a, ok := i.([]interface{}) + if !ok { + return []FeatureStateState{} + } + + if len(a) == 0 { + return []FeatureStateState{} + } + + items := make([]FeatureStateState, 0, len(a)) + for _, item := range a { + items = append(items, *flattenFeatureStateState(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandFeatureStateState expands an instance of FeatureStateState into a JSON +// request object. +func expandFeatureStateState(c *Client, f *FeatureStateState, res *Feature) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + + return m, nil +} + +// flattenFeatureStateState flattens an instance of FeatureStateState from a JSON +// response object. +func flattenFeatureStateState(c *Client, i interface{}, res *Feature) *FeatureStateState { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &FeatureStateState{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyFeatureStateState + } + r.Code = flattenFeatureStateStateCodeEnum(m["code"]) + r.Description = dcl.FlattenString(m["description"]) + r.UpdateTime = dcl.FlattenString(m["updateTime"]) + + return r +} + +// flattenFeatureResourceStateStateEnumMap flattens the contents of FeatureResourceStateStateEnum from a JSON +// response object. 
+func flattenFeatureResourceStateStateEnumMap(c *Client, i interface{}, res *Feature) map[string]FeatureResourceStateStateEnum {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]FeatureResourceStateStateEnum{}
+	}
+
+	if len(a) == 0 {
+		return map[string]FeatureResourceStateStateEnum{}
+	}
+
+	items := make(map[string]FeatureResourceStateStateEnum)
+	for k, item := range a {
+		items[k] = *flattenFeatureResourceStateStateEnum(item.(interface{}))
+	}
+
+	return items
+}
+
+// flattenFeatureResourceStateStateEnumSlice flattens the contents of FeatureResourceStateStateEnum from a JSON
+// response object.
+func flattenFeatureResourceStateStateEnumSlice(c *Client, i interface{}, res *Feature) []FeatureResourceStateStateEnum {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []FeatureResourceStateStateEnum{}
+	}
+
+	if len(a) == 0 {
+		return []FeatureResourceStateStateEnum{}
+	}
+
+	items := make([]FeatureResourceStateStateEnum, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenFeatureResourceStateStateEnum(item.(interface{})))
+	}
+
+	return items
+}
+
+// flattenFeatureResourceStateStateEnum asserts that an interface is a string, and returns a
+// pointer to a FeatureResourceStateStateEnum with the same value as that string.
+func flattenFeatureResourceStateStateEnum(i interface{}) *FeatureResourceStateStateEnum {
+	s, ok := i.(string)
+	if !ok {
+		return nil
+	}
+
+	return FeatureResourceStateStateEnumRef(s)
+}
+
+// flattenFeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnumMap flattens the contents of FeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum from a JSON
+// response object.
+func flattenFeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnumMap(c *Client, i interface{}, res *Feature) map[string]FeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]FeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum{}
+	}
+
+	if len(a) == 0 {
+		return map[string]FeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum{}
+	}
+
+	items := make(map[string]FeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum)
+	for k, item := range a {
+		items[k] = *flattenFeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum(item.(interface{}))
+	}
+
+	return items
+}
+
+// flattenFeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnumSlice flattens the contents of FeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum from a JSON
+// response object.
+func flattenFeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnumSlice(c *Client, i interface{}, res *Feature) []FeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []FeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum{}
+	}
+
+	if len(a) == 0 {
+		return []FeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum{}
+	}
+
+	items := make([]FeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenFeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum(item.(interface{})))
+	}
+
+	return items
+}
+
+// flattenFeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum asserts that an interface is a string, and returns a
+// pointer to a FeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum with the same value as that string.
+func flattenFeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum(i interface{}) *FeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return FeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnumRef(s) +} + +// flattenFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnumMap flattens the contents of FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum from a JSON +// response object. +func flattenFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnumMap(c *Client, i interface{}, res *Feature) map[string]FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum{} + } + + if len(a) == 0 { + return map[string]FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum{} + } + + items := make(map[string]FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum) + for k, item := range a { + items[k] = *flattenFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum(item.(interface{})) + } + + return items +} + +// flattenFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnumSlice flattens the contents of FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum from a JSON +// response object. 
+func flattenFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnumSlice(c *Client, i interface{}, res *Feature) []FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum{}
+	}
+
+	if len(a) == 0 {
+		return []FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum{}
+	}
+
+	items := make([]FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum(item.(interface{})))
+	}
+
+	return items
+}
+
+// flattenFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum asserts that an interface is a string, and returns a
+// pointer to a FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum with the same value as that string.
+func flattenFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum(i interface{}) *FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum {
+	s, ok := i.(string)
+	if !ok {
+		return nil
+	}
+
+	return FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnumRef(s)
+}
+
+// flattenFeatureStateStateCodeEnumMap flattens the contents of FeatureStateStateCodeEnum from a JSON
+// response object.
+func flattenFeatureStateStateCodeEnumMap(c *Client, i interface{}, res *Feature) map[string]FeatureStateStateCodeEnum {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]FeatureStateStateCodeEnum{}
+	}
+
+	if len(a) == 0 {
+		return map[string]FeatureStateStateCodeEnum{}
+	}
+
+	items := make(map[string]FeatureStateStateCodeEnum)
+	for k, item := range a {
+		items[k] = *flattenFeatureStateStateCodeEnum(item.(interface{}))
+	}
+
+	return items
+}
+
+// flattenFeatureStateStateCodeEnumSlice flattens the contents of FeatureStateStateCodeEnum from a JSON
+// response object.
+func flattenFeatureStateStateCodeEnumSlice(c *Client, i interface{}, res *Feature) []FeatureStateStateCodeEnum {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []FeatureStateStateCodeEnum{}
+	}
+
+	if len(a) == 0 {
+		return []FeatureStateStateCodeEnum{}
+	}
+
+	items := make([]FeatureStateStateCodeEnum, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenFeatureStateStateCodeEnum(item.(interface{})))
+	}
+
+	return items
+}
+
+// flattenFeatureStateStateCodeEnum asserts that an interface is a string, and returns a
+// pointer to a FeatureStateStateCodeEnum with the same value as that string.
+func flattenFeatureStateStateCodeEnum(i interface{}) *FeatureStateStateCodeEnum {
+	s, ok := i.(string)
+	if !ok {
+		return nil
+	}
+
+	return FeatureStateStateCodeEnumRef(s)
+}
+
+// This function returns a matcher that checks whether a serialized resource matches this resource
+// in its parameters (as defined by the fields in a Get, which definitionally define resource
+// identity). This is useful in extracting the element from a List call.
+func (r *Feature) matcher(c *Client) func([]byte) bool { + return func(b []byte) bool { + cr, err := unmarshalFeature(b, c, r) + if err != nil { + c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.") + return false + } + nr := r.urlNormalized() + ncr := cr.urlNormalized() + c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr) + + if nr.Project == nil && ncr.Project == nil { + c.Config.Logger.Info("Both Project fields null - considering equal.") + } else if nr.Project == nil || ncr.Project == nil { + c.Config.Logger.Info("Only one Project field is null - considering unequal.") + return false + } else if *nr.Project != *ncr.Project { + return false + } + if nr.Location == nil && ncr.Location == nil { + c.Config.Logger.Info("Both Location fields null - considering equal.") + } else if nr.Location == nil || ncr.Location == nil { + c.Config.Logger.Info("Only one Location field is null - considering unequal.") + return false + } else if *nr.Location != *ncr.Location { + return false + } + if nr.Name == nil && ncr.Name == nil { + c.Config.Logger.Info("Both Name fields null - considering equal.") + } else if nr.Name == nil || ncr.Name == nil { + c.Config.Logger.Info("Only one Name field is null - considering unequal.") + return false + } else if *nr.Name != *ncr.Name { + return false + } + return true + } +} + +type featureDiff struct { + // The diff should include one or the other of RequiresRecreate or UpdateOp. + RequiresRecreate bool + UpdateOp featureApiOperation + FieldName string // used for error logging +} + +func convertFieldDiffsToFeatureDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]featureDiff, error) { + opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff) + // Map each operation name to the field diffs associated with it. 
+ for _, fd := range fds { + for _, ro := range fd.ResultingOperation { + if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok { + fieldDiffs = append(fieldDiffs, fd) + opNamesToFieldDiffs[ro] = fieldDiffs + } else { + config.Logger.Infof("%s required due to diff: %v", ro, fd) + opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd} + } + } + } + var diffs []featureDiff + // For each operation name, create a featureDiff which contains the operation. + for opName, fieldDiffs := range opNamesToFieldDiffs { + // Use the first field diff's field name for logging required recreate error. + diff := featureDiff{FieldName: fieldDiffs[0].FieldName} + if opName == "Recreate" { + diff.RequiresRecreate = true + } else { + apiOp, err := convertOpNameToFeatureApiOperation(opName, fieldDiffs, opts...) + if err != nil { + return diffs, err + } + diff.UpdateOp = apiOp + } + diffs = append(diffs, diff) + } + return diffs, nil +} + +func convertOpNameToFeatureApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (featureApiOperation, error) { + switch opName { + + case "updateFeatureUpdateFeatureOperation": + return &updateFeatureUpdateFeatureOperation{FieldDiffs: fieldDiffs}, nil + + default: + return nil, fmt.Errorf("no such operation with name: %v", opName) + } +} + +func extractFeatureFields(r *Feature) error { + vResourceState := r.ResourceState + if vResourceState == nil { + // note: explicitly not the empty object. + vResourceState = &FeatureResourceState{} + } + if err := extractFeatureResourceStateFields(r, vResourceState); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vResourceState) { + r.ResourceState = vResourceState + } + vSpec := r.Spec + if vSpec == nil { + // note: explicitly not the empty object. 
+ vSpec = &FeatureSpec{} + } + if err := extractFeatureSpecFields(r, vSpec); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSpec) { + r.Spec = vSpec + } + vState := r.State + if vState == nil { + // note: explicitly not the empty object. + vState = &FeatureState{} + } + if err := extractFeatureStateFields(r, vState); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vState) { + r.State = vState + } + return nil +} +func extractFeatureResourceStateFields(r *Feature, o *FeatureResourceState) error { + return nil +} +func extractFeatureSpecFields(r *Feature, o *FeatureSpec) error { + vMulticlusteringress := o.Multiclusteringress + if vMulticlusteringress == nil { + // note: explicitly not the empty object. + vMulticlusteringress = &FeatureSpecMulticlusteringress{} + } + if err := extractFeatureSpecMulticlusteringressFields(r, vMulticlusteringress); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMulticlusteringress) { + o.Multiclusteringress = vMulticlusteringress + } + vFleetobservability := o.Fleetobservability + if vFleetobservability == nil { + // note: explicitly not the empty object. + vFleetobservability = &FeatureSpecFleetobservability{} + } + if err := extractFeatureSpecFleetobservabilityFields(r, vFleetobservability); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vFleetobservability) { + o.Fleetobservability = vFleetobservability + } + return nil +} +func extractFeatureSpecMulticlusteringressFields(r *Feature, o *FeatureSpecMulticlusteringress) error { + return nil +} +func extractFeatureSpecFleetobservabilityFields(r *Feature, o *FeatureSpecFleetobservability) error { + vLoggingConfig := o.LoggingConfig + if vLoggingConfig == nil { + // note: explicitly not the empty object. 
+ vLoggingConfig = &FeatureSpecFleetobservabilityLoggingConfig{} + } + if err := extractFeatureSpecFleetobservabilityLoggingConfigFields(r, vLoggingConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vLoggingConfig) { + o.LoggingConfig = vLoggingConfig + } + return nil +} +func extractFeatureSpecFleetobservabilityLoggingConfigFields(r *Feature, o *FeatureSpecFleetobservabilityLoggingConfig) error { + vDefaultConfig := o.DefaultConfig + if vDefaultConfig == nil { + // note: explicitly not the empty object. + vDefaultConfig = &FeatureSpecFleetobservabilityLoggingConfigDefaultConfig{} + } + if err := extractFeatureSpecFleetobservabilityLoggingConfigDefaultConfigFields(r, vDefaultConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vDefaultConfig) { + o.DefaultConfig = vDefaultConfig + } + vFleetScopeLogsConfig := o.FleetScopeLogsConfig + if vFleetScopeLogsConfig == nil { + // note: explicitly not the empty object. + vFleetScopeLogsConfig = &FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig{} + } + if err := extractFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigFields(r, vFleetScopeLogsConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vFleetScopeLogsConfig) { + o.FleetScopeLogsConfig = vFleetScopeLogsConfig + } + return nil +} +func extractFeatureSpecFleetobservabilityLoggingConfigDefaultConfigFields(r *Feature, o *FeatureSpecFleetobservabilityLoggingConfigDefaultConfig) error { + return nil +} +func extractFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigFields(r *Feature, o *FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig) error { + return nil +} +func extractFeatureStateFields(r *Feature, o *FeatureState) error { + vState := o.State + if vState == nil { + // note: explicitly not the empty object. 
+ vState = &FeatureStateState{} + } + if err := extractFeatureStateStateFields(r, vState); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vState) { + o.State = vState + } + return nil +} +func extractFeatureStateStateFields(r *Feature, o *FeatureStateState) error { + return nil +} + +func postReadExtractFeatureFields(r *Feature) error { + vResourceState := r.ResourceState + if vResourceState == nil { + // note: explicitly not the empty object. + vResourceState = &FeatureResourceState{} + } + if err := postReadExtractFeatureResourceStateFields(r, vResourceState); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vResourceState) { + r.ResourceState = vResourceState + } + vSpec := r.Spec + if vSpec == nil { + // note: explicitly not the empty object. + vSpec = &FeatureSpec{} + } + if err := postReadExtractFeatureSpecFields(r, vSpec); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vSpec) { + r.Spec = vSpec + } + vState := r.State + if vState == nil { + // note: explicitly not the empty object. + vState = &FeatureState{} + } + if err := postReadExtractFeatureStateFields(r, vState); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vState) { + r.State = vState + } + return nil +} +func postReadExtractFeatureResourceStateFields(r *Feature, o *FeatureResourceState) error { + return nil +} +func postReadExtractFeatureSpecFields(r *Feature, o *FeatureSpec) error { + vMulticlusteringress := o.Multiclusteringress + if vMulticlusteringress == nil { + // note: explicitly not the empty object. + vMulticlusteringress = &FeatureSpecMulticlusteringress{} + } + if err := extractFeatureSpecMulticlusteringressFields(r, vMulticlusteringress); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMulticlusteringress) { + o.Multiclusteringress = vMulticlusteringress + } + vFleetobservability := o.Fleetobservability + if vFleetobservability == nil { + // note: explicitly not the empty object. 
+ vFleetobservability = &FeatureSpecFleetobservability{} + } + if err := extractFeatureSpecFleetobservabilityFields(r, vFleetobservability); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vFleetobservability) { + o.Fleetobservability = vFleetobservability + } + return nil +} +func postReadExtractFeatureSpecMulticlusteringressFields(r *Feature, o *FeatureSpecMulticlusteringress) error { + return nil +} +func postReadExtractFeatureSpecFleetobservabilityFields(r *Feature, o *FeatureSpecFleetobservability) error { + vLoggingConfig := o.LoggingConfig + if vLoggingConfig == nil { + // note: explicitly not the empty object. + vLoggingConfig = &FeatureSpecFleetobservabilityLoggingConfig{} + } + if err := extractFeatureSpecFleetobservabilityLoggingConfigFields(r, vLoggingConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vLoggingConfig) { + o.LoggingConfig = vLoggingConfig + } + return nil +} +func postReadExtractFeatureSpecFleetobservabilityLoggingConfigFields(r *Feature, o *FeatureSpecFleetobservabilityLoggingConfig) error { + vDefaultConfig := o.DefaultConfig + if vDefaultConfig == nil { + // note: explicitly not the empty object. + vDefaultConfig = &FeatureSpecFleetobservabilityLoggingConfigDefaultConfig{} + } + if err := extractFeatureSpecFleetobservabilityLoggingConfigDefaultConfigFields(r, vDefaultConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vDefaultConfig) { + o.DefaultConfig = vDefaultConfig + } + vFleetScopeLogsConfig := o.FleetScopeLogsConfig + if vFleetScopeLogsConfig == nil { + // note: explicitly not the empty object. 
+ vFleetScopeLogsConfig = &FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig{} + } + if err := extractFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigFields(r, vFleetScopeLogsConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vFleetScopeLogsConfig) { + o.FleetScopeLogsConfig = vFleetScopeLogsConfig + } + return nil +} +func postReadExtractFeatureSpecFleetobservabilityLoggingConfigDefaultConfigFields(r *Feature, o *FeatureSpecFleetobservabilityLoggingConfigDefaultConfig) error { + return nil +} +func postReadExtractFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigFields(r *Feature, o *FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig) error { + return nil +} +func postReadExtractFeatureStateFields(r *Feature, o *FeatureState) error { + vState := o.State + if vState == nil { + // note: explicitly not the empty object. + vState = &FeatureStateState{} + } + if err := extractFeatureStateStateFields(r, vState); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vState) { + o.State = vState + } + return nil +} +func postReadExtractFeatureStateStateFields(r *Feature, o *FeatureStateState) error { + return nil +} diff --git a/mmv1/third_party/terraform/services/gkehub/feature_membership.go.tmpl b/mmv1/third_party/terraform/services/gkehub/feature_membership.go.tmpl new file mode 100644 index 000000000000..b863f3549e60 --- /dev/null +++ b/mmv1/third_party/terraform/services/gkehub/feature_membership.go.tmpl @@ -0,0 +1,1643 @@ +package gkehub + +import ( + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "time" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +type FeatureMembership struct { + Mesh *FeatureMembershipMesh `json:"mesh"` + Configmanagement *FeatureMembershipConfigmanagement `json:"configmanagement"` + Policycontroller *FeatureMembershipPolicycontroller `json:"policycontroller"` + Project *string `json:"project"` + Location *string 
`json:"location"` + Feature *string `json:"feature"` + Membership *string `json:"membership"` + MembershipLocation *string `json:"membershipLocation"` +} + +func (r *FeatureMembership) String() string { + return dcl.SprintResource(r) +} + +// The enum FeatureMembershipMeshManagementEnum. +type FeatureMembershipMeshManagementEnum string + +// FeatureMembershipMeshManagementEnumRef returns a *FeatureMembershipMeshManagementEnum with the value of string s +// If the empty string is provided, nil is returned. +func FeatureMembershipMeshManagementEnumRef(s string) *FeatureMembershipMeshManagementEnum { + v := FeatureMembershipMeshManagementEnum(s) + return &v +} + +func (v FeatureMembershipMeshManagementEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"MANAGEMENT_UNSPECIFIED", "MANAGEMENT_AUTOMATIC", "MANAGEMENT_MANUAL"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "FeatureMembershipMeshManagementEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum FeatureMembershipMeshControlPlaneEnum. +type FeatureMembershipMeshControlPlaneEnum string + +// FeatureMembershipMeshControlPlaneEnumRef returns a *FeatureMembershipMeshControlPlaneEnum with the value of string s +// If the empty string is provided, nil is returned. +func FeatureMembershipMeshControlPlaneEnumRef(s string) *FeatureMembershipMeshControlPlaneEnum { + v := FeatureMembershipMeshControlPlaneEnum(s) + return &v +} + +func (v FeatureMembershipMeshControlPlaneEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. 
+ return nil + } + for _, s := range []string{"CONTROL_PLANE_MANAGEMENT_UNSPECIFIED", "AUTOMATIC", "MANUAL"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "FeatureMembershipMeshControlPlaneEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum. +type FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum string + +// FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnumRef returns a *FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum with the value of string s +// If the empty string is provided, nil is returned. +func FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnumRef(s string) *FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum { + v := FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum(s) + return &v +} + +func (v FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"MONITORING_BACKEND_UNSPECIFIED", "PROMETHEUS", "CLOUD_MONITORING"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum FeatureMembershipConfigmanagementManagementEnum. +type FeatureMembershipConfigmanagementManagementEnum string + +// FeatureMembershipConfigmanagementManagementEnumRef returns a *FeatureMembershipConfigmanagementManagementEnum with the value of string s +// If the empty string is provided, nil is returned. 
+func FeatureMembershipConfigmanagementManagementEnumRef(s string) *FeatureMembershipConfigmanagementManagementEnum { + v := FeatureMembershipConfigmanagementManagementEnum(s) + return &v +} + +func (v FeatureMembershipConfigmanagementManagementEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"MANAGEMENT_UNSPECIFIED", "MANAGEMENT_AUTOMATIC", "MANAGEMENT_MANUAL"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "FeatureMembershipConfigmanagementManagementEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum. +type FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum string + +// FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnumRef returns a *FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum with the value of string s +// If the empty string is provided, nil is returned. +func FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnumRef(s string) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum { + v := FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum(s) + return &v +} + +func (v FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"INSTALL_SPEC_UNSPECIFIED", "INSTALL_SPEC_NOT_INSTALLED", "INSTALL_SPEC_ENABLED", "INSTALL_SPEC_SUSPENDED", "INSTALL_SPEC_DETACHED"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum. 
+type FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum string + +// FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnumRef returns a *FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum with the value of string s +// If the empty string is provided, nil is returned. +func FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnumRef(s string) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum { + v := FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum(s) + return &v +} + +func (v FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"MONITORING_BACKEND_UNSPECIFIED", "PROMETHEUS", "CLOUD_MONITORING"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnum. +type FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnum string + +// FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnumRef returns a *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnum with the value of string s +// If the empty string is provided, nil is returned. 
func FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnumRef(s string) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnum {
	v := FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnum(s)
	return &v
}

// Validate returns nil when the enum is empty (unset) or one of the known
// template-library installation values; otherwise a *dcl.EnumInvalidError.
func (v FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnum) Validate() error {
	if string(v) == "" {
		// Empty enum is okay.
		return nil
	}
	for _, s := range []string{"INSTALLATION_UNSPECIFIED", "NOT_INSTALLED", "ALL"} {
		if string(v) == s {
			return nil
		}
	}
	return &dcl.EnumInvalidError{
		Enum:  "FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnum",
		Value: string(v),
		Valid: []string{},
	}
}

// The enum FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnum.
type FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnum string

// FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnumRef returns a *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnum with the value of string s
// If the empty string is provided, nil is returned.
func FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnumRef(s string) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnum {
	v := FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnum(s)
	return &v
}

// Validate returns nil when the enum is empty (unset) or one of the known
// pod-affinity values; otherwise a *dcl.EnumInvalidError.
func (v FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnum) Validate() error {
	if string(v) == "" {
		// Empty enum is okay.
		return nil
	}
	for _, s := range []string{"AFFINITY_UNSPECIFIED", "NO_AFFINITY", "ANTI_AFFINITY"} {
		if string(v) == s {
			return nil
		}
	}
	return &dcl.EnumInvalidError{
		Enum:  "FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnum",
		Value: string(v),
		Valid: []string{},
	}
}

// FeatureMembershipMesh holds the Service Mesh configuration of a feature
// membership. All configuration fields are pointers; nil means unset.
type FeatureMembershipMesh struct {
	// empty marks the canonical empty-sentinel state; excluded from JSON via "-".
	empty        bool                                   `json:"-"`
	Management   *FeatureMembershipMeshManagementEnum   `json:"management"`
	ControlPlane *FeatureMembershipMeshControlPlaneEnum `json:"controlPlane"`
}

// jsonFeatureMembershipMesh is a method-free alias, so unmarshalling into it
// does not re-enter FeatureMembershipMesh.UnmarshalJSON.
type jsonFeatureMembershipMesh FeatureMembershipMesh

// UnmarshalJSON decodes data into r. A JSON object with no keys decodes to the
// EmptyFeatureMembershipMesh sentinel rather than the plain zero value.
func (r *FeatureMembershipMesh) UnmarshalJSON(data []byte) error {
	var res jsonFeatureMembershipMesh
	if err := json.Unmarshal(data, &res); err != nil {
		return err
	}

	var m map[string]interface{}
	// Error ignored: data already unmarshalled successfully above, so this
	// second pass only serves to count the object's keys.
	json.Unmarshal(data, &m)

	if len(m) == 0 {
		*r = *EmptyFeatureMembershipMesh
	} else {

		r.Management = res.Management

		r.ControlPlane = res.ControlPlane

	}
	return nil
}

// This object is used to assert a desired state where this FeatureMembershipMesh is
// empty. Go lacks global const objects, but this object should be treated
// as one. Modifying this object will have undesirable results.
var EmptyFeatureMembershipMesh *FeatureMembershipMesh = &FeatureMembershipMesh{empty: true}

// Empty reports whether r is the canonical empty sentinel.
func (r *FeatureMembershipMesh) Empty() bool {
	return r.empty
}

// String returns a human-readable rendering of r via dcl.SprintResource.
func (r *FeatureMembershipMesh) String() string {
	return dcl.SprintResource(r)
}

// HashCode returns a hex SHA-256 digest of r's string form, for cheap equality
// comparison between resource bodies.
func (r *FeatureMembershipMesh) HashCode() string {
	// Placeholder for a more complex hash method that handles ordering, etc
	// Hash resource body for easy comparison later
	hash := sha256.Sum256([]byte(r.String()))
	return fmt.Sprintf("%x", hash)
}

// FeatureMembershipConfigmanagement holds the Config Management configuration
// of a feature membership. All configuration fields are pointers; nil means unset.
type FeatureMembershipConfigmanagement struct {
	// empty marks the canonical empty-sentinel state; excluded from JSON via "-".
	empty               bool                                                  `json:"-"`
	ConfigSync          *FeatureMembershipConfigmanagementConfigSync          `json:"configSync"`
	PolicyController    *FeatureMembershipConfigmanagementPolicyController    `json:"policyController"`
	HierarchyController *FeatureMembershipConfigmanagementHierarchyController `json:"hierarchyController"`
	Version             *string                                               `json:"version"`
	Management          *FeatureMembershipConfigmanagementManagementEnum      `json:"management"`
}

// jsonFeatureMembershipConfigmanagement is a method-free alias, so unmarshalling
// into it does not re-enter FeatureMembershipConfigmanagement.UnmarshalJSON.
type jsonFeatureMembershipConfigmanagement FeatureMembershipConfigmanagement

// UnmarshalJSON decodes data into r. A JSON object with no keys decodes to the
// EmptyFeatureMembershipConfigmanagement sentinel rather than the plain zero value.
func (r *FeatureMembershipConfigmanagement) UnmarshalJSON(data []byte) error {
	var res jsonFeatureMembershipConfigmanagement
	if err := json.Unmarshal(data, &res); err != nil {
		return err
	}

	var m map[string]interface{}
	// Error ignored: data already unmarshalled successfully above, so this
	// second pass only serves to count the object's keys.
	json.Unmarshal(data, &m)

	if len(m) == 0 {
		*r = *EmptyFeatureMembershipConfigmanagement
	} else {

		r.ConfigSync = res.ConfigSync

		r.PolicyController = res.PolicyController

		r.HierarchyController = res.HierarchyController

		r.Version = res.Version

		r.Management = res.Management

	}
	return nil
}

// This object is used to assert a desired state where this FeatureMembershipConfigmanagement is
// empty. Go lacks global const objects, but this object should be treated
// as one. Modifying this object will have undesirable results.
+var EmptyFeatureMembershipConfigmanagement *FeatureMembershipConfigmanagement = &FeatureMembershipConfigmanagement{empty: true} + +func (r *FeatureMembershipConfigmanagement) Empty() bool { + return r.empty +} + +func (r *FeatureMembershipConfigmanagement) String() string { + return dcl.SprintResource(r) +} + +func (r *FeatureMembershipConfigmanagement) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type FeatureMembershipConfigmanagementConfigSync struct { + empty bool `json:"-"` + DeploymentOverrides []FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides `json:"deploymentOverrides"` + Git *FeatureMembershipConfigmanagementConfigSyncGit `json:"git"` + SourceFormat *string `json:"sourceFormat"` + Enabled *bool `json:"enabled"` + StopSyncing *bool `json:"stopSyncing"` + PreventDrift *bool `json:"preventDrift"` + MetricsGcpServiceAccountEmail *string `json:"metricsGcpServiceAccountEmail"` + Oci *FeatureMembershipConfigmanagementConfigSyncOci `json:"oci"` +} + +type jsonFeatureMembershipConfigmanagementConfigSync FeatureMembershipConfigmanagementConfigSync + +func (r *FeatureMembershipConfigmanagementConfigSync) UnmarshalJSON(data []byte) error { + var res jsonFeatureMembershipConfigmanagementConfigSync + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyFeatureMembershipConfigmanagementConfigSync + } else { + + r.DeploymentOverrides = res.DeploymentOverrides + + r.Git = res.Git + + r.SourceFormat = res.SourceFormat + + r.Enabled = res.Enabled + + r.StopSyncing = res.StopSyncing + + r.PreventDrift = res.PreventDrift + + r.MetricsGcpServiceAccountEmail = res.MetricsGcpServiceAccountEmail + + r.Oci = res.Oci + + } + return nil +} + +// This object is used to assert a 
desired state where this FeatureMembershipConfigmanagementConfigSync is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyFeatureMembershipConfigmanagementConfigSync *FeatureMembershipConfigmanagementConfigSync = &FeatureMembershipConfigmanagementConfigSync{empty: true} + +func (r *FeatureMembershipConfigmanagementConfigSync) Empty() bool { + return r.empty +} + +func (r *FeatureMembershipConfigmanagementConfigSync) String() string { + return dcl.SprintResource(r) +} + +func (r *FeatureMembershipConfigmanagementConfigSync) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides struct { + empty bool `json:"-"` + DeploymentName *string `json:"deploymentName"` + DeploymentNamespace *string `json:"deploymentNamespace"` + Containers []FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers `json:"containers"` +} + +type jsonFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides + +func (r *FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides) UnmarshalJSON(data []byte) error { + var res jsonFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides + } else { + + r.DeploymentName = res.DeploymentName + + r.DeploymentNamespace = res.DeploymentNamespace + + r.Containers = res.Containers + + } + return nil +} + +// This object is used to assert a desired state where this 
FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides *FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides = &FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides{empty: true} + +func (r *FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides) Empty() bool { + return r.empty +} + +func (r *FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides) String() string { + return dcl.SprintResource(r) +} + +func (r *FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers struct { + empty bool `json:"-"` + ContainerName *string `json:"containerName"` + CpuRequest *string `json:"cpuRequest"` + MemoryRequest *string `json:"memoryRequest"` + CpuLimit *string `json:"cpuLimit"` + MemoryLimit *string `json:"memoryLimit"` +} + +type jsonFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers + +func (r *FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers) UnmarshalJSON(data []byte) error { + var res jsonFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers + } else { + + r.ContainerName = res.ContainerName + + r.CpuRequest = res.CpuRequest + + 
r.MemoryRequest = res.MemoryRequest + + r.CpuLimit = res.CpuLimit + + r.MemoryLimit = res.MemoryLimit + + } + return nil +} + +// This object is used to assert a desired state where this FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers *FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers = &FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers{empty: true} + +func (r *FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers) Empty() bool { + return r.empty +} + +func (r *FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers) String() string { + return dcl.SprintResource(r) +} + +func (r *FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type FeatureMembershipConfigmanagementConfigSyncGit struct { + empty bool `json:"-"` + SyncRepo *string `json:"syncRepo"` + SyncBranch *string `json:"syncBranch"` + PolicyDir *string `json:"policyDir"` + SyncWaitSecs *string `json:"syncWaitSecs"` + SyncRev *string `json:"syncRev"` + SecretType *string `json:"secretType"` + HttpsProxy *string `json:"httpsProxy"` + GcpServiceAccountEmail *string `json:"gcpServiceAccountEmail"` +} + +type jsonFeatureMembershipConfigmanagementConfigSyncGit FeatureMembershipConfigmanagementConfigSyncGit + +func (r *FeatureMembershipConfigmanagementConfigSyncGit) UnmarshalJSON(data []byte) error { + var res jsonFeatureMembershipConfigmanagementConfigSyncGit + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m 
map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyFeatureMembershipConfigmanagementConfigSyncGit + } else { + + r.SyncRepo = res.SyncRepo + + r.SyncBranch = res.SyncBranch + + r.PolicyDir = res.PolicyDir + + r.SyncWaitSecs = res.SyncWaitSecs + + r.SyncRev = res.SyncRev + + r.SecretType = res.SecretType + + r.HttpsProxy = res.HttpsProxy + + r.GcpServiceAccountEmail = res.GcpServiceAccountEmail + + } + return nil +} + +// This object is used to assert a desired state where this FeatureMembershipConfigmanagementConfigSyncGit is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyFeatureMembershipConfigmanagementConfigSyncGit *FeatureMembershipConfigmanagementConfigSyncGit = &FeatureMembershipConfigmanagementConfigSyncGit{empty: true} + +func (r *FeatureMembershipConfigmanagementConfigSyncGit) Empty() bool { + return r.empty +} + +func (r *FeatureMembershipConfigmanagementConfigSyncGit) String() string { + return dcl.SprintResource(r) +} + +func (r *FeatureMembershipConfigmanagementConfigSyncGit) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type FeatureMembershipConfigmanagementConfigSyncOci struct { + empty bool `json:"-"` + SyncRepo *string `json:"syncRepo"` + PolicyDir *string `json:"policyDir"` + SyncWaitSecs *string `json:"syncWaitSecs"` + SecretType *string `json:"secretType"` + GcpServiceAccountEmail *string `json:"gcpServiceAccountEmail"` +} + +type jsonFeatureMembershipConfigmanagementConfigSyncOci FeatureMembershipConfigmanagementConfigSyncOci + +func (r *FeatureMembershipConfigmanagementConfigSyncOci) UnmarshalJSON(data []byte) error { + var res jsonFeatureMembershipConfigmanagementConfigSyncOci + if err := json.Unmarshal(data, &res); err != nil { 
+ return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyFeatureMembershipConfigmanagementConfigSyncOci + } else { + + r.SyncRepo = res.SyncRepo + + r.PolicyDir = res.PolicyDir + + r.SyncWaitSecs = res.SyncWaitSecs + + r.SecretType = res.SecretType + + r.GcpServiceAccountEmail = res.GcpServiceAccountEmail + + } + return nil +} + +// This object is used to assert a desired state where this FeatureMembershipConfigmanagementConfigSyncOci is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyFeatureMembershipConfigmanagementConfigSyncOci *FeatureMembershipConfigmanagementConfigSyncOci = &FeatureMembershipConfigmanagementConfigSyncOci{empty: true} + +func (r *FeatureMembershipConfigmanagementConfigSyncOci) Empty() bool { + return r.empty +} + +func (r *FeatureMembershipConfigmanagementConfigSyncOci) String() string { + return dcl.SprintResource(r) +} + +func (r *FeatureMembershipConfigmanagementConfigSyncOci) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type FeatureMembershipConfigmanagementPolicyController struct { + empty bool `json:"-"` + Enabled *bool `json:"enabled"` + ExemptableNamespaces []string `json:"exemptableNamespaces"` + ReferentialRulesEnabled *bool `json:"referentialRulesEnabled"` + LogDeniesEnabled *bool `json:"logDeniesEnabled"` + MutationEnabled *bool `json:"mutationEnabled"` + Monitoring *FeatureMembershipConfigmanagementPolicyControllerMonitoring `json:"monitoring"` + TemplateLibraryInstalled *bool `json:"templateLibraryInstalled"` + AuditIntervalSeconds *string `json:"auditIntervalSeconds"` +} + +type jsonFeatureMembershipConfigmanagementPolicyController FeatureMembershipConfigmanagementPolicyController + +func (r 
*FeatureMembershipConfigmanagementPolicyController) UnmarshalJSON(data []byte) error { + var res jsonFeatureMembershipConfigmanagementPolicyController + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyFeatureMembershipConfigmanagementPolicyController + } else { + + r.Enabled = res.Enabled + + r.ExemptableNamespaces = res.ExemptableNamespaces + + r.ReferentialRulesEnabled = res.ReferentialRulesEnabled + + r.LogDeniesEnabled = res.LogDeniesEnabled + + r.MutationEnabled = res.MutationEnabled + + r.Monitoring = res.Monitoring + + r.TemplateLibraryInstalled = res.TemplateLibraryInstalled + + r.AuditIntervalSeconds = res.AuditIntervalSeconds + + } + return nil +} + +// This object is used to assert a desired state where this FeatureMembershipConfigmanagementPolicyController is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyFeatureMembershipConfigmanagementPolicyController *FeatureMembershipConfigmanagementPolicyController = &FeatureMembershipConfigmanagementPolicyController{empty: true} + +func (r *FeatureMembershipConfigmanagementPolicyController) Empty() bool { + return r.empty +} + +func (r *FeatureMembershipConfigmanagementPolicyController) String() string { + return dcl.SprintResource(r) +} + +func (r *FeatureMembershipConfigmanagementPolicyController) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type FeatureMembershipConfigmanagementPolicyControllerMonitoring struct { + empty bool `json:"-"` + Backends []FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum `json:"backends"` +} + +type jsonFeatureMembershipConfigmanagementPolicyControllerMonitoring FeatureMembershipConfigmanagementPolicyControllerMonitoring + +func (r *FeatureMembershipConfigmanagementPolicyControllerMonitoring) UnmarshalJSON(data []byte) error { + var res jsonFeatureMembershipConfigmanagementPolicyControllerMonitoring + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyFeatureMembershipConfigmanagementPolicyControllerMonitoring + } else { + + r.Backends = res.Backends + + } + return nil +} + +// This object is used to assert a desired state where this FeatureMembershipConfigmanagementPolicyControllerMonitoring is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyFeatureMembershipConfigmanagementPolicyControllerMonitoring *FeatureMembershipConfigmanagementPolicyControllerMonitoring = &FeatureMembershipConfigmanagementPolicyControllerMonitoring{empty: true} + +func (r *FeatureMembershipConfigmanagementPolicyControllerMonitoring) Empty() bool { + return r.empty +} + +func (r *FeatureMembershipConfigmanagementPolicyControllerMonitoring) String() string { + return dcl.SprintResource(r) +} + +func (r *FeatureMembershipConfigmanagementPolicyControllerMonitoring) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type FeatureMembershipConfigmanagementHierarchyController struct { + empty bool `json:"-"` + Enabled *bool `json:"enabled"` + EnablePodTreeLabels *bool `json:"enablePodTreeLabels"` + EnableHierarchicalResourceQuota *bool `json:"enableHierarchicalResourceQuota"` +} + +type jsonFeatureMembershipConfigmanagementHierarchyController FeatureMembershipConfigmanagementHierarchyController + +func (r *FeatureMembershipConfigmanagementHierarchyController) UnmarshalJSON(data []byte) error { + var res jsonFeatureMembershipConfigmanagementHierarchyController + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyFeatureMembershipConfigmanagementHierarchyController + } else { + + r.Enabled = res.Enabled + + r.EnablePodTreeLabels = res.EnablePodTreeLabels + + r.EnableHierarchicalResourceQuota = res.EnableHierarchicalResourceQuota + + } + return nil +} + +// This object is used to assert a desired state where this FeatureMembershipConfigmanagementHierarchyController is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyFeatureMembershipConfigmanagementHierarchyController *FeatureMembershipConfigmanagementHierarchyController = &FeatureMembershipConfigmanagementHierarchyController{empty: true} + +func (r *FeatureMembershipConfigmanagementHierarchyController) Empty() bool { + return r.empty +} + +func (r *FeatureMembershipConfigmanagementHierarchyController) String() string { + return dcl.SprintResource(r) +} + +func (r *FeatureMembershipConfigmanagementHierarchyController) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type FeatureMembershipPolicycontroller struct { + empty bool `json:"-"` + Version *string `json:"version"` + PolicyControllerHubConfig *FeatureMembershipPolicycontrollerPolicyControllerHubConfig `json:"policyControllerHubConfig"` +} + +type jsonFeatureMembershipPolicycontroller FeatureMembershipPolicycontroller + +func (r *FeatureMembershipPolicycontroller) UnmarshalJSON(data []byte) error { + var res jsonFeatureMembershipPolicycontroller + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyFeatureMembershipPolicycontroller + } else { + + r.Version = res.Version + + r.PolicyControllerHubConfig = res.PolicyControllerHubConfig + + } + return nil +} + +// This object is used to assert a desired state where this FeatureMembershipPolicycontroller is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyFeatureMembershipPolicycontroller *FeatureMembershipPolicycontroller = &FeatureMembershipPolicycontroller{empty: true} + +func (r *FeatureMembershipPolicycontroller) Empty() bool { + return r.empty +} + +func (r *FeatureMembershipPolicycontroller) String() string { + return dcl.SprintResource(r) +} + +func (r *FeatureMembershipPolicycontroller) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type FeatureMembershipPolicycontrollerPolicyControllerHubConfig struct { + empty bool `json:"-"` + InstallSpec *FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum `json:"installSpec"` + ExemptableNamespaces []string `json:"exemptableNamespaces"` + ReferentialRulesEnabled *bool `json:"referentialRulesEnabled"` + LogDeniesEnabled *bool `json:"logDeniesEnabled"` + MutationEnabled *bool `json:"mutationEnabled"` + Monitoring *FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring `json:"monitoring"` + AuditIntervalSeconds *int64 `json:"auditIntervalSeconds"` + ConstraintViolationLimit *int64 `json:"constraintViolationLimit"` + PolicyContent *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent `json:"policyContent"` + DeploymentConfigs map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs `json:"deploymentConfigs"` +} + +type jsonFeatureMembershipPolicycontrollerPolicyControllerHubConfig FeatureMembershipPolicycontrollerPolicyControllerHubConfig + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfig) UnmarshalJSON(data []byte) error { + var res jsonFeatureMembershipPolicycontrollerPolicyControllerHubConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = 
*EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfig + } else { + + r.InstallSpec = res.InstallSpec + + r.ExemptableNamespaces = res.ExemptableNamespaces + + r.ReferentialRulesEnabled = res.ReferentialRulesEnabled + + r.LogDeniesEnabled = res.LogDeniesEnabled + + r.MutationEnabled = res.MutationEnabled + + r.Monitoring = res.Monitoring + + r.AuditIntervalSeconds = res.AuditIntervalSeconds + + r.ConstraintViolationLimit = res.ConstraintViolationLimit + + r.PolicyContent = res.PolicyContent + + r.DeploymentConfigs = res.DeploymentConfigs + + } + return nil +} + +// This object is used to assert a desired state where this FeatureMembershipPolicycontrollerPolicyControllerHubConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfig *FeatureMembershipPolicycontrollerPolicyControllerHubConfig = &FeatureMembershipPolicycontrollerPolicyControllerHubConfig{empty: true} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfig) Empty() bool { + return r.empty +} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring struct { + empty bool `json:"-"` + Backends []FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum `json:"backends"` +} + +type jsonFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring + +func (r 
*FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring) UnmarshalJSON(data []byte) error { + var res jsonFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring + } else { + + r.Backends = res.Backends + + } + return nil +} + +// This object is used to assert a desired state where this FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring *FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring = &FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring{empty: true} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring) Empty() bool { + return r.empty +} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring) String() string { + return dcl.SprintResource(r) +} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent struct { + empty bool `json:"-"` + TemplateLibrary *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary `json:"templateLibrary"` + Bundles map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles `json:"bundles"` +} + +type 
jsonFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent) UnmarshalJSON(data []byte) error { + var res jsonFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent + } else { + + r.TemplateLibrary = res.TemplateLibrary + + r.Bundles = res.Bundles + + } + return nil +} + +// This object is used to assert a desired state where this FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent = &FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent{empty: true} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent) Empty() bool { + return r.empty +} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent) String() string { + return dcl.SprintResource(r) +} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary struct { + empty bool `json:"-"` + Installation 
*FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnum `json:"installation"` +} + +type jsonFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary) UnmarshalJSON(data []byte) error { + var res jsonFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary + } else { + + r.Installation = res.Installation + + } + return nil +} + +// This object is used to assert a desired state where this FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary = &FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary{empty: true} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary) Empty() bool { + return r.empty +} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary) String() string { + return dcl.SprintResource(r) +} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles struct { + empty bool `json:"-"` + ExemptedNamespaces []string `json:"exemptedNamespaces"` +} + +type jsonFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles) UnmarshalJSON(data []byte) error { + var res jsonFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles + } else { + + r.ExemptedNamespaces = res.ExemptedNamespaces + + } + return nil +} + +// This object is used to assert a desired state where this FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles is +// empty. 
Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles = &FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles{empty: true} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles) Empty() bool { + return r.empty +} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles) String() string { + return dcl.SprintResource(r) +} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs struct { + empty bool `json:"-"` + ReplicaCount *int64 `json:"replicaCount"` + ContainerResources *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources `json:"containerResources"` + PodAffinity *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnum `json:"podAffinity"` + PodTolerations []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations `json:"podTolerations"` +} + +type jsonFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs) UnmarshalJSON(data []byte) error { + var res jsonFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs + if err := json.Unmarshal(data, &res); err != nil { + return err + } + 
+ var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs + } else { + + r.ReplicaCount = res.ReplicaCount + + r.ContainerResources = res.ContainerResources + + r.PodAffinity = res.PodAffinity + + r.PodTolerations = res.PodTolerations + + } + return nil +} + +// This object is used to assert a desired state where this FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs = &FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs{empty: true} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs) Empty() bool { + return r.empty +} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs) String() string { + return dcl.SprintResource(r) +} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources struct { + empty bool `json:"-"` + Limits *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits `json:"limits"` + Requests *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests `json:"requests"` +} + +type jsonFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources 
FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources) UnmarshalJSON(data []byte) error { + var res jsonFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources + } else { + + r.Limits = res.Limits + + r.Requests = res.Requests + + } + return nil +} + +// This object is used to assert a desired state where this FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources = &FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources{empty: true} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources) Empty() bool { + return r.empty +} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources) String() string { + return dcl.SprintResource(r) +} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type 
FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits struct { + empty bool `json:"-"` + Memory *string `json:"memory"` + Cpu *string `json:"cpu"` +} + +type jsonFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits) UnmarshalJSON(data []byte) error { + var res jsonFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits + } else { + + r.Memory = res.Memory + + r.Cpu = res.Cpu + + } + return nil +} + +// This object is used to assert a desired state where this FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits = &FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits{empty: true} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits) Empty() bool { + return r.empty +} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits) String() string { + return dcl.SprintResource(r) +} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests struct { + empty bool `json:"-"` + Memory *string `json:"memory"` + Cpu *string `json:"cpu"` +} + +type jsonFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests) UnmarshalJSON(data []byte) error { + var res jsonFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests + } else { + + r.Memory = res.Memory + + r.Cpu = res.Cpu + + } + 
return nil +} + +// This object is used to assert a desired state where this FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests = &FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests{empty: true} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests) Empty() bool { + return r.empty +} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests) String() string { + return dcl.SprintResource(r) +} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations struct { + empty bool `json:"-"` + Key *string `json:"key"` + Operator *string `json:"operator"` + Value *string `json:"value"` + Effect *string `json:"effect"` +} + +type jsonFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations) UnmarshalJSON(data []byte) error { + var res 
jsonFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations + } else { + + r.Key = res.Key + + r.Operator = res.Operator + + r.Value = res.Value + + r.Effect = res.Effect + + } + return nil +} + +// This object is used to assert a desired state where this FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations = &FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations{empty: true} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations) Empty() bool { + return r.empty +} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations) String() string { + return dcl.SprintResource(r) +} + +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +// Describe returns a simple description of this resource to ensure that automated tools +// can identify it. 
+func (r *FeatureMembership) Describe() dcl.ServiceTypeVersion { + return dcl.ServiceTypeVersion{ + Service: "gke_hub", + Type: "FeatureMembership", +{{- if ne $.TargetVersionName "ga" }} + Version: "beta", +{{- else }} + Version: "gkehub", +{{- end }} + } +} + +func (r *FeatureMembership) ID() (string, error) { + if err := extractFeatureMembershipFields(r); err != nil { + return "", err + } + nr := r.urlNormalized() + params := map[string]interface{}{ + "mesh": dcl.ValueOrEmptyString(nr.Mesh), + "configmanagement": dcl.ValueOrEmptyString(nr.Configmanagement), + "policycontroller": dcl.ValueOrEmptyString(nr.Policycontroller), + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "feature": dcl.ValueOrEmptyString(nr.Feature), + "membership": dcl.ValueOrEmptyString(nr.Membership), + "membership_location": dcl.ValueOrEmptyString(nr.MembershipLocation), + } + return dcl.Nprintf("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/features/{{ "{{" }}feature{{ "}}" }}/memberships/{{ "{{" }}membership{{ "}}" }}", params), nil +} + +const FeatureMembershipMaxPage = -1 + +type FeatureMembershipList struct { + Items []*FeatureMembership + + nextToken string + + resource *FeatureMembership +} + +func (c *Client) DeleteFeatureMembership(ctx context.Context, r *FeatureMembership) error { + ctx = dcl.ContextWithRequestID(ctx) + c = NewClient(c.Config.Clone(dcl.WithCodeRetryability(map[int]dcl.Retryability{ + 409: dcl.Retryability{ + Retryable: true, + Pattern: "", + Timeout: 60000000000, + }, + }))) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if r == nil { + return fmt.Errorf("FeatureMembership resource is nil") + } + c.Config.Logger.InfoWithContext(ctx, "Deleting FeatureMembership...") + deleteOp := deleteFeatureMembershipOperation{} + return deleteOp.do(ctx, r, c) +} + +// DeleteAllFeatureMembership deletes all resources that the filter functions 
returns true on. +func (c *Client) DeleteAllFeatureMembership(ctx context.Context, project, location, feature string, filter func(*FeatureMembership) bool) error { + listObj, err := c.ListFeatureMembership(ctx, project, location, feature) + if err != nil { + return err + } + + err = c.deleteAllFeatureMembership(ctx, filter, listObj.Items) + if err != nil { + return err + } + for listObj.HasNext() { + err = listObj.Next(ctx, c) + if err != nil { + return nil + } + err = c.deleteAllFeatureMembership(ctx, filter, listObj.Items) + if err != nil { + return err + } + } + return nil +} + +func (c *Client) ApplyFeatureMembership(ctx context.Context, rawDesired *FeatureMembership, opts ...dcl.ApplyOption) (*FeatureMembership, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + ctx = dcl.ContextWithRequestID(ctx) + c = NewClient(c.Config.Clone(dcl.WithCodeRetryability(map[int]dcl.Retryability{ + 409: dcl.Retryability{ + Retryable: true, + Pattern: "", + Timeout: 60000000000, + }, + }))) + var resultNewState *FeatureMembership + err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + newState, err := applyFeatureMembershipHelper(c, ctx, rawDesired, opts...) + resultNewState = newState + if err != nil { + // If the error is 409, there is conflict in resource update. + // Here we want to apply changes based on latest state. + if dcl.IsConflictError(err) { + return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} + } + return nil, err + } + return nil, nil + }, c.Config.RetryProvider) + return resultNewState, err +} + +func applyFeatureMembershipHelper(c *Client, ctx context.Context, rawDesired *FeatureMembership, opts ...dcl.ApplyOption) (*FeatureMembership, error) { + c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyFeatureMembership...") + c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) + + // 1.1: Validation of user-specified fields in desired state. 
+ if err := rawDesired.validate(); err != nil { + return nil, err + } + + if err := extractFeatureMembershipFields(rawDesired); err != nil { + return nil, err + } + + initial, desired, fieldDiffs, err := c.featureMembershipDiffsForRawDesired(ctx, rawDesired, opts...) + if err != nil { + return nil, fmt.Errorf("failed to create a diff: %w", err) + } + + diffs, err := convertFieldDiffsToFeatureMembershipDiffs(c.Config, fieldDiffs, opts) + if err != nil { + return nil, err + } + + // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). + + // 2.3: Lifecycle Directive Check + var create bool + lp := dcl.FetchLifecycleParams(opts) + if initial == nil { + if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} + } + create = true + } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), + } + } else { + for _, d := range diffs { + if d.RequiresRecreate { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), + } + } + if dcl.HasLifecycleParam(lp, dcl.BlockModification) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} + } + } + } + + // 2.4 Imperative Request Planning + var ops []featureMembershipApiOperation + if create { + ops = append(ops, &createFeatureMembershipOperation{}) + } else { + for _, d := range diffs { + ops = append(ops, d.UpdateOp) + } + } + c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) + + // 2.5 Request Actuation + for _, op := range ops { + c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) + if err := op.do(ctx, desired, c); err != nil { + c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: 
%v", op, op, err) + return nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) + } + return applyFeatureMembershipDiff(c, ctx, desired, rawDesired, ops, opts...) +} + +func applyFeatureMembershipDiff(c *Client, ctx context.Context, desired *FeatureMembership, rawDesired *FeatureMembership, ops []featureMembershipApiOperation, opts ...dcl.ApplyOption) (*FeatureMembership, error) { + // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") + rawNew, err := c.GetFeatureMembership(ctx, desired) + if err != nil { + return nil, err + } + // Get additional values from the first response. + // These values should be merged into the newState above. + if len(ops) > 0 { + lastOp := ops[len(ops)-1] + if o, ok := lastOp.(*createFeatureMembershipOperation); ok { + if r, hasR := o.FirstResponse(); hasR { + + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") + + fullResp, err := unmarshalMapFeatureMembership(r, c, rawDesired) + if err != nil { + return nil, err + } + + rawNew, err = canonicalizeFeatureMembershipNewState(c, rawNew, fullResp) + if err != nil { + return nil, err + } + } + } + } + + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) + // 3.2b Canonicalization of raw new state using raw desired state + newState, err := canonicalizeFeatureMembershipNewState(c, rawNew, rawDesired) + if err != nil { + return rawNew, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) + // 3.3 Comparison of the new state and raw desired state. 
+ // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE + newDesired, err := canonicalizeFeatureMembershipDesiredState(rawDesired, newState) + if err != nil { + return newState, err + } + + if err := postReadExtractFeatureMembershipFields(newState); err != nil { + return newState, err + } + + // Need to ensure any transformations made here match acceptably in differ. + if err := postReadExtractFeatureMembershipFields(newDesired); err != nil { + return newState, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) + newDiffs, err := diffFeatureMembership(c, newDesired, newState) + if err != nil { + return newState, err + } + + if len(newDiffs) == 0 { + c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") + } else { + c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) + diffMessages := make([]string, len(newDiffs)) + for i, d := range newDiffs { + diffMessages[i] = fmt.Sprintf("%v", d) + } + return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} + } + c.Config.Logger.InfoWithContext(ctx, "Done Apply.") + return newState, nil +} diff --git a/mmv1/third_party/terraform/services/gkehub/feature_membership_internal.go.tmpl b/mmv1/third_party/terraform/services/gkehub/feature_membership_internal.go.tmpl new file mode 100644 index 000000000000..84eb94af8b06 --- /dev/null +++ b/mmv1/third_party/terraform/services/gkehub/feature_membership_internal.go.tmpl @@ -0,0 +1,8174 @@ +package gkehub + +import ( + "context" + "encoding/json" + "fmt" + "strings" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +func (r *FeatureMembership) validate() error { + + if err := dcl.RequiredParameter(r.Project, "Project"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Location, "Location"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Feature, "Feature"); err != nil { + return err + } + if err := 
dcl.RequiredParameter(r.Membership, "Membership"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.Mesh) { + if err := r.Mesh.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.Configmanagement) { + if err := r.Configmanagement.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.Policycontroller) { + if err := r.Policycontroller.validate(); err != nil { + return err + } + } + return nil +} +func (r *FeatureMembershipMesh) validate() error { + return nil +} +func (r *FeatureMembershipConfigmanagement) validate() error { + if !dcl.IsEmptyValueIndirect(r.ConfigSync) { + if err := r.ConfigSync.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.PolicyController) { + if err := r.PolicyController.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.HierarchyController) { + if err := r.HierarchyController.validate(); err != nil { + return err + } + } + return nil +} +func (r *FeatureMembershipConfigmanagementConfigSync) validate() error { + if !dcl.IsEmptyValueIndirect(r.Git) { + if err := r.Git.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.Oci) { + if err := r.Oci.validate(); err != nil { + return err + } + } + return nil +} +func (r *FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides) validate() error { + return nil +} +func (r *FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers) validate() error { + return nil +} +func (r *FeatureMembershipConfigmanagementConfigSyncGit) validate() error { + return nil +} +func (r *FeatureMembershipConfigmanagementConfigSyncOci) validate() error { + return nil +} +func (r *FeatureMembershipConfigmanagementPolicyController) validate() error { + if !dcl.IsEmptyValueIndirect(r.Monitoring) { + if err := r.Monitoring.validate(); err != nil { + return err + } + } + return nil +} +func (r *FeatureMembershipConfigmanagementPolicyControllerMonitoring) 
validate() error { + return nil +} +func (r *FeatureMembershipConfigmanagementHierarchyController) validate() error { + return nil +} +func (r *FeatureMembershipPolicycontroller) validate() error { + if err := dcl.Required(r, "policyControllerHubConfig"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.PolicyControllerHubConfig) { + if err := r.PolicyControllerHubConfig.validate(); err != nil { + return err + } + } + return nil +} +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfig) validate() error { + if !dcl.IsEmptyValueIndirect(r.Monitoring) { + if err := r.Monitoring.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.PolicyContent) { + if err := r.PolicyContent.validate(); err != nil { + return err + } + } + return nil +} +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring) validate() error { + return nil +} +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent) validate() error { + if !dcl.IsEmptyValueIndirect(r.TemplateLibrary) { + if err := r.TemplateLibrary.validate(); err != nil { + return err + } + } + return nil +} +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary) validate() error { + return nil +} +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles) validate() error { + return nil +} +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs) validate() error { + if err := dcl.ValidateAtLeastOneOfFieldsSet([]string{"ReplicaCount", "ContainerResources", "PodAffinity", "PodTolerations"}, r.ReplicaCount, r.ContainerResources, r.PodAffinity, r.PodTolerations); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.ContainerResources) { + if err := r.ContainerResources.validate(); err != nil { + return err + } + } + return nil +} +func (r 
*FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources) validate() error { + if !dcl.IsEmptyValueIndirect(r.Limits) { + if err := r.Limits.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.Requests) { + if err := r.Requests.validate(); err != nil { + return err + } + } + return nil +} +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits) validate() error { + return nil +} +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests) validate() error { + return nil +} +func (r *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations) validate() error { + return nil +} +func (r *FeatureMembership) basePath() string { + params := map[string]interface{}{} +{{- if ne $.TargetVersionName "ga" }} + return dcl.Nprintf("https://gkehub.googleapis.com/v1beta1/", params) +{{- else }} + return dcl.Nprintf("https://gkehub.googleapis.com/v1/", params) +{{- end }} +} + +func (r *FeatureMembership) getURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "feature": dcl.ValueOrEmptyString(nr.Feature), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/features/{{ "{{" }}feature{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +func (r *FeatureMembership) listURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "feature": dcl.ValueOrEmptyString(nr.Feature), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/features/{{ "{{" }}feature{{ "}}" }}", nr.basePath(), userBasePath, 
params), nil + +} + +func (r *FeatureMembership) createURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "feature": dcl.ValueOrEmptyString(nr.Feature), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/features/{{ "{{" }}feature{{ "}}" }}", nr.basePath(), userBasePath, params), nil + +} + +func (r *FeatureMembership) deleteURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "feature": dcl.ValueOrEmptyString(nr.Feature), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/features/{{ "{{" }}feature{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +// featureMembershipApiOperation represents a mutable operation in the underlying REST +// API such as Create, Update, or Delete. +type featureMembershipApiOperation interface { + do(context.Context, *FeatureMembership, *Client) error +} + +// newUpdateFeatureMembershipUpdateFeatureMembershipRequest creates a request for an +// FeatureMembership resource's UpdateFeatureMembership update type by filling in the update +// fields based on the intended state of the resource. 
+func newUpdateFeatureMembershipUpdateFeatureMembershipRequest(ctx context.Context, f *FeatureMembership, c *Client) (map[string]interface{}, error) { + req := map[string]interface{}{} + res := f + _ = res + + if v, err := expandFeatureMembershipMesh(c, f.Mesh, res); err != nil { + return nil, fmt.Errorf("error expanding Mesh into mesh: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["mesh"] = v + } + if v, err := expandFeatureMembershipConfigmanagement(c, f.Configmanagement, res); err != nil { + return nil, fmt.Errorf("error expanding Configmanagement into configmanagement: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["configmanagement"] = v + } + if v, err := expandFeatureMembershipPolicycontroller(c, f.Policycontroller, res); err != nil { + return nil, fmt.Errorf("error expanding Policycontroller into policycontroller: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["policycontroller"] = v + } + return req, nil +} + +// marshalUpdateFeatureMembershipUpdateFeatureMembershipRequest converts the update into +// the final JSON request body. +func marshalUpdateFeatureMembershipUpdateFeatureMembershipRequest(c *Client, m map[string]interface{}) ([]byte, error) { + + return json.Marshal(m) +} + +type updateFeatureMembershipUpdateFeatureMembershipOperation struct { + // If the update operation has the REQUIRES_APPLY_OPTIONS trait, this will be populated. + // Usually it will be nil - this is to prevent us from accidentally depending on apply + // options, which should usually be unnecessary. + ApplyOptions []dcl.ApplyOption + FieldDiffs []*dcl.FieldDiff +} + +// do creates a request and sends it to the appropriate URL. In most operations, +// do will transcribe a subset of the resource into a request object and send a +// PUT request to a single URL. 
+ +func (c *Client) deleteAllFeatureMembership(ctx context.Context, f func(*FeatureMembership) bool, resources []*FeatureMembership) error { + var errors []string + for _, res := range resources { + if f(res) { + // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. + err := c.DeleteFeatureMembership(ctx, res) + if err != nil { + errors = append(errors, err.Error()) + } + } + } + if len(errors) > 0 { + return fmt.Errorf("%v", strings.Join(errors, "\n")) + } else { + return nil + } +} + +type deleteFeatureMembershipOperation struct{} + +// Create operations are similar to Update operations, although they do not have +// specific request objects. The Create request object is the json encoding of +// the resource, which is modified by res.marshal to form the base request body. +type createFeatureMembershipOperation struct { + response map[string]interface{} +} + +func (op *createFeatureMembershipOperation) FirstResponse() (map[string]interface{}, bool) { + return op.response, len(op.response) > 0 +} + +func (c *Client) featureMembershipDiffsForRawDesired(ctx context.Context, rawDesired *FeatureMembership, opts ...dcl.ApplyOption) (initial, desired *FeatureMembership, diffs []*dcl.FieldDiff, err error) { + c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") + // First, let us see if the user provided a state hint. If they did, we will start fetching based on that. 
+ var fetchState *FeatureMembership + if sh := dcl.FetchStateHint(opts); sh != nil { + if r, ok := sh.(*FeatureMembership); !ok { + c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected FeatureMembership, got %T", sh) + } else { + fetchState = r + } + } + if fetchState == nil { + fetchState = rawDesired + } + + // 1.2: Retrieval of raw initial state from API + rawInitial, err := c.GetFeatureMembership(ctx, fetchState) + if rawInitial == nil { + if !dcl.IsNotFound(err) { + c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a FeatureMembership resource already exists: %s", err) + return nil, nil, nil, fmt.Errorf("failed to retrieve FeatureMembership resource: %v", err) + } + c.Config.Logger.InfoWithContext(ctx, "Found that FeatureMembership resource did not exist.") + // Perform canonicalization to pick up defaults. + desired, err = canonicalizeFeatureMembershipDesiredState(rawDesired, rawInitial) + return nil, desired, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Found initial state for FeatureMembership: %v", rawInitial) + c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for FeatureMembership: %v", rawDesired) + + // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. + if err := extractFeatureMembershipFields(rawInitial); err != nil { + return nil, nil, nil, err + } + + // 1.3: Canonicalize raw initial state into initial state. + initial, err = canonicalizeFeatureMembershipInitialState(rawInitial, rawDesired) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for FeatureMembership: %v", initial) + + // 1.4: Canonicalize raw desired state into desired state. + desired, err = canonicalizeFeatureMembershipDesiredState(rawDesired, rawInitial, opts...) 
+ if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for FeatureMembership: %v", desired) + + // 2.1: Comparison of initial and desired state. + diffs, err = diffFeatureMembership(c, desired, initial, opts...) + return initial, desired, diffs, err +} + +func canonicalizeFeatureMembershipInitialState(rawInitial, rawDesired *FeatureMembership) (*FeatureMembership, error) { + // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. + return rawInitial, nil +} + +/* +* Canonicalizers +* +* These are responsible for converting either a user-specified config or a +* GCP API response to a standard format that can be used for difference checking. +* */ + +func canonicalizeFeatureMembershipDesiredState(rawDesired, rawInitial *FeatureMembership, opts ...dcl.ApplyOption) (*FeatureMembership, error) { + + if rawInitial == nil { + // Since the initial state is empty, the desired state is all we have. + // We canonicalize the remaining nested objects with nil to pick up defaults. + rawDesired.Mesh = canonicalizeFeatureMembershipMesh(rawDesired.Mesh, nil, opts...) + rawDesired.Configmanagement = canonicalizeFeatureMembershipConfigmanagement(rawDesired.Configmanagement, nil, opts...) + rawDesired.Policycontroller = canonicalizeFeatureMembershipPolicycontroller(rawDesired.Policycontroller, nil, opts...) + + return rawDesired, nil + } + canonicalDesired := &FeatureMembership{} + canonicalDesired.Mesh = canonicalizeFeatureMembershipMesh(rawDesired.Mesh, rawInitial.Mesh, opts...) + canonicalDesired.Configmanagement = canonicalizeFeatureMembershipConfigmanagement(rawDesired.Configmanagement, rawInitial.Configmanagement, opts...) + canonicalDesired.Policycontroller = canonicalizeFeatureMembershipPolicycontroller(rawDesired.Policycontroller, rawInitial.Policycontroller, opts...) 
+ if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) { + canonicalDesired.Project = rawInitial.Project + } else { + canonicalDesired.Project = rawDesired.Project + } + if dcl.NameToSelfLink(rawDesired.Location, rawInitial.Location) { + canonicalDesired.Location = rawInitial.Location + } else { + canonicalDesired.Location = rawDesired.Location + } + if dcl.NameToSelfLink(rawDesired.Feature, rawInitial.Feature) { + canonicalDesired.Feature = rawInitial.Feature + } else { + canonicalDesired.Feature = rawDesired.Feature + } + if dcl.NameToSelfLink(rawDesired.Membership, rawInitial.Membership) { + canonicalDesired.Membership = rawInitial.Membership + } else { + canonicalDesired.Membership = rawDesired.Membership + } + if dcl.NameToSelfLink(rawDesired.MembershipLocation, rawInitial.MembershipLocation) { + canonicalDesired.MembershipLocation = rawInitial.MembershipLocation + } else { + canonicalDesired.MembershipLocation = rawDesired.MembershipLocation + } + return canonicalDesired, nil +} + +func canonicalizeFeatureMembershipNewState(c *Client, rawNew, rawDesired *FeatureMembership) (*FeatureMembership, error) { + + if dcl.IsEmptyValueIndirect(rawNew.Mesh) && dcl.IsEmptyValueIndirect(rawDesired.Mesh) { + rawNew.Mesh = rawDesired.Mesh + } else { + rawNew.Mesh = canonicalizeNewFeatureMembershipMesh(c, rawDesired.Mesh, rawNew.Mesh) + } + + if dcl.IsEmptyValueIndirect(rawNew.Configmanagement) && dcl.IsEmptyValueIndirect(rawDesired.Configmanagement) { + rawNew.Configmanagement = rawDesired.Configmanagement + } else { + rawNew.Configmanagement = canonicalizeNewFeatureMembershipConfigmanagement(c, rawDesired.Configmanagement, rawNew.Configmanagement) + } + + if dcl.IsEmptyValueIndirect(rawNew.Policycontroller) && dcl.IsEmptyValueIndirect(rawDesired.Policycontroller) { + rawNew.Policycontroller = rawDesired.Policycontroller + } else { + rawNew.Policycontroller = canonicalizeNewFeatureMembershipPolicycontroller(c, rawDesired.Policycontroller, rawNew.Policycontroller) + 
} + + rawNew.Project = rawDesired.Project + + rawNew.Location = rawDesired.Location + + rawNew.Feature = rawDesired.Feature + + rawNew.Membership = rawDesired.Membership + + rawNew.MembershipLocation = rawDesired.MembershipLocation + + return rawNew, nil +} + +func canonicalizeFeatureMembershipMesh(des, initial *FeatureMembershipMesh, opts ...dcl.ApplyOption) *FeatureMembershipMesh { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &FeatureMembershipMesh{} + + if dcl.IsZeroValue(des.Management) || (dcl.IsEmptyValueIndirect(des.Management) && dcl.IsEmptyValueIndirect(initial.Management)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Management = initial.Management + } else { + cDes.Management = des.Management + } + if dcl.IsZeroValue(des.ControlPlane) || (dcl.IsEmptyValueIndirect(des.ControlPlane) && dcl.IsEmptyValueIndirect(initial.ControlPlane)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.ControlPlane = initial.ControlPlane + } else { + cDes.ControlPlane = des.ControlPlane + } + + return cDes +} + +func canonicalizeFeatureMembershipMeshSlice(des, initial []FeatureMembershipMesh, opts ...dcl.ApplyOption) []FeatureMembershipMesh { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]FeatureMembershipMesh, 0, len(des)) + for _, d := range des { + cd := canonicalizeFeatureMembershipMesh(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]FeatureMembershipMesh, 0, len(des)) + for i, d := range des { + cd := canonicalizeFeatureMembershipMesh(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewFeatureMembershipMesh(c *Client, des, nw *FeatureMembershipMesh) *FeatureMembershipMesh { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for FeatureMembershipMesh while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewFeatureMembershipMeshSet(c *Client, des, nw []FeatureMembershipMesh) []FeatureMembershipMesh { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []FeatureMembershipMesh + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareFeatureMembershipMeshNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewFeatureMembershipMesh(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewFeatureMembershipMeshSlice(c *Client, des, nw []FeatureMembershipMesh) []FeatureMembershipMesh { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []FeatureMembershipMesh + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewFeatureMembershipMesh(c, &d, &n)) + } + + return items +} + +func canonicalizeFeatureMembershipConfigmanagement(des, initial *FeatureMembershipConfigmanagement, opts ...dcl.ApplyOption) *FeatureMembershipConfigmanagement { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &FeatureMembershipConfigmanagement{} + + cDes.ConfigSync = canonicalizeFeatureMembershipConfigmanagementConfigSync(des.ConfigSync, initial.ConfigSync, opts...) + cDes.PolicyController = canonicalizeFeatureMembershipConfigmanagementPolicyController(des.PolicyController, initial.PolicyController, opts...) + cDes.HierarchyController = canonicalizeFeatureMembershipConfigmanagementHierarchyController(des.HierarchyController, initial.HierarchyController, opts...) + if dcl.StringCanonicalize(des.Version, initial.Version) || dcl.IsZeroValue(des.Version) { + cDes.Version = initial.Version + } else { + cDes.Version = des.Version + } + if dcl.IsZeroValue(des.Management) || (dcl.IsEmptyValueIndirect(des.Management) && dcl.IsEmptyValueIndirect(initial.Management)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Management = initial.Management + } else { + cDes.Management = des.Management + } + + return cDes +} + +func canonicalizeFeatureMembershipConfigmanagementSlice(des, initial []FeatureMembershipConfigmanagement, opts ...dcl.ApplyOption) []FeatureMembershipConfigmanagement { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]FeatureMembershipConfigmanagement, 0, len(des)) + for _, d := range des { + cd := canonicalizeFeatureMembershipConfigmanagement(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]FeatureMembershipConfigmanagement, 0, len(des)) + for i, d := range des { + cd := canonicalizeFeatureMembershipConfigmanagement(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewFeatureMembershipConfigmanagement(c *Client, des, nw *FeatureMembershipConfigmanagement) *FeatureMembershipConfigmanagement { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for FeatureMembershipConfigmanagement while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + nw.ConfigSync = canonicalizeNewFeatureMembershipConfigmanagementConfigSync(c, des.ConfigSync, nw.ConfigSync) + nw.PolicyController = canonicalizeNewFeatureMembershipConfigmanagementPolicyController(c, des.PolicyController, nw.PolicyController) + nw.HierarchyController = canonicalizeNewFeatureMembershipConfigmanagementHierarchyController(c, des.HierarchyController, nw.HierarchyController) + if dcl.StringCanonicalize(des.Version, nw.Version) { + nw.Version = des.Version + } + + return nw +} + +func canonicalizeNewFeatureMembershipConfigmanagementSet(c *Client, des, nw []FeatureMembershipConfigmanagement) []FeatureMembershipConfigmanagement { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []FeatureMembershipConfigmanagement + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareFeatureMembershipConfigmanagementNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewFeatureMembershipConfigmanagement(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewFeatureMembershipConfigmanagementSlice(c *Client, des, nw []FeatureMembershipConfigmanagement) []FeatureMembershipConfigmanagement { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []FeatureMembershipConfigmanagement + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewFeatureMembershipConfigmanagement(c, &d, &n)) + } + + return items +} + +func canonicalizeFeatureMembershipConfigmanagementConfigSync(des, initial *FeatureMembershipConfigmanagementConfigSync, opts ...dcl.ApplyOption) *FeatureMembershipConfigmanagementConfigSync { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &FeatureMembershipConfigmanagementConfigSync{} + + cDes.DeploymentOverrides = canonicalizeFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesSlice(des.DeploymentOverrides, initial.DeploymentOverrides, opts...) + cDes.Git = canonicalizeFeatureMembershipConfigmanagementConfigSyncGit(des.Git, initial.Git, opts...) 
+ if dcl.StringCanonicalize(des.SourceFormat, initial.SourceFormat) || dcl.IsZeroValue(des.SourceFormat) { + cDes.SourceFormat = initial.SourceFormat + } else { + cDes.SourceFormat = des.SourceFormat + } + if dcl.BoolCanonicalize(des.Enabled, initial.Enabled) || dcl.IsZeroValue(des.Enabled) { + cDes.Enabled = initial.Enabled + } else { + cDes.Enabled = des.Enabled + } + if dcl.BoolCanonicalize(des.StopSyncing, initial.StopSyncing) || dcl.IsZeroValue(des.StopSyncing) { + cDes.StopSyncing = initial.StopSyncing + } else { + cDes.StopSyncing = des.StopSyncing + } + if dcl.BoolCanonicalize(des.PreventDrift, initial.PreventDrift) || dcl.IsZeroValue(des.PreventDrift) { + cDes.PreventDrift = initial.PreventDrift + } else { + cDes.PreventDrift = des.PreventDrift + } + if dcl.IsZeroValue(des.MetricsGcpServiceAccountEmail) || (dcl.IsEmptyValueIndirect(des.MetricsGcpServiceAccountEmail) && dcl.IsEmptyValueIndirect(initial.MetricsGcpServiceAccountEmail)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.MetricsGcpServiceAccountEmail = initial.MetricsGcpServiceAccountEmail + } else { + cDes.MetricsGcpServiceAccountEmail = des.MetricsGcpServiceAccountEmail + } + cDes.Oci = canonicalizeFeatureMembershipConfigmanagementConfigSyncOci(des.Oci, initial.Oci, opts...) + + return cDes +} + +func canonicalizeFeatureMembershipConfigmanagementConfigSyncSlice(des, initial []FeatureMembershipConfigmanagementConfigSync, opts ...dcl.ApplyOption) []FeatureMembershipConfigmanagementConfigSync { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]FeatureMembershipConfigmanagementConfigSync, 0, len(des)) + for _, d := range des { + cd := canonicalizeFeatureMembershipConfigmanagementConfigSync(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]FeatureMembershipConfigmanagementConfigSync, 0, len(des)) + for i, d := range des { + cd := canonicalizeFeatureMembershipConfigmanagementConfigSync(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewFeatureMembershipConfigmanagementConfigSync(c *Client, des, nw *FeatureMembershipConfigmanagementConfigSync) *FeatureMembershipConfigmanagementConfigSync { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for FeatureMembershipConfigmanagementConfigSync while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + nw.DeploymentOverrides = canonicalizeNewFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesSlice(c, des.DeploymentOverrides, nw.DeploymentOverrides) + nw.Git = canonicalizeNewFeatureMembershipConfigmanagementConfigSyncGit(c, des.Git, nw.Git) + if dcl.StringCanonicalize(des.SourceFormat, nw.SourceFormat) { + nw.SourceFormat = des.SourceFormat + } + if dcl.BoolCanonicalize(des.Enabled, nw.Enabled) { + nw.Enabled = des.Enabled + } + if dcl.BoolCanonicalize(des.StopSyncing, nw.StopSyncing) { + nw.StopSyncing = des.StopSyncing + } + if dcl.BoolCanonicalize(des.PreventDrift, nw.PreventDrift) { + nw.PreventDrift = des.PreventDrift + } + nw.Oci = canonicalizeNewFeatureMembershipConfigmanagementConfigSyncOci(c, des.Oci, nw.Oci) + + return nw +} + +func canonicalizeNewFeatureMembershipConfigmanagementConfigSyncSet(c *Client, des, nw []FeatureMembershipConfigmanagementConfigSync) []FeatureMembershipConfigmanagementConfigSync { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []FeatureMembershipConfigmanagementConfigSync + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareFeatureMembershipConfigmanagementConfigSyncNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewFeatureMembershipConfigmanagementConfigSync(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewFeatureMembershipConfigmanagementConfigSyncSlice(c *Client, des, nw []FeatureMembershipConfigmanagementConfigSync) []FeatureMembershipConfigmanagementConfigSync { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []FeatureMembershipConfigmanagementConfigSync + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewFeatureMembershipConfigmanagementConfigSync(c, &d, &n)) + } + + return items +} + +func canonicalizeFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides(des, initial *FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides, opts ...dcl.ApplyOption) *FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides{} + + if dcl.StringCanonicalize(des.DeploymentName, initial.DeploymentName) || dcl.IsZeroValue(des.DeploymentName) { + cDes.DeploymentName = initial.DeploymentName + } else { + cDes.DeploymentName = des.DeploymentName + } + if dcl.StringCanonicalize(des.DeploymentNamespace, initial.DeploymentNamespace) || dcl.IsZeroValue(des.DeploymentNamespace) { + 
cDes.DeploymentNamespace = initial.DeploymentNamespace + } else { + cDes.DeploymentNamespace = des.DeploymentNamespace + } + cDes.Containers = canonicalizeFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersSlice(des.Containers, initial.Containers, opts...) + + return cDes +} + +func canonicalizeFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesSlice(des, initial []FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides, opts ...dcl.ApplyOption) []FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides, 0, len(des)) + for _, d := range des { + cd := canonicalizeFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides, 0, len(des)) + for i, d := range des { + cd := canonicalizeFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides(c *Client, des, nw *FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides) *FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.DeploymentName, nw.DeploymentName) { + nw.DeploymentName = des.DeploymentName + } + if dcl.StringCanonicalize(des.DeploymentNamespace, nw.DeploymentNamespace) { + nw.DeploymentNamespace = des.DeploymentNamespace + } + nw.Containers = canonicalizeNewFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersSlice(c, des.Containers, nw.Containers) + + return nw +} + +func canonicalizeNewFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesSet(c *Client, des, nw []FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides) []FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesSlice(c *Client, des, nw []FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides) []FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides(c, &d, &n)) + } + + return items +} + +func canonicalizeFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers(des, initial *FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers, opts ...dcl.ApplyOption) *FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers{} + + if dcl.StringCanonicalize(des.ContainerName, initial.ContainerName) || dcl.IsZeroValue(des.ContainerName) { + cDes.ContainerName = initial.ContainerName + } else { + cDes.ContainerName = des.ContainerName + } + if dcl.StringCanonicalize(des.CpuRequest, initial.CpuRequest) || dcl.IsZeroValue(des.CpuRequest) { + cDes.CpuRequest = initial.CpuRequest + } else { + cDes.CpuRequest = des.CpuRequest + } + if dcl.StringCanonicalize(des.MemoryRequest, initial.MemoryRequest) || dcl.IsZeroValue(des.MemoryRequest) { + cDes.MemoryRequest = initial.MemoryRequest + } else { + cDes.MemoryRequest = des.MemoryRequest + } + if dcl.StringCanonicalize(des.CpuLimit, initial.CpuLimit) || dcl.IsZeroValue(des.CpuLimit) { + cDes.CpuLimit = initial.CpuLimit + } else { + cDes.CpuLimit = des.CpuLimit + } + if dcl.StringCanonicalize(des.MemoryLimit, initial.MemoryLimit) || dcl.IsZeroValue(des.MemoryLimit) { + cDes.MemoryLimit = initial.MemoryLimit + } else { + cDes.MemoryLimit = des.MemoryLimit + } + + return cDes +} + +func canonicalizeFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersSlice(des, initial []FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers, opts 
...dcl.ApplyOption) []FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers, 0, len(des)) + for _, d := range des { + cd := canonicalizeFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers, 0, len(des)) + for i, d := range des { + cd := canonicalizeFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers(c *Client, des, nw *FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers) *FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.ContainerName, nw.ContainerName) { + nw.ContainerName = des.ContainerName + } + if dcl.StringCanonicalize(des.CpuRequest, nw.CpuRequest) { + nw.CpuRequest = des.CpuRequest + } + if dcl.StringCanonicalize(des.MemoryRequest, nw.MemoryRequest) { + nw.MemoryRequest = des.MemoryRequest + } + if dcl.StringCanonicalize(des.CpuLimit, nw.CpuLimit) { + nw.CpuLimit = des.CpuLimit + } + if dcl.StringCanonicalize(des.MemoryLimit, nw.MemoryLimit) { + nw.MemoryLimit = des.MemoryLimit + } + + return nw +} + +func canonicalizeNewFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersSet(c *Client, des, nw []FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers) []FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) 
+ + return items +} + +func canonicalizeNewFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersSlice(c *Client, des, nw []FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers) []FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers(c, &d, &n)) + } + + return items +} + +func canonicalizeFeatureMembershipConfigmanagementConfigSyncGit(des, initial *FeatureMembershipConfigmanagementConfigSyncGit, opts ...dcl.ApplyOption) *FeatureMembershipConfigmanagementConfigSyncGit { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &FeatureMembershipConfigmanagementConfigSyncGit{} + + if dcl.StringCanonicalize(des.SyncRepo, initial.SyncRepo) || dcl.IsZeroValue(des.SyncRepo) { + cDes.SyncRepo = initial.SyncRepo + } else { + cDes.SyncRepo = des.SyncRepo + } + if dcl.StringCanonicalize(des.SyncBranch, initial.SyncBranch) || dcl.IsZeroValue(des.SyncBranch) { + cDes.SyncBranch = initial.SyncBranch + } else { + cDes.SyncBranch = des.SyncBranch + } + if dcl.StringCanonicalize(des.PolicyDir, initial.PolicyDir) || dcl.IsZeroValue(des.PolicyDir) { + cDes.PolicyDir = initial.PolicyDir + } else { + cDes.PolicyDir = des.PolicyDir + } + if dcl.StringCanonicalize(des.SyncWaitSecs, initial.SyncWaitSecs) || dcl.IsZeroValue(des.SyncWaitSecs) { + cDes.SyncWaitSecs = initial.SyncWaitSecs + } else { + cDes.SyncWaitSecs = des.SyncWaitSecs + } + if dcl.StringCanonicalize(des.SyncRev, initial.SyncRev) || dcl.IsZeroValue(des.SyncRev) { + 
cDes.SyncRev = initial.SyncRev + } else { + cDes.SyncRev = des.SyncRev + } + if dcl.StringCanonicalize(des.SecretType, initial.SecretType) || dcl.IsZeroValue(des.SecretType) { + cDes.SecretType = initial.SecretType + } else { + cDes.SecretType = des.SecretType + } + if dcl.StringCanonicalize(des.HttpsProxy, initial.HttpsProxy) || dcl.IsZeroValue(des.HttpsProxy) { + cDes.HttpsProxy = initial.HttpsProxy + } else { + cDes.HttpsProxy = des.HttpsProxy + } + if dcl.IsZeroValue(des.GcpServiceAccountEmail) || (dcl.IsEmptyValueIndirect(des.GcpServiceAccountEmail) && dcl.IsEmptyValueIndirect(initial.GcpServiceAccountEmail)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.GcpServiceAccountEmail = initial.GcpServiceAccountEmail + } else { + cDes.GcpServiceAccountEmail = des.GcpServiceAccountEmail + } + + return cDes +} + +func canonicalizeFeatureMembershipConfigmanagementConfigSyncGitSlice(des, initial []FeatureMembershipConfigmanagementConfigSyncGit, opts ...dcl.ApplyOption) []FeatureMembershipConfigmanagementConfigSyncGit { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]FeatureMembershipConfigmanagementConfigSyncGit, 0, len(des)) + for _, d := range des { + cd := canonicalizeFeatureMembershipConfigmanagementConfigSyncGit(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]FeatureMembershipConfigmanagementConfigSyncGit, 0, len(des)) + for i, d := range des { + cd := canonicalizeFeatureMembershipConfigmanagementConfigSyncGit(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewFeatureMembershipConfigmanagementConfigSyncGit(c *Client, des, nw *FeatureMembershipConfigmanagementConfigSyncGit) *FeatureMembershipConfigmanagementConfigSyncGit { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for FeatureMembershipConfigmanagementConfigSyncGit while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.SyncRepo, nw.SyncRepo) { + nw.SyncRepo = des.SyncRepo + } + if dcl.StringCanonicalize(des.SyncBranch, nw.SyncBranch) { + nw.SyncBranch = des.SyncBranch + } + if dcl.StringCanonicalize(des.PolicyDir, nw.PolicyDir) { + nw.PolicyDir = des.PolicyDir + } + if dcl.StringCanonicalize(des.SyncWaitSecs, nw.SyncWaitSecs) { + nw.SyncWaitSecs = des.SyncWaitSecs + } + if dcl.StringCanonicalize(des.SyncRev, nw.SyncRev) { + nw.SyncRev = des.SyncRev + } + if dcl.StringCanonicalize(des.SecretType, nw.SecretType) { + nw.SecretType = des.SecretType + } + if dcl.StringCanonicalize(des.HttpsProxy, nw.HttpsProxy) { + nw.HttpsProxy = des.HttpsProxy + } + + return nw +} + +func canonicalizeNewFeatureMembershipConfigmanagementConfigSyncGitSet(c *Client, des, nw []FeatureMembershipConfigmanagementConfigSyncGit) []FeatureMembershipConfigmanagementConfigSyncGit { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []FeatureMembershipConfigmanagementConfigSyncGit + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareFeatureMembershipConfigmanagementConfigSyncGitNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewFeatureMembershipConfigmanagementConfigSyncGit(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewFeatureMembershipConfigmanagementConfigSyncGitSlice(c *Client, des, nw []FeatureMembershipConfigmanagementConfigSyncGit) []FeatureMembershipConfigmanagementConfigSyncGit { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []FeatureMembershipConfigmanagementConfigSyncGit + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewFeatureMembershipConfigmanagementConfigSyncGit(c, &d, &n)) + } + + return items +} + +func canonicalizeFeatureMembershipConfigmanagementConfigSyncOci(des, initial *FeatureMembershipConfigmanagementConfigSyncOci, opts ...dcl.ApplyOption) *FeatureMembershipConfigmanagementConfigSyncOci { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &FeatureMembershipConfigmanagementConfigSyncOci{} + + if dcl.StringCanonicalize(des.SyncRepo, initial.SyncRepo) || dcl.IsZeroValue(des.SyncRepo) { + cDes.SyncRepo = initial.SyncRepo + } else { + cDes.SyncRepo = des.SyncRepo + } + if dcl.StringCanonicalize(des.PolicyDir, initial.PolicyDir) || dcl.IsZeroValue(des.PolicyDir) { + cDes.PolicyDir = initial.PolicyDir + } else { + cDes.PolicyDir = des.PolicyDir + } + if 
dcl.StringCanonicalize(des.SyncWaitSecs, initial.SyncWaitSecs) || dcl.IsZeroValue(des.SyncWaitSecs) { + cDes.SyncWaitSecs = initial.SyncWaitSecs + } else { + cDes.SyncWaitSecs = des.SyncWaitSecs + } + if dcl.StringCanonicalize(des.SecretType, initial.SecretType) || dcl.IsZeroValue(des.SecretType) { + cDes.SecretType = initial.SecretType + } else { + cDes.SecretType = des.SecretType + } + if dcl.IsZeroValue(des.GcpServiceAccountEmail) || (dcl.IsEmptyValueIndirect(des.GcpServiceAccountEmail) && dcl.IsEmptyValueIndirect(initial.GcpServiceAccountEmail)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.GcpServiceAccountEmail = initial.GcpServiceAccountEmail + } else { + cDes.GcpServiceAccountEmail = des.GcpServiceAccountEmail + } + + return cDes +} + +func canonicalizeFeatureMembershipConfigmanagementConfigSyncOciSlice(des, initial []FeatureMembershipConfigmanagementConfigSyncOci, opts ...dcl.ApplyOption) []FeatureMembershipConfigmanagementConfigSyncOci { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]FeatureMembershipConfigmanagementConfigSyncOci, 0, len(des)) + for _, d := range des { + cd := canonicalizeFeatureMembershipConfigmanagementConfigSyncOci(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]FeatureMembershipConfigmanagementConfigSyncOci, 0, len(des)) + for i, d := range des { + cd := canonicalizeFeatureMembershipConfigmanagementConfigSyncOci(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewFeatureMembershipConfigmanagementConfigSyncOci(c *Client, des, nw *FeatureMembershipConfigmanagementConfigSyncOci) *FeatureMembershipConfigmanagementConfigSyncOci { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for FeatureMembershipConfigmanagementConfigSyncOci while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.SyncRepo, nw.SyncRepo) { + nw.SyncRepo = des.SyncRepo + } + if dcl.StringCanonicalize(des.PolicyDir, nw.PolicyDir) { + nw.PolicyDir = des.PolicyDir + } + if dcl.StringCanonicalize(des.SyncWaitSecs, nw.SyncWaitSecs) { + nw.SyncWaitSecs = des.SyncWaitSecs + } + if dcl.StringCanonicalize(des.SecretType, nw.SecretType) { + nw.SecretType = des.SecretType + } + + return nw +} + +func canonicalizeNewFeatureMembershipConfigmanagementConfigSyncOciSet(c *Client, des, nw []FeatureMembershipConfigmanagementConfigSyncOci) []FeatureMembershipConfigmanagementConfigSyncOci { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []FeatureMembershipConfigmanagementConfigSyncOci + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareFeatureMembershipConfigmanagementConfigSyncOciNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewFeatureMembershipConfigmanagementConfigSyncOci(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) 
+ + return items +} + +func canonicalizeNewFeatureMembershipConfigmanagementConfigSyncOciSlice(c *Client, des, nw []FeatureMembershipConfigmanagementConfigSyncOci) []FeatureMembershipConfigmanagementConfigSyncOci { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []FeatureMembershipConfigmanagementConfigSyncOci + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewFeatureMembershipConfigmanagementConfigSyncOci(c, &d, &n)) + } + + return items +} + +func canonicalizeFeatureMembershipConfigmanagementPolicyController(des, initial *FeatureMembershipConfigmanagementPolicyController, opts ...dcl.ApplyOption) *FeatureMembershipConfigmanagementPolicyController { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &FeatureMembershipConfigmanagementPolicyController{} + + if dcl.BoolCanonicalize(des.Enabled, initial.Enabled) || dcl.IsZeroValue(des.Enabled) { + cDes.Enabled = initial.Enabled + } else { + cDes.Enabled = des.Enabled + } + if dcl.StringArrayCanonicalize(des.ExemptableNamespaces, initial.ExemptableNamespaces) { + cDes.ExemptableNamespaces = initial.ExemptableNamespaces + } else { + cDes.ExemptableNamespaces = des.ExemptableNamespaces + } + if dcl.BoolCanonicalize(des.ReferentialRulesEnabled, initial.ReferentialRulesEnabled) || dcl.IsZeroValue(des.ReferentialRulesEnabled) { + cDes.ReferentialRulesEnabled = initial.ReferentialRulesEnabled + } else { + cDes.ReferentialRulesEnabled = des.ReferentialRulesEnabled + } + if dcl.BoolCanonicalize(des.LogDeniesEnabled, initial.LogDeniesEnabled) || dcl.IsZeroValue(des.LogDeniesEnabled) { + cDes.LogDeniesEnabled = initial.LogDeniesEnabled + } else { + cDes.LogDeniesEnabled = des.LogDeniesEnabled + } + if dcl.BoolCanonicalize(des.MutationEnabled, initial.MutationEnabled) || 
dcl.IsZeroValue(des.MutationEnabled) {
+		cDes.MutationEnabled = initial.MutationEnabled
+	} else {
+		cDes.MutationEnabled = des.MutationEnabled
+	}
+	cDes.Monitoring = canonicalizeFeatureMembershipConfigmanagementPolicyControllerMonitoring(des.Monitoring, initial.Monitoring, opts...)
+	if dcl.BoolCanonicalize(des.TemplateLibraryInstalled, initial.TemplateLibraryInstalled) || dcl.IsZeroValue(des.TemplateLibraryInstalled) {
+		cDes.TemplateLibraryInstalled = initial.TemplateLibraryInstalled
+	} else {
+		cDes.TemplateLibraryInstalled = des.TemplateLibraryInstalled
+	}
+	if dcl.StringCanonicalize(des.AuditIntervalSeconds, initial.AuditIntervalSeconds) || dcl.IsZeroValue(des.AuditIntervalSeconds) {
+		cDes.AuditIntervalSeconds = initial.AuditIntervalSeconds
+	} else {
+		cDes.AuditIntervalSeconds = des.AuditIntervalSeconds
+	}
+
+	return cDes
+}
+
+// canonicalizeFeatureMembershipConfigmanagementPolicyControllerSlice canonicalizes des element-wise against
+// initial; when the lengths differ, each desired element is canonicalized against a nil initial instead.
+// NOTE(review): DCL-generated boilerplate — change the generator, not this file.
+func canonicalizeFeatureMembershipConfigmanagementPolicyControllerSlice(des, initial []FeatureMembershipConfigmanagementPolicyController, opts ...dcl.ApplyOption) []FeatureMembershipConfigmanagementPolicyController {
+	if dcl.IsEmptyValueIndirect(des) {
+		return initial
+	}
+
+	if len(des) != len(initial) {
+
+		items := make([]FeatureMembershipConfigmanagementPolicyController, 0, len(des))
+		for _, d := range des {
+			cd := canonicalizeFeatureMembershipConfigmanagementPolicyController(&d, nil, opts...)
+			if cd != nil {
+				items = append(items, *cd)
+			}
+		}
+		return items
+	}
+
+	items := make([]FeatureMembershipConfigmanagementPolicyController, 0, len(des))
+	for i, d := range des {
+		cd := canonicalizeFeatureMembershipConfigmanagementPolicyController(&d, &initial[i], opts...)
+		if cd != nil {
+			items = append(items, *cd)
+		}
+	}
+	return items
+
+}
+
+// canonicalizeNewFeatureMembershipConfigmanagementPolicyController merges the desired state (des) into the
+// actual state (nw) returned by the API, keeping the desired spelling of fields the server reports in an
+// equivalent form. nil des returns nw; nil nw returns des only when des is explicitly empty.
+func canonicalizeNewFeatureMembershipConfigmanagementPolicyController(c *Client, des, nw *FeatureMembershipConfigmanagementPolicyController) *FeatureMembershipConfigmanagementPolicyController {
+
+	if des == nil {
+		return nw
+	}
+
+	if nw == nil {
+		if dcl.IsEmptyValueIndirect(des) {
+			c.Config.Logger.Info("Found explicitly empty value for FeatureMembershipConfigmanagementPolicyController while comparing non-nil desired to nil actual. Returning desired object.")
+			return des
+		}
+		return nil
+	}
+
+	if dcl.BoolCanonicalize(des.Enabled, nw.Enabled) {
+		nw.Enabled = des.Enabled
+	}
+	if dcl.StringArrayCanonicalize(des.ExemptableNamespaces, nw.ExemptableNamespaces) {
+		nw.ExemptableNamespaces = des.ExemptableNamespaces
+	}
+	if dcl.BoolCanonicalize(des.ReferentialRulesEnabled, nw.ReferentialRulesEnabled) {
+		nw.ReferentialRulesEnabled = des.ReferentialRulesEnabled
+	}
+	if dcl.BoolCanonicalize(des.LogDeniesEnabled, nw.LogDeniesEnabled) {
+		nw.LogDeniesEnabled = des.LogDeniesEnabled
+	}
+	if dcl.BoolCanonicalize(des.MutationEnabled, nw.MutationEnabled) {
+		nw.MutationEnabled = des.MutationEnabled
+	}
+	nw.Monitoring = canonicalizeNewFeatureMembershipConfigmanagementPolicyControllerMonitoring(c, des.Monitoring, nw.Monitoring)
+	if dcl.BoolCanonicalize(des.TemplateLibraryInstalled, nw.TemplateLibraryInstalled) {
+		nw.TemplateLibraryInstalled = des.TemplateLibraryInstalled
+	}
+	if dcl.StringCanonicalize(des.AuditIntervalSeconds, nw.AuditIntervalSeconds) {
+		nw.AuditIntervalSeconds = des.AuditIntervalSeconds
+	}
+
+	return nw
+}
+
+// canonicalizeNewFeatureMembershipConfigmanagementPolicyControllerSet treats des and nw as unordered sets:
+// each desired element that compares diff-free against an actual element is canonicalized against it (and the
+// match is removed from nw); unmatched actual elements are appended unchanged. Quadratic, but slices here are
+// small generated config lists.
+func canonicalizeNewFeatureMembershipConfigmanagementPolicyControllerSet(c *Client, des, nw []FeatureMembershipConfigmanagementPolicyController) []FeatureMembershipConfigmanagementPolicyController {
+	if des == nil {
+		return nw
+	}
+
+	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
+	var items []FeatureMembershipConfigmanagementPolicyController
+	for _, d := range des {
+		matchedIndex := -1
+		for i, n := range nw {
+			if diffs, _ := compareFeatureMembershipConfigmanagementPolicyControllerNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
+				matchedIndex = i
+				break
+			}
+		}
+		if matchedIndex != -1 {
+			items = append(items, *canonicalizeNewFeatureMembershipConfigmanagementPolicyController(c, &d, &nw[matchedIndex]))
+			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
+		}
+	}
+	// Also include elements in nw that are not matched in des.
+	items = append(items, nw...)
+
+	return items
+}
+
+// canonicalizeNewFeatureMembershipConfigmanagementPolicyControllerSlice canonicalizes pairwise by index;
+// unequal lengths are returned untouched so the later diff reports the mismatch.
+func canonicalizeNewFeatureMembershipConfigmanagementPolicyControllerSlice(c *Client, des, nw []FeatureMembershipConfigmanagementPolicyController) []FeatureMembershipConfigmanagementPolicyController {
+	if des == nil {
+		return nw
+	}
+
+	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
+	// Return the original array.
+	if len(des) != len(nw) {
+		return nw
+	}
+
+	var items []FeatureMembershipConfigmanagementPolicyController
+	for i, d := range des {
+		n := nw[i]
+		items = append(items, *canonicalizeNewFeatureMembershipConfigmanagementPolicyController(c, &d, &n))
+	}
+
+	return items
+}
+
+// canonicalizeFeatureMembershipConfigmanagementPolicyControllerMonitoring canonicalizes the desired
+// Monitoring block against the initial state; an explicitly-empty desired value short-circuits.
+func canonicalizeFeatureMembershipConfigmanagementPolicyControllerMonitoring(des, initial *FeatureMembershipConfigmanagementPolicyControllerMonitoring, opts ...dcl.ApplyOption) *FeatureMembershipConfigmanagementPolicyControllerMonitoring {
+	if des == nil {
+		return initial
+	}
+	if des.empty {
+		return des
+	}
+
+	if initial == nil {
+		return des
+	}
+
+	cDes := &FeatureMembershipConfigmanagementPolicyControllerMonitoring{}
+
+	if dcl.IsZeroValue(des.Backends) || (dcl.IsEmptyValueIndirect(des.Backends) && dcl.IsEmptyValueIndirect(initial.Backends)) {
+		// Desired and initial values are equivalent, so set canonical desired value to initial value.
+		cDes.Backends = initial.Backends
+	} else {
+		cDes.Backends = des.Backends
+	}
+
+	return cDes
+}
+
+// canonicalizeFeatureMembershipConfigmanagementPolicyControllerMonitoringSlice canonicalizes des element-wise
+// against initial, falling back to nil initial per element when the lengths differ. DCL-generated.
+func canonicalizeFeatureMembershipConfigmanagementPolicyControllerMonitoringSlice(des, initial []FeatureMembershipConfigmanagementPolicyControllerMonitoring, opts ...dcl.ApplyOption) []FeatureMembershipConfigmanagementPolicyControllerMonitoring {
+	if dcl.IsEmptyValueIndirect(des) {
+		return initial
+	}
+
+	if len(des) != len(initial) {
+
+		items := make([]FeatureMembershipConfigmanagementPolicyControllerMonitoring, 0, len(des))
+		for _, d := range des {
+			cd := canonicalizeFeatureMembershipConfigmanagementPolicyControllerMonitoring(&d, nil, opts...)
+			if cd != nil {
+				items = append(items, *cd)
+			}
+		}
+		return items
+	}
+
+	items := make([]FeatureMembershipConfigmanagementPolicyControllerMonitoring, 0, len(des))
+	for i, d := range des {
+		cd := canonicalizeFeatureMembershipConfigmanagementPolicyControllerMonitoring(&d, &initial[i], opts...)
+		if cd != nil {
+			items = append(items, *cd)
+		}
+	}
+	return items
+
+}
+
+// canonicalizeNewFeatureMembershipConfigmanagementPolicyControllerMonitoring reconciles desired vs actual; the
+// Backends field is left as the server returned it (no per-field merge here).
+func canonicalizeNewFeatureMembershipConfigmanagementPolicyControllerMonitoring(c *Client, des, nw *FeatureMembershipConfigmanagementPolicyControllerMonitoring) *FeatureMembershipConfigmanagementPolicyControllerMonitoring {
+
+	if des == nil {
+		return nw
+	}
+
+	if nw == nil {
+		if dcl.IsEmptyValueIndirect(des) {
+			c.Config.Logger.Info("Found explicitly empty value for FeatureMembershipConfigmanagementPolicyControllerMonitoring while comparing non-nil desired to nil actual. Returning desired object.")
+			return des
+		}
+		return nil
+	}
+
+	return nw
+}
+
+// canonicalizeNewFeatureMembershipConfigmanagementPolicyControllerMonitoringSet matches desired elements to
+// diff-free actual elements (set semantics), then appends the unmatched actual elements.
+func canonicalizeNewFeatureMembershipConfigmanagementPolicyControllerMonitoringSet(c *Client, des, nw []FeatureMembershipConfigmanagementPolicyControllerMonitoring) []FeatureMembershipConfigmanagementPolicyControllerMonitoring {
+	if des == nil {
+		return nw
+	}
+
+	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
+	var items []FeatureMembershipConfigmanagementPolicyControllerMonitoring
+	for _, d := range des {
+		matchedIndex := -1
+		for i, n := range nw {
+			if diffs, _ := compareFeatureMembershipConfigmanagementPolicyControllerMonitoringNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
+				matchedIndex = i
+				break
+			}
+		}
+		if matchedIndex != -1 {
+			items = append(items, *canonicalizeNewFeatureMembershipConfigmanagementPolicyControllerMonitoring(c, &d, &nw[matchedIndex]))
+			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
+		}
+	}
+	// Also include elements in nw that are not matched in des.
+	items = append(items, nw...)
+
+	return items
+}
+
+// canonicalizeNewFeatureMembershipConfigmanagementPolicyControllerMonitoringSlice canonicalizes pairwise by
+// index; unequal lengths are returned untouched so the later diff reports the mismatch.
+func canonicalizeNewFeatureMembershipConfigmanagementPolicyControllerMonitoringSlice(c *Client, des, nw []FeatureMembershipConfigmanagementPolicyControllerMonitoring) []FeatureMembershipConfigmanagementPolicyControllerMonitoring {
+	if des == nil {
+		return nw
+	}
+
+	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
+	// Return the original array.
+	if len(des) != len(nw) {
+		return nw
+	}
+
+	var items []FeatureMembershipConfigmanagementPolicyControllerMonitoring
+	for i, d := range des {
+		n := nw[i]
+		items = append(items, *canonicalizeNewFeatureMembershipConfigmanagementPolicyControllerMonitoring(c, &d, &n))
+	}
+
+	return items
+}
+
+// canonicalizeFeatureMembershipConfigmanagementHierarchyController canonicalizes the desired Hierarchy
+// Controller block against initial: each boolean keeps the initial spelling when equivalent or zero, otherwise
+// the desired value.
+func canonicalizeFeatureMembershipConfigmanagementHierarchyController(des, initial *FeatureMembershipConfigmanagementHierarchyController, opts ...dcl.ApplyOption) *FeatureMembershipConfigmanagementHierarchyController {
+	if des == nil {
+		return initial
+	}
+	if des.empty {
+		return des
+	}
+
+	if initial == nil {
+		return des
+	}
+
+	cDes := &FeatureMembershipConfigmanagementHierarchyController{}
+
+	if dcl.BoolCanonicalize(des.Enabled, initial.Enabled) || dcl.IsZeroValue(des.Enabled) {
+		cDes.Enabled = initial.Enabled
+	} else {
+		cDes.Enabled = des.Enabled
+	}
+	if dcl.BoolCanonicalize(des.EnablePodTreeLabels, initial.EnablePodTreeLabels) || dcl.IsZeroValue(des.EnablePodTreeLabels) {
+		cDes.EnablePodTreeLabels = initial.EnablePodTreeLabels
+	} else {
+		cDes.EnablePodTreeLabels = des.EnablePodTreeLabels
+	}
+	if dcl.BoolCanonicalize(des.EnableHierarchicalResourceQuota, initial.EnableHierarchicalResourceQuota) || dcl.IsZeroValue(des.EnableHierarchicalResourceQuota) {
+		cDes.EnableHierarchicalResourceQuota = initial.EnableHierarchicalResourceQuota
+	} else {
+		cDes.EnableHierarchicalResourceQuota = des.EnableHierarchicalResourceQuota
+	}
+
+	return cDes
+}
+
+// canonicalizeFeatureMembershipConfigmanagementHierarchyControllerSlice canonicalizes des element-wise against
+// initial. NOTE(review): this variant guards with des == nil where sibling Slice canonicalizers use
+// dcl.IsEmptyValueIndirect(des) — presumably a generator difference; confirm against the DCL generator before
+// changing.
+func canonicalizeFeatureMembershipConfigmanagementHierarchyControllerSlice(des, initial []FeatureMembershipConfigmanagementHierarchyController, opts ...dcl.ApplyOption) []FeatureMembershipConfigmanagementHierarchyController {
+	if des == nil {
+		return initial
+	}
+
+	if len(des) != len(initial) {
+
+		items := make([]FeatureMembershipConfigmanagementHierarchyController, 0, len(des))
+		for _, d := range des {
+			cd := canonicalizeFeatureMembershipConfigmanagementHierarchyController(&d, nil, opts...)
+			if cd != nil {
+				items = append(items, *cd)
+			}
+		}
+		return items
+	}
+
+	items := make([]FeatureMembershipConfigmanagementHierarchyController, 0, len(des))
+	for i, d := range des {
+		cd := canonicalizeFeatureMembershipConfigmanagementHierarchyController(&d, &initial[i], opts...)
+		if cd != nil {
+			items = append(items, *cd)
+		}
+	}
+	return items
+
+}
+
+// canonicalizeNewFeatureMembershipConfigmanagementHierarchyController merges desired booleans into the actual
+// state when the server reports an equivalent value.
+func canonicalizeNewFeatureMembershipConfigmanagementHierarchyController(c *Client, des, nw *FeatureMembershipConfigmanagementHierarchyController) *FeatureMembershipConfigmanagementHierarchyController {
+
+	if des == nil {
+		return nw
+	}
+
+	if nw == nil {
+		if dcl.IsEmptyValueIndirect(des) {
+			c.Config.Logger.Info("Found explicitly empty value for FeatureMembershipConfigmanagementHierarchyController while comparing non-nil desired to nil actual. Returning desired object.")
+			return des
+		}
+		return nil
+	}
+
+	if dcl.BoolCanonicalize(des.Enabled, nw.Enabled) {
+		nw.Enabled = des.Enabled
+	}
+	if dcl.BoolCanonicalize(des.EnablePodTreeLabels, nw.EnablePodTreeLabels) {
+		nw.EnablePodTreeLabels = des.EnablePodTreeLabels
+	}
+	if dcl.BoolCanonicalize(des.EnableHierarchicalResourceQuota, nw.EnableHierarchicalResourceQuota) {
+		nw.EnableHierarchicalResourceQuota = des.EnableHierarchicalResourceQuota
+	}
+
+	return nw
+}
+
+// canonicalizeNewFeatureMembershipConfigmanagementHierarchyControllerSet matches desired elements to diff-free
+// actual elements (set semantics), then appends the unmatched actual elements.
+func canonicalizeNewFeatureMembershipConfigmanagementHierarchyControllerSet(c *Client, des, nw []FeatureMembershipConfigmanagementHierarchyController) []FeatureMembershipConfigmanagementHierarchyController {
+	if des == nil {
+		return nw
+	}
+
+	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
+	var items []FeatureMembershipConfigmanagementHierarchyController
+	for _, d := range des {
+		matchedIndex := -1
+		for i, n := range nw {
+			if diffs, _ := compareFeatureMembershipConfigmanagementHierarchyControllerNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
+				matchedIndex = i
+				break
+			}
+		}
+		if matchedIndex != -1 {
+			items = append(items, *canonicalizeNewFeatureMembershipConfigmanagementHierarchyController(c, &d, &nw[matchedIndex]))
+			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
+		}
+	}
+	// Also include elements in nw that are not matched in des.
+	items = append(items, nw...)
+
+	return items
+}
+
+// canonicalizeNewFeatureMembershipConfigmanagementHierarchyControllerSlice canonicalizes pairwise by index;
+// unequal lengths are returned untouched so the later diff reports the mismatch.
+func canonicalizeNewFeatureMembershipConfigmanagementHierarchyControllerSlice(c *Client, des, nw []FeatureMembershipConfigmanagementHierarchyController) []FeatureMembershipConfigmanagementHierarchyController {
+	if des == nil {
+		return nw
+	}
+
+	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
+	// Return the original array.
+	if len(des) != len(nw) {
+		return nw
+	}
+
+	var items []FeatureMembershipConfigmanagementHierarchyController
+	for i, d := range des {
+		n := nw[i]
+		items = append(items, *canonicalizeNewFeatureMembershipConfigmanagementHierarchyController(c, &d, &n))
+	}
+
+	return items
+}
+
+// canonicalizeFeatureMembershipPolicycontroller canonicalizes the desired Policy Controller block (Version plus
+// the nested PolicyControllerHubConfig) against initial.
+func canonicalizeFeatureMembershipPolicycontroller(des, initial *FeatureMembershipPolicycontroller, opts ...dcl.ApplyOption) *FeatureMembershipPolicycontroller {
+	if des == nil {
+		return initial
+	}
+	if des.empty {
+		return des
+	}
+
+	if initial == nil {
+		return des
+	}
+
+	cDes := &FeatureMembershipPolicycontroller{}
+
+	if dcl.StringCanonicalize(des.Version, initial.Version) || dcl.IsZeroValue(des.Version) {
+		cDes.Version = initial.Version
+	} else {
+		cDes.Version = des.Version
+	}
+	cDes.PolicyControllerHubConfig = canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfig(des.PolicyControllerHubConfig, initial.PolicyControllerHubConfig, opts...)
+
+	return cDes
+}
+
+// canonicalizeFeatureMembershipPolicycontrollerSlice canonicalizes des element-wise against initial, falling
+// back to nil initial per element when the lengths differ. DCL-generated.
+func canonicalizeFeatureMembershipPolicycontrollerSlice(des, initial []FeatureMembershipPolicycontroller, opts ...dcl.ApplyOption) []FeatureMembershipPolicycontroller {
+	if dcl.IsEmptyValueIndirect(des) {
+		return initial
+	}
+
+	if len(des) != len(initial) {
+
+		items := make([]FeatureMembershipPolicycontroller, 0, len(des))
+		for _, d := range des {
+			cd := canonicalizeFeatureMembershipPolicycontroller(&d, nil, opts...)
+			if cd != nil {
+				items = append(items, *cd)
+			}
+		}
+		return items
+	}
+
+	items := make([]FeatureMembershipPolicycontroller, 0, len(des))
+	for i, d := range des {
+		cd := canonicalizeFeatureMembershipPolicycontroller(&d, &initial[i], opts...)
+		if cd != nil {
+			items = append(items, *cd)
+		}
+	}
+	return items
+
+}
+
+// canonicalizeNewFeatureMembershipPolicycontroller merges the desired Policy Controller state into the actual
+// state returned by the API (Version plus nested hub config).
+func canonicalizeNewFeatureMembershipPolicycontroller(c *Client, des, nw *FeatureMembershipPolicycontroller) *FeatureMembershipPolicycontroller {
+
+	if des == nil {
+		return nw
+	}
+
+	if nw == nil {
+		if dcl.IsEmptyValueIndirect(des) {
+			c.Config.Logger.Info("Found explicitly empty value for FeatureMembershipPolicycontroller while comparing non-nil desired to nil actual. Returning desired object.")
+			return des
+		}
+		return nil
+	}
+
+	if dcl.StringCanonicalize(des.Version, nw.Version) {
+		nw.Version = des.Version
+	}
+	nw.PolicyControllerHubConfig = canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfig(c, des.PolicyControllerHubConfig, nw.PolicyControllerHubConfig)
+
+	return nw
+}
+
+// canonicalizeNewFeatureMembershipPolicycontrollerSet matches desired elements to diff-free actual elements
+// (set semantics), then appends the unmatched actual elements.
+func canonicalizeNewFeatureMembershipPolicycontrollerSet(c *Client, des, nw []FeatureMembershipPolicycontroller) []FeatureMembershipPolicycontroller {
+	if des == nil {
+		return nw
+	}
+
+	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
+	var items []FeatureMembershipPolicycontroller
+	for _, d := range des {
+		matchedIndex := -1
+		for i, n := range nw {
+			if diffs, _ := compareFeatureMembershipPolicycontrollerNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
+				matchedIndex = i
+				break
+			}
+		}
+		if matchedIndex != -1 {
+			items = append(items, *canonicalizeNewFeatureMembershipPolicycontroller(c, &d, &nw[matchedIndex]))
+			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
+		}
+	}
+	// Also include elements in nw that are not matched in des.
+	items = append(items, nw...)
+
+	return items
+}
+
+// canonicalizeNewFeatureMembershipPolicycontrollerSlice canonicalizes pairwise by index; unequal lengths are
+// returned untouched so the later diff reports the mismatch.
+func canonicalizeNewFeatureMembershipPolicycontrollerSlice(c *Client, des, nw []FeatureMembershipPolicycontroller) []FeatureMembershipPolicycontroller {
+	if des == nil {
+		return nw
+	}
+
+	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
+	// Return the original array.
+	if len(des) != len(nw) {
+		return nw
+	}
+
+	var items []FeatureMembershipPolicycontroller
+	for i, d := range des {
+		n := nw[i]
+		items = append(items, *canonicalizeNewFeatureMembershipPolicycontroller(c, &d, &n))
+	}
+
+	return items
+}
+
+// canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfig canonicalizes the desired hub config
+// against initial, field by field, recursing into Monitoring and PolicyContent.
+func canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfig(des, initial *FeatureMembershipPolicycontrollerPolicyControllerHubConfig, opts ...dcl.ApplyOption) *FeatureMembershipPolicycontrollerPolicyControllerHubConfig {
+	if des == nil {
+		return initial
+	}
+	if des.empty {
+		return des
+	}
+
+	if initial == nil {
+		return des
+	}
+
+	cDes := &FeatureMembershipPolicycontrollerPolicyControllerHubConfig{}
+
+	if dcl.IsZeroValue(des.InstallSpec) || (dcl.IsEmptyValueIndirect(des.InstallSpec) && dcl.IsEmptyValueIndirect(initial.InstallSpec)) {
+		// Desired and initial values are equivalent, so set canonical desired value to initial value.
+		cDes.InstallSpec = initial.InstallSpec
+	} else {
+		cDes.InstallSpec = des.InstallSpec
+	}
+	if dcl.StringArrayCanonicalize(des.ExemptableNamespaces, initial.ExemptableNamespaces) {
+		cDes.ExemptableNamespaces = initial.ExemptableNamespaces
+	} else {
+		cDes.ExemptableNamespaces = des.ExemptableNamespaces
+	}
+	if dcl.BoolCanonicalize(des.ReferentialRulesEnabled, initial.ReferentialRulesEnabled) || dcl.IsZeroValue(des.ReferentialRulesEnabled) {
+		cDes.ReferentialRulesEnabled = initial.ReferentialRulesEnabled
+	} else {
+		cDes.ReferentialRulesEnabled = des.ReferentialRulesEnabled
+	}
+	if dcl.BoolCanonicalize(des.LogDeniesEnabled, initial.LogDeniesEnabled) || dcl.IsZeroValue(des.LogDeniesEnabled) {
+		cDes.LogDeniesEnabled = initial.LogDeniesEnabled
+	} else {
+		cDes.LogDeniesEnabled = des.LogDeniesEnabled
+	}
+	if dcl.BoolCanonicalize(des.MutationEnabled, initial.MutationEnabled) || dcl.IsZeroValue(des.MutationEnabled) {
+		cDes.MutationEnabled = initial.MutationEnabled
+	} else {
+		cDes.MutationEnabled = des.MutationEnabled
+	}
+	cDes.Monitoring = canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring(des.Monitoring, initial.Monitoring, opts...)
+	if dcl.IsZeroValue(des.AuditIntervalSeconds) || (dcl.IsEmptyValueIndirect(des.AuditIntervalSeconds) && dcl.IsEmptyValueIndirect(initial.AuditIntervalSeconds)) {
+		// Desired and initial values are equivalent, so set canonical desired value to initial value.
+		cDes.AuditIntervalSeconds = initial.AuditIntervalSeconds
+	} else {
+		cDes.AuditIntervalSeconds = des.AuditIntervalSeconds
+	}
+	if dcl.IsZeroValue(des.ConstraintViolationLimit) || (dcl.IsEmptyValueIndirect(des.ConstraintViolationLimit) && dcl.IsEmptyValueIndirect(initial.ConstraintViolationLimit)) {
+		// Desired and initial values are equivalent, so set canonical desired value to initial value.
+		cDes.ConstraintViolationLimit = initial.ConstraintViolationLimit
+	} else {
+		cDes.ConstraintViolationLimit = des.ConstraintViolationLimit
+	}
+	cDes.PolicyContent = canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent(des.PolicyContent, initial.PolicyContent, opts...)
+	if dcl.IsZeroValue(des.DeploymentConfigs) || (dcl.IsEmptyValueIndirect(des.DeploymentConfigs) && dcl.IsEmptyValueIndirect(initial.DeploymentConfigs)) {
+		// Desired and initial values are equivalent, so set canonical desired value to initial value.
+		cDes.DeploymentConfigs = initial.DeploymentConfigs
+	} else {
+		cDes.DeploymentConfigs = des.DeploymentConfigs
+	}
+
+	return cDes
+}
+
+// canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigSlice canonicalizes des element-wise
+// against initial, falling back to nil initial per element when the lengths differ. DCL-generated.
+func canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigSlice(des, initial []FeatureMembershipPolicycontrollerPolicyControllerHubConfig, opts ...dcl.ApplyOption) []FeatureMembershipPolicycontrollerPolicyControllerHubConfig {
+	if dcl.IsEmptyValueIndirect(des) {
+		return initial
+	}
+
+	if len(des) != len(initial) {
+
+		items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfig, 0, len(des))
+		for _, d := range des {
+			cd := canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfig(&d, nil, opts...)
+			if cd != nil {
+				items = append(items, *cd)
+			}
+		}
+		return items
+	}
+
+	items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfig, 0, len(des))
+	for i, d := range des {
+		cd := canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfig(&d, &initial[i], opts...)
+		if cd != nil {
+			items = append(items, *cd)
+		}
+	}
+	return items
+
+}
+
+// canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfig merges the desired hub config into
+// the actual state, recursing into Monitoring and PolicyContent. InstallSpec/AuditIntervalSeconds/
+// ConstraintViolationLimit/DeploymentConfigs are left as the server returned them.
+func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfig(c *Client, des, nw *FeatureMembershipPolicycontrollerPolicyControllerHubConfig) *FeatureMembershipPolicycontrollerPolicyControllerHubConfig {
+
+	if des == nil {
+		return nw
+	}
+
+	if nw == nil {
+		if dcl.IsEmptyValueIndirect(des) {
+			c.Config.Logger.Info("Found explicitly empty value for FeatureMembershipPolicycontrollerPolicyControllerHubConfig while comparing non-nil desired to nil actual. Returning desired object.")
+			return des
+		}
+		return nil
+	}
+
+	if dcl.StringArrayCanonicalize(des.ExemptableNamespaces, nw.ExemptableNamespaces) {
+		nw.ExemptableNamespaces = des.ExemptableNamespaces
+	}
+	if dcl.BoolCanonicalize(des.ReferentialRulesEnabled, nw.ReferentialRulesEnabled) {
+		nw.ReferentialRulesEnabled = des.ReferentialRulesEnabled
+	}
+	if dcl.BoolCanonicalize(des.LogDeniesEnabled, nw.LogDeniesEnabled) {
+		nw.LogDeniesEnabled = des.LogDeniesEnabled
+	}
+	if dcl.BoolCanonicalize(des.MutationEnabled, nw.MutationEnabled) {
+		nw.MutationEnabled = des.MutationEnabled
+	}
+	nw.Monitoring = canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring(c, des.Monitoring, nw.Monitoring)
+	nw.PolicyContent = canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent(c, des.PolicyContent, nw.PolicyContent)
+
+	return nw
+}
+
+// canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigSet matches desired elements to
+// diff-free actual elements (set semantics), then appends the unmatched actual elements.
+func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigSet(c *Client, des, nw []FeatureMembershipPolicycontrollerPolicyControllerHubConfig) []FeatureMembershipPolicycontrollerPolicyControllerHubConfig {
+	if des == nil {
+		return nw
+	}
+
+	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
+	var items []FeatureMembershipPolicycontrollerPolicyControllerHubConfig
+	for _, d := range des {
+		matchedIndex := -1
+		for i, n := range nw {
+			if diffs, _ := compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
+				matchedIndex = i
+				break
+			}
+		}
+		if matchedIndex != -1 {
+			items = append(items, *canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfig(c, &d, &nw[matchedIndex]))
+			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
+		}
+	}
+	// Also include elements in nw that are not matched in des.
+	items = append(items, nw...)
+
+	return items
+}
+
+// canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigSlice canonicalizes pairwise by
+// index; unequal lengths are returned untouched so the later diff reports the mismatch.
+func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigSlice(c *Client, des, nw []FeatureMembershipPolicycontrollerPolicyControllerHubConfig) []FeatureMembershipPolicycontrollerPolicyControllerHubConfig {
+	if des == nil {
+		return nw
+	}
+
+	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
+	// Return the original array.
+	if len(des) != len(nw) {
+		return nw
+	}
+
+	var items []FeatureMembershipPolicycontrollerPolicyControllerHubConfig
+	for i, d := range des {
+		n := nw[i]
+		items = append(items, *canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfig(c, &d, &n))
+	}
+
+	return items
+}
+
+// canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring canonicalizes the desired
+// Monitoring block against initial (Backends keeps the initial value when equivalent or zero).
+func canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring(des, initial *FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring, opts ...dcl.ApplyOption) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring {
+	if des == nil {
+		return initial
+	}
+	if des.empty {
+		return des
+	}
+
+	if initial == nil {
+		return des
+	}
+
+	cDes := &FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring{}
+
+	if dcl.IsZeroValue(des.Backends) || (dcl.IsEmptyValueIndirect(des.Backends) && dcl.IsEmptyValueIndirect(initial.Backends)) {
+		// Desired and initial values are equivalent, so set canonical desired value to initial value.
+		cDes.Backends = initial.Backends
+	} else {
+		cDes.Backends = des.Backends
+	}
+
+	return cDes
+}
+
+// canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringSlice canonicalizes des
+// element-wise against initial, falling back to nil initial per element when the lengths differ.
+func canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringSlice(des, initial []FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring, opts ...dcl.ApplyOption) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring {
+	if dcl.IsEmptyValueIndirect(des) {
+		return initial
+	}
+
+	if len(des) != len(initial) {
+
+		items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring, 0, len(des))
+		for _, d := range des {
+			cd := canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring(&d, nil, opts...)
+			if cd != nil {
+				items = append(items, *cd)
+			}
+		}
+		return items
+	}
+
+	items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring, 0, len(des))
+	for i, d := range des {
+		cd := canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring(&d, &initial[i], opts...)
+		if cd != nil {
+			items = append(items, *cd)
+		}
+	}
+	return items
+
+}
+
+// canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring reconciles desired vs
+// actual; the Backends field is left as the server returned it (no per-field merge here).
+func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring(c *Client, des, nw *FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring {
+
+	if des == nil {
+		return nw
+	}
+
+	if nw == nil {
+		if dcl.IsEmptyValueIndirect(des) {
+			c.Config.Logger.Info("Found explicitly empty value for FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring while comparing non-nil desired to nil actual. Returning desired object.")
+			return des
+		}
+		return nil
+	}
+
+	return nw
+}
+
+// canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringSet matches desired
+// elements to diff-free actual elements (set semantics), then appends the unmatched actual elements.
+func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringSet(c *Client, des, nw []FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring {
+	if des == nil {
+		return nw
+	}
+
+	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
+	var items []FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring
+	for _, d := range des {
+		matchedIndex := -1
+		for i, n := range nw {
+			if diffs, _ := compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
+				matchedIndex = i
+				break
+			}
+		}
+		if matchedIndex != -1 {
+			items = append(items, *canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring(c, &d, &nw[matchedIndex]))
+			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
+		}
+	}
+	// Also include elements in nw that are not matched in des.
+	items = append(items, nw...)
+
+	return items
+}
+
+// canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringSlice canonicalizes
+// pairwise by index; unequal lengths are returned untouched so the later diff reports the mismatch.
+func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringSlice(c *Client, des, nw []FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring {
+	if des == nil {
+		return nw
+	}
+
+	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
+	// Return the original array.
+	if len(des) != len(nw) {
+		return nw
+	}
+
+	var items []FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring
+	for i, d := range des {
+		n := nw[i]
+		items = append(items, *canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring(c, &d, &n))
+	}
+
+	return items
+}
+
+// canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent canonicalizes the
+// desired PolicyContent block (nested TemplateLibrary plus the Bundles map/value) against initial.
+func canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent(des, initial *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent, opts ...dcl.ApplyOption) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent {
+	if des == nil {
+		return initial
+	}
+	if des.empty {
+		return des
+	}
+
+	if initial == nil {
+		return des
+	}
+
+	cDes := &FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent{}
+
+	cDes.TemplateLibrary = canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary(des.TemplateLibrary, initial.TemplateLibrary, opts...)
+	if dcl.IsZeroValue(des.Bundles) || (dcl.IsEmptyValueIndirect(des.Bundles) && dcl.IsEmptyValueIndirect(initial.Bundles)) {
+		// Desired and initial values are equivalent, so set canonical desired value to initial value.
+		cDes.Bundles = initial.Bundles
+	} else {
+		cDes.Bundles = des.Bundles
+	}
+
+	return cDes
+}
+
+// canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentSlice canonicalizes des
+// element-wise against initial, falling back to nil initial per element when the lengths differ.
+func canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentSlice(des, initial []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent, opts ...dcl.ApplyOption) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent {
+	if dcl.IsEmptyValueIndirect(des) {
+		return initial
+	}
+
+	if len(des) != len(initial) {
+
+		items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent, 0, len(des))
+		for _, d := range des {
+			cd := canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent(&d, nil, opts...)
+			if cd != nil {
+				items = append(items, *cd)
+			}
+		}
+		return items
+	}
+
+	items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent, 0, len(des))
+	for i, d := range des {
+		cd := canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent(&d, &initial[i], opts...)
+		if cd != nil {
+			items = append(items, *cd)
+		}
+	}
+	return items
+
+}
+
+// canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent merges the desired
+// PolicyContent into the actual state, recursing into TemplateLibrary; Bundles is left as returned.
+func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent(c *Client, des, nw *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent {
+
+	if des == nil {
+		return nw
+	}
+
+	if nw == nil {
+		if dcl.IsEmptyValueIndirect(des) {
+			c.Config.Logger.Info("Found explicitly empty value for FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent while comparing non-nil desired to nil actual. Returning desired object.")
+			return des
+		}
+		return nil
+	}
+
+	nw.TemplateLibrary = canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary(c, des.TemplateLibrary, nw.TemplateLibrary)
+
+	return nw
+}
+
+// canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentSet matches desired
+// elements to diff-free actual elements (set semantics), then appends the unmatched actual elements.
+func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentSet(c *Client, des, nw []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent {
+	if des == nil {
+		return nw
+	}
+
+	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
+	var items []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent
+	for _, d := range des {
+		matchedIndex := -1
+		for i, n := range nw {
+			if diffs, _ := compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
+				matchedIndex = i
+				break
+			}
+		}
+		if matchedIndex != -1 {
+			items = append(items, *canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent(c, &d, &nw[matchedIndex]))
+			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
+		}
+	}
+	// Also include elements in nw that are not matched in des.
+	items = append(items, nw...)
+
+	return items
+}
+
+// canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentSlice canonicalizes
+// pairwise by index; unequal lengths are returned untouched so the later diff reports the mismatch.
+func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentSlice(c *Client, des, nw []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent {
+	if des == nil {
+		return nw
+	}
+
+	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
+	// Return the original array.
+	if len(des) != len(nw) {
+		return nw
+	}
+
+	var items []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent
+	for i, d := range des {
+		n := nw[i]
+		items = append(items, *canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent(c, &d, &n))
+	}
+
+	return items
+}
+
+// canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary
+// canonicalizes the desired TemplateLibrary block against initial (Installation keeps the initial value when
+// equivalent or zero).
+func canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary(des, initial *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary, opts ...dcl.ApplyOption) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary {
+	if des == nil {
+		return initial
+	}
+	if des.empty {
+		return des
+	}
+
+	if initial == nil {
+		return des
+	}
+
+	cDes := &FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary{}
+
+	if dcl.IsZeroValue(des.Installation) || (dcl.IsEmptyValueIndirect(des.Installation) && dcl.IsEmptyValueIndirect(initial.Installation)) {
+		// Desired and initial values are equivalent, so set canonical desired value to initial value.
+		cDes.Installation = initial.Installation
+	} else {
+		cDes.Installation = des.Installation
+	}
+
+	return cDes
+}
+
+// canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrarySlice
+// canonicalizes des element-wise against initial, falling back to nil initial per element on length mismatch.
+func canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrarySlice(des, initial []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary, opts ...dcl.ApplyOption) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary {
+	if dcl.IsEmptyValueIndirect(des) {
+		return initial
+	}
+
+	if len(des) != len(initial) {
+
+		items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary, 0, len(des))
+		for _, d := range des {
+			cd := canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary(&d, nil, opts...)
+			if cd != nil {
+				items = append(items, *cd)
+			}
+		}
+		return items
+	}
+
+	items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary, 0, len(des))
+	for i, d := range des {
+		cd := canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary(&d, &initial[i], opts...)
+		if cd != nil {
+			items = append(items, *cd)
+		}
+	}
+	return items
+
+}
+
+// canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary
+// reconciles desired vs actual; Installation is left as the server returned it (no per-field merge here).
+func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary(c *Client, des, nw *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary {
+
+	if des == nil {
+		return nw
+	}
+
+	if nw == nil {
+		if dcl.IsEmptyValueIndirect(des) {
+			c.Config.Logger.Info("Found explicitly empty value for FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary while comparing non-nil desired to nil actual. Returning desired object.")
+			return des
+		}
+		return nil
+	}
+
+	return nw
+}
+
+// canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrarySet
+// matches desired elements to diff-free actual elements (set semantics), then appends unmatched actuals.
+func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrarySet(c *Client, des, nw []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary {
+	if des == nil {
+		return nw
+	}
+
+	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
+	var items []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary
+	for _, d := range des {
+		matchedIndex := -1
+		for i, n := range nw {
+			if diffs, _ := compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
+				matchedIndex = i
+				break
+			}
+		}
+		if matchedIndex != -1 {
+			items = append(items, *canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary(c, &d, &nw[matchedIndex]))
+			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
+		}
+	}
+	// Also include elements in nw that are not matched in des.
+	items = append(items, nw...)
+
+	return items
+}
+
+// canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrarySlice
+// canonicalizes pairwise by index; unequal lengths are returned untouched so the later diff reports it.
+func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrarySlice(c *Client, des, nw []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary {
+	if des == nil {
+		return nw
+	}
+
+	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
+	// Return the original array.
+	if len(des) != len(nw) {
+		return nw
+	}
+
+	var items []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary
+	for i, d := range des {
+		n := nw[i]
+		items = append(items, *canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary(c, &d, &n))
+	}
+
+	return items
+}
+
+// canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles canonicalizes the
+// desired Bundles entry against initial (ExemptedNamespaces keeps the initial spelling when equivalent).
+func canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles(des, initial *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles, opts ...dcl.ApplyOption) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles {
+	if des == nil {
+		return initial
+	}
+	if des.empty {
+		return des
+	}
+
+	if initial == nil {
+		return des
+	}
+
+	cDes := &FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles{}
+
+	if dcl.StringArrayCanonicalize(des.ExemptedNamespaces, initial.ExemptedNamespaces) {
+		cDes.ExemptedNamespaces = initial.ExemptedNamespaces
+	} else {
+		cDes.ExemptedNamespaces = des.ExemptedNamespaces
+	}
+
+	return cDes
+}
+
+// canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesSlice canonicalizes
+// des element-wise against initial. (Function continues in the next hunk of this patch.)
+func canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesSlice(des, initial []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles, opts ...dcl.ApplyOption) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles {
+	if dcl.IsEmptyValueIndirect(des) {
+		return initial
+	}
+
+	if len(des) != len(initial) {
+
+		items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles, 0, len(des))
+		for _, d := range des {
+			cd := canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles(&d, nil, opts...)
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles, 0, len(des)) + for i, d := range des { + cd := canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles(c *Client, des, nw *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringArrayCanonicalize(des.ExemptedNamespaces, nw.ExemptedNamespaces) { + nw.ExemptedNamespaces = des.ExemptedNamespaces + } + + return nw +} + +func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesSet(c *Client, des, nw []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesSlice(c *Client, des, nw []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles(c, &d, &n)) + } + + return items +} + +func canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs(des, initial *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs, opts ...dcl.ApplyOption) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs { + if des == nil { + return initial + } + if des.empty { + return des + } + + if des.ReplicaCount != nil || (initial != nil && initial.ReplicaCount != nil) { + // Check if anything else is set. 
+ if dcl.AnySet(des.ContainerResources, des.PodAffinity, des.PodTolerations) { + des.ReplicaCount = nil + if initial != nil { + initial.ReplicaCount = nil + } + } + } + + if des.ContainerResources != nil || (initial != nil && initial.ContainerResources != nil) { + // Check if anything else is set. + if dcl.AnySet(des.ReplicaCount, des.PodAffinity, des.PodTolerations) { + des.ContainerResources = nil + if initial != nil { + initial.ContainerResources = nil + } + } + } + + if des.PodAffinity != nil || (initial != nil && initial.PodAffinity != nil) { + // Check if anything else is set. + if dcl.AnySet(des.ReplicaCount, des.ContainerResources, des.PodTolerations) { + des.PodAffinity = nil + if initial != nil { + initial.PodAffinity = nil + } + } + } + + if des.PodTolerations != nil || (initial != nil && initial.PodTolerations != nil) { + // Check if anything else is set. + if dcl.AnySet(des.ReplicaCount, des.ContainerResources, des.PodAffinity) { + des.PodTolerations = nil + if initial != nil { + initial.PodTolerations = nil + } + } + } + + if initial == nil { + return des + } + + cDes := &FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs{} + + if dcl.IsZeroValue(des.ReplicaCount) || (dcl.IsEmptyValueIndirect(des.ReplicaCount) && dcl.IsEmptyValueIndirect(initial.ReplicaCount)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.ReplicaCount = initial.ReplicaCount + } else { + cDes.ReplicaCount = des.ReplicaCount + } + cDes.ContainerResources = canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources(des.ContainerResources, initial.ContainerResources, opts...) + if dcl.IsZeroValue(des.PodAffinity) || (dcl.IsEmptyValueIndirect(des.PodAffinity) && dcl.IsEmptyValueIndirect(initial.PodAffinity)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.PodAffinity = initial.PodAffinity + } else { + cDes.PodAffinity = des.PodAffinity + } + cDes.PodTolerations = canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsSlice(des.PodTolerations, initial.PodTolerations, opts...) + + return cDes +} + +func canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsSlice(des, initial []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs, opts ...dcl.ApplyOption) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs, 0, len(des)) + for _, d := range des { + cd := canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs, 0, len(des)) + for i, d := range des { + cd := canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs(c *Client, des, nw *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + nw.ContainerResources = canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources(c, des.ContainerResources, nw.ContainerResources) + nw.PodTolerations = canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsSlice(c, des.PodTolerations, nw.PodTolerations) + + return nw +} + +func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsSet(c *Client, des, nw []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsSlice(c *Client, des, nw []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs(c, &d, &n)) + } + + return items +} + +func canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources(des, initial *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources, opts ...dcl.ApplyOption) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources{} + + cDes.Limits = canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits(des.Limits, initial.Limits, opts...) + cDes.Requests = canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests(des.Requests, initial.Requests, opts...) + + return cDes +} + +func canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesSlice(des, initial []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources, opts ...dcl.ApplyOption) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources, 0, len(des)) + for _, d := range des { + cd := canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources, 0, len(des)) + for i, d := range des { + cd := canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources(c *Client, des, nw *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + nw.Limits = canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits(c, des.Limits, nw.Limits) + nw.Requests = canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests(c, des.Requests, nw.Requests) + + return nw +} + +func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesSet(c *Client, des, nw []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesSlice(c *Client, des, nw []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources(c, &d, &n)) + } + + return items +} + +func canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits(des, initial *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits, opts ...dcl.ApplyOption) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits{} + + if dcl.StringCanonicalize(des.Memory, initial.Memory) || dcl.IsZeroValue(des.Memory) { + cDes.Memory = initial.Memory + } else { + cDes.Memory = des.Memory + } + if dcl.StringCanonicalize(des.Cpu, initial.Cpu) || dcl.IsZeroValue(des.Cpu) { + cDes.Cpu = initial.Cpu + } else { + cDes.Cpu = des.Cpu + } + + return cDes +} + +func canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimitsSlice(des, initial []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits, opts ...dcl.ApplyOption) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits, 0, len(des)) + for _, d := range des { + cd := 
canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits, 0, len(des)) + for i, d := range des { + cd := canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits(c *Client, des, nw *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Memory, nw.Memory) { + nw.Memory = des.Memory + } + if dcl.StringCanonicalize(des.Cpu, nw.Cpu) { + nw.Cpu = des.Cpu + } + + return nw +} + +func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimitsSet(c *Client, des, nw []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimitsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimitsSlice(c *Client, des, nw []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits(c, &d, &n)) + } + + return items +} + +func canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests(des, initial *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests, opts ...dcl.ApplyOption) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests{} + + if dcl.StringCanonicalize(des.Memory, initial.Memory) || dcl.IsZeroValue(des.Memory) { + cDes.Memory = initial.Memory + } else { + cDes.Memory = des.Memory + } + if dcl.StringCanonicalize(des.Cpu, initial.Cpu) || dcl.IsZeroValue(des.Cpu) { + cDes.Cpu = initial.Cpu + } else { + cDes.Cpu = des.Cpu + } + + return cDes +} + +func canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequestsSlice(des, initial []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests, opts ...dcl.ApplyOption) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests, 0, len(des)) + for _, d := range des { + cd := 
canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests, 0, len(des)) + for i, d := range des { + cd := canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests(c *Client, des, nw *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Memory, nw.Memory) { + nw.Memory = des.Memory + } + if dcl.StringCanonicalize(des.Cpu, nw.Cpu) { + nw.Cpu = des.Cpu + } + + return nw +} + +func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequestsSet(c *Client, des, nw []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequestsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequestsSlice(c *Client, des, nw []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests(c, &d, &n)) + } + + return items +} + +func canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations(des, initial *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations, opts ...dcl.ApplyOption) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations{} + + if dcl.StringCanonicalize(des.Key, initial.Key) || dcl.IsZeroValue(des.Key) { + cDes.Key = initial.Key + } else { + cDes.Key = des.Key + } + if dcl.StringCanonicalize(des.Operator, initial.Operator) || dcl.IsZeroValue(des.Operator) { + cDes.Operator = initial.Operator + } else { + cDes.Operator = des.Operator + } + if dcl.StringCanonicalize(des.Value, initial.Value) || dcl.IsZeroValue(des.Value) { + cDes.Value = initial.Value + } else { + cDes.Value = des.Value + } + if dcl.StringCanonicalize(des.Effect, initial.Effect) || dcl.IsZeroValue(des.Effect) { + cDes.Effect = initial.Effect + } else { + cDes.Effect = des.Effect + } + + return cDes +} + +func canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsSlice(des, initial []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations, opts ...dcl.ApplyOption) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations { + if des == nil { + return initial + } + + if len(des) != len(initial) { + 
+ items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations, 0, len(des)) + for _, d := range des { + cd := canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations, 0, len(des)) + for i, d := range des { + cd := canonicalizeFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations(c *Client, des, nw *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Key, nw.Key) { + nw.Key = des.Key + } + if dcl.StringCanonicalize(des.Operator, nw.Operator) { + nw.Operator = des.Operator + } + if dcl.StringCanonicalize(des.Value, nw.Value) { + nw.Value = des.Value + } + if dcl.StringCanonicalize(des.Effect, nw.Effect) { + nw.Effect = des.Effect + } + + return nw +} + +func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsSet(c *Client, des, nw []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsSlice(c *Client, des, nw []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations { + if des == nil { + return nw + } + + // Lengths are unequal. 
A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations(c, &d, &n)) + } + + return items +} + +// The differ returns a list of diffs, along with a list of operations that should be taken +// to remedy them. Right now, it does not attempt to consolidate operations - if several +// fields can be fixed with a patch update, it will perform the patch several times. +// Diffs on some fields will be ignored if the `desired` state has an empty (nil) +// value. This empty value indicates that the user does not care about the state for +// the field. Empty fields on the actual object will cause diffs. +// TODO(magic-modules-eng): for efficiency in some resources, add batching. +func diffFeatureMembership(c *Client, desired, actual *FeatureMembership, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) { + if desired == nil || actual == nil { + return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual) + } + + c.Config.Logger.Infof("Diff function called with desired state: %v", desired) + c.Config.Logger.Infof("Diff function called with actual state: %v", actual) + + var fn dcl.FieldName + var newDiffs []*dcl.FieldDiff + // New style diffs. + if ds, err := dcl.Diff(desired.Mesh, actual.Mesh, dcl.DiffInfo{ObjectFunction: compareFeatureMembershipMeshNewStyle, EmptyObject: EmptyFeatureMembershipMesh, OperationSelector: dcl.TriggersOperation("updateFeatureMembershipUpdateFeatureMembershipOperation")}, fn.AddNest("Mesh")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Configmanagement, actual.Configmanagement, dcl.DiffInfo{MergeNestedDiffs: true, ObjectFunction: compareFeatureMembershipConfigmanagementNewStyle, EmptyObject: EmptyFeatureMembershipConfigmanagement, OperationSelector: dcl.TriggersOperation("updateFeatureMembershipUpdateFeatureMembershipOperation")}, fn.AddNest("Configmanagement")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Policycontroller, actual.Policycontroller, dcl.DiffInfo{MergeNestedDiffs: true, ObjectFunction: compareFeatureMembershipPolicycontrollerNewStyle, EmptyObject: EmptyFeatureMembershipPolicycontroller, OperationSelector: dcl.TriggersOperation("updateFeatureMembershipUpdateFeatureMembershipOperation")}, fn.AddNest("Policycontroller")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Location, actual.Location, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Location")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Feature, actual.Feature, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Feature")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Membership, actual.Membership, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Membership")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.MembershipLocation, actual.MembershipLocation, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MembershipLocation")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if len(newDiffs) > 0 { + c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs) + } + return newDiffs, nil +} +func compareFeatureMembershipMeshNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*FeatureMembershipMesh) + if !ok { + desiredNotPointer, ok := d.(FeatureMembershipMesh) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipMesh or *FeatureMembershipMesh", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*FeatureMembershipMesh) + if !ok { + actualNotPointer, ok := a.(FeatureMembershipMesh) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipMesh", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Management, actual.Management, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateFeatureMembershipUpdateFeatureMembershipOperation")}, fn.AddNest("Management")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ControlPlane, actual.ControlPlane, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateFeatureMembershipUpdateFeatureMembershipOperation")}, fn.AddNest("ControlPlane")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareFeatureMembershipConfigmanagementNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*FeatureMembershipConfigmanagement) + if !ok { + desiredNotPointer, ok := d.(FeatureMembershipConfigmanagement) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipConfigmanagement or *FeatureMembershipConfigmanagement", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*FeatureMembershipConfigmanagement) + if !ok { + actualNotPointer, ok := a.(FeatureMembershipConfigmanagement) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipConfigmanagement", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.ConfigSync, actual.ConfigSync, dcl.DiffInfo{ObjectFunction: compareFeatureMembershipConfigmanagementConfigSyncNewStyle, EmptyObject: EmptyFeatureMembershipConfigmanagementConfigSync, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ConfigSync")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.PolicyController, actual.PolicyController, dcl.DiffInfo{ObjectFunction: compareFeatureMembershipConfigmanagementPolicyControllerNewStyle, EmptyObject: EmptyFeatureMembershipConfigmanagementPolicyController, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PolicyController")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.HierarchyController, actual.HierarchyController, dcl.DiffInfo{ObjectFunction: compareFeatureMembershipConfigmanagementHierarchyControllerNewStyle, EmptyObject: EmptyFeatureMembershipConfigmanagementHierarchyController, CustomDiff: emptyHNCSameAsAllFalse, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("HierarchyController")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Version, actual.Version, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Version")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Management, actual.Management, dcl.DiffInfo{ServerDefault: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Management")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareFeatureMembershipConfigmanagementConfigSyncNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*FeatureMembershipConfigmanagementConfigSync) + if !ok { + desiredNotPointer, ok := d.(FeatureMembershipConfigmanagementConfigSync) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipConfigmanagementConfigSync or *FeatureMembershipConfigmanagementConfigSync", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*FeatureMembershipConfigmanagementConfigSync) + if !ok { + actualNotPointer, ok := a.(FeatureMembershipConfigmanagementConfigSync) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipConfigmanagementConfigSync", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.DeploymentOverrides, actual.DeploymentOverrides, dcl.DiffInfo{ObjectFunction: compareFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesNewStyle, EmptyObject: EmptyFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DeploymentOverrides")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Git, actual.Git, dcl.DiffInfo{ObjectFunction: compareFeatureMembershipConfigmanagementConfigSyncGitNewStyle, EmptyObject: EmptyFeatureMembershipConfigmanagementConfigSyncGit, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Git")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.SourceFormat, actual.SourceFormat, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SourceFormat")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Enabled, actual.Enabled, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Enabled")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.StopSyncing, actual.StopSyncing, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("StopSyncing")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.PreventDrift, actual.PreventDrift, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PreventDrift")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.MetricsGcpServiceAccountEmail, actual.MetricsGcpServiceAccountEmail, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MetricsGcpServiceAccountEmail")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Oci, actual.Oci, dcl.DiffInfo{ObjectFunction: compareFeatureMembershipConfigmanagementConfigSyncOciNewStyle, EmptyObject: EmptyFeatureMembershipConfigmanagementConfigSyncOci, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Oci")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides) + if !ok { + desiredNotPointer, ok := d.(FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides or *FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides) + if !ok { + actualNotPointer, ok := a.(FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.DeploymentName, actual.DeploymentName, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DeploymentName")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.DeploymentNamespace, actual.DeploymentNamespace, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DeploymentNamespace")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Containers, actual.Containers, dcl.DiffInfo{ObjectFunction: compareFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersNewStyle, EmptyObject: EmptyFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Containers")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers) + if !ok { + desiredNotPointer, ok := d.(FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers or *FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers) + if !ok { + actualNotPointer, ok := a.(FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.ContainerName, actual.ContainerName, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ContainerName")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.CpuRequest, actual.CpuRequest, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CpuRequest")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.MemoryRequest, actual.MemoryRequest, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MemoryRequest")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.CpuLimit, actual.CpuLimit, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CpuLimit")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.MemoryLimit, actual.MemoryLimit, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MemoryLimit")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareFeatureMembershipConfigmanagementConfigSyncGitNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*FeatureMembershipConfigmanagementConfigSyncGit) + if !ok { + desiredNotPointer, ok := d.(FeatureMembershipConfigmanagementConfigSyncGit) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipConfigmanagementConfigSyncGit or *FeatureMembershipConfigmanagementConfigSyncGit", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*FeatureMembershipConfigmanagementConfigSyncGit) + if !ok { + actualNotPointer, ok := a.(FeatureMembershipConfigmanagementConfigSyncGit) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipConfigmanagementConfigSyncGit", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.SyncRepo, actual.SyncRepo, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SyncRepo")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.SyncBranch, actual.SyncBranch, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SyncBranch")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.PolicyDir, actual.PolicyDir, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PolicyDir")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.SyncWaitSecs, actual.SyncWaitSecs, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SyncWaitSecs")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.SyncRev, actual.SyncRev, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SyncRev")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.SecretType, actual.SecretType, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SecretType")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.HttpsProxy, actual.HttpsProxy, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("HttpsProxy")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.GcpServiceAccountEmail, actual.GcpServiceAccountEmail, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("GcpServiceAccountEmail")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareFeatureMembershipConfigmanagementConfigSyncOciNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*FeatureMembershipConfigmanagementConfigSyncOci) + if !ok { + desiredNotPointer, ok := d.(FeatureMembershipConfigmanagementConfigSyncOci) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipConfigmanagementConfigSyncOci or *FeatureMembershipConfigmanagementConfigSyncOci", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*FeatureMembershipConfigmanagementConfigSyncOci) + if !ok { + actualNotPointer, ok := a.(FeatureMembershipConfigmanagementConfigSyncOci) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipConfigmanagementConfigSyncOci", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.SyncRepo, actual.SyncRepo, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateFeatureMembershipUpdateFeatureMembershipOperation")}, fn.AddNest("SyncRepo")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.PolicyDir, actual.PolicyDir, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateFeatureMembershipUpdateFeatureMembershipOperation")}, fn.AddNest("PolicyDir")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.SyncWaitSecs, actual.SyncWaitSecs, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateFeatureMembershipUpdateFeatureMembershipOperation")}, fn.AddNest("SyncWaitSecs")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.SecretType, actual.SecretType, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateFeatureMembershipUpdateFeatureMembershipOperation")}, fn.AddNest("SecretType")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.GcpServiceAccountEmail, actual.GcpServiceAccountEmail, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.TriggersOperation("updateFeatureMembershipUpdateFeatureMembershipOperation")}, fn.AddNest("GcpServiceAccountEmail")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareFeatureMembershipConfigmanagementPolicyControllerNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*FeatureMembershipConfigmanagementPolicyController) + if !ok { + desiredNotPointer, ok := d.(FeatureMembershipConfigmanagementPolicyController) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipConfigmanagementPolicyController or *FeatureMembershipConfigmanagementPolicyController", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*FeatureMembershipConfigmanagementPolicyController) + if !ok { + actualNotPointer, ok := a.(FeatureMembershipConfigmanagementPolicyController) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipConfigmanagementPolicyController", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Enabled, actual.Enabled, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Enabled")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.ExemptableNamespaces, actual.ExemptableNamespaces, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ExemptableNamespaces")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ReferentialRulesEnabled, actual.ReferentialRulesEnabled, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ReferentialRulesEnabled")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.LogDeniesEnabled, actual.LogDeniesEnabled, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("LogDeniesEnabled")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.MutationEnabled, actual.MutationEnabled, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MutationEnabled")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Monitoring, actual.Monitoring, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareFeatureMembershipConfigmanagementPolicyControllerMonitoringNewStyle, EmptyObject: EmptyFeatureMembershipConfigmanagementPolicyControllerMonitoring, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Monitoring")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.TemplateLibraryInstalled, actual.TemplateLibraryInstalled, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("TemplateLibraryInstalled")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.AuditIntervalSeconds, actual.AuditIntervalSeconds, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AuditIntervalSeconds")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareFeatureMembershipConfigmanagementPolicyControllerMonitoringNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*FeatureMembershipConfigmanagementPolicyControllerMonitoring) + if !ok { + desiredNotPointer, ok := d.(FeatureMembershipConfigmanagementPolicyControllerMonitoring) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipConfigmanagementPolicyControllerMonitoring or *FeatureMembershipConfigmanagementPolicyControllerMonitoring", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*FeatureMembershipConfigmanagementPolicyControllerMonitoring) + if !ok { + actualNotPointer, ok := a.(FeatureMembershipConfigmanagementPolicyControllerMonitoring) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipConfigmanagementPolicyControllerMonitoring", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Backends, actual.Backends, dcl.DiffInfo{ServerDefault: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Backends")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareFeatureMembershipConfigmanagementHierarchyControllerNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*FeatureMembershipConfigmanagementHierarchyController) + if !ok { + desiredNotPointer, ok := d.(FeatureMembershipConfigmanagementHierarchyController) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipConfigmanagementHierarchyController or *FeatureMembershipConfigmanagementHierarchyController", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*FeatureMembershipConfigmanagementHierarchyController) + if !ok { + actualNotPointer, ok := a.(FeatureMembershipConfigmanagementHierarchyController) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipConfigmanagementHierarchyController", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Enabled, actual.Enabled, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Enabled")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.EnablePodTreeLabels, actual.EnablePodTreeLabels, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("EnablePodTreeLabels")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.EnableHierarchicalResourceQuota, actual.EnableHierarchicalResourceQuota, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("EnableHierarchicalResourceQuota")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareFeatureMembershipPolicycontrollerNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*FeatureMembershipPolicycontroller) + if !ok { + desiredNotPointer, ok := d.(FeatureMembershipPolicycontroller) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipPolicycontroller or *FeatureMembershipPolicycontroller", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*FeatureMembershipPolicycontroller) + if !ok { + actualNotPointer, ok := a.(FeatureMembershipPolicycontroller) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipPolicycontroller", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Version, actual.Version, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Version")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.PolicyControllerHubConfig, actual.PolicyControllerHubConfig, dcl.DiffInfo{ObjectFunction: compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigNewStyle, EmptyObject: EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PolicyControllerHubConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*FeatureMembershipPolicycontrollerPolicyControllerHubConfig) + if !ok { + desiredNotPointer, ok := d.(FeatureMembershipPolicycontrollerPolicyControllerHubConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipPolicycontrollerPolicyControllerHubConfig or *FeatureMembershipPolicycontrollerPolicyControllerHubConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*FeatureMembershipPolicycontrollerPolicyControllerHubConfig) + if !ok { + actualNotPointer, ok := a.(FeatureMembershipPolicycontrollerPolicyControllerHubConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipPolicycontrollerPolicyControllerHubConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.InstallSpec, actual.InstallSpec, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstallSpec")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ExemptableNamespaces, actual.ExemptableNamespaces, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ExemptableNamespaces")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ReferentialRulesEnabled, actual.ReferentialRulesEnabled, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ReferentialRulesEnabled")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.LogDeniesEnabled, actual.LogDeniesEnabled, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("LogDeniesEnabled")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.MutationEnabled, actual.MutationEnabled, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MutationEnabled")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Monitoring, actual.Monitoring, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringNewStyle, EmptyObject: EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Monitoring")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.AuditIntervalSeconds, actual.AuditIntervalSeconds, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AuditIntervalSeconds")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ConstraintViolationLimit, actual.ConstraintViolationLimit, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ConstraintViolationLimit")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.PolicyContent, actual.PolicyContent, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentNewStyle, EmptyObject: EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PolicyContent")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.DeploymentConfigs, actual.DeploymentConfigs, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsNewStyle, EmptyObject: EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DeploymentConfigs")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring) + if !ok { + desiredNotPointer, ok := d.(FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring or *FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring) + if !ok { + actualNotPointer, ok := a.(FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring", a) + } + actual = &actualNotPointer + } + 
+ if ds, err := dcl.Diff(desired.Backends, actual.Backends, dcl.DiffInfo{ServerDefault: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Backends")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent) + if !ok { + desiredNotPointer, ok := d.(FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent or *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent) + if !ok { + actualNotPointer, ok := a.(FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.TemplateLibrary, actual.TemplateLibrary, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryNewStyle, EmptyObject: EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("TemplateLibrary")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Bundles, actual.Bundles, dcl.DiffInfo{ObjectFunction: compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesNewStyle, EmptyObject: EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Bundles")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary) + if !ok { + desiredNotPointer, ok := d.(FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary or *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary) + if !ok { + actualNotPointer, ok := a.(FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Installation, actual.Installation, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Installation")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles) + if !ok { + desiredNotPointer, ok := d.(FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles or *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles) + if !ok { + actualNotPointer, ok := a.(FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.ExemptedNamespaces, actual.ExemptedNamespaces, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ExemptedNamespaces")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs) + if !ok { + desiredNotPointer, ok := d.(FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs or *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs) + if !ok { + actualNotPointer, ok := a.(FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.ReplicaCount, actual.ReplicaCount, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ReplicaCount")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ContainerResources, actual.ContainerResources, dcl.DiffInfo{ObjectFunction: compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesNewStyle, EmptyObject: EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ContainerResources")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.PodAffinity, actual.PodAffinity, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PodAffinity")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.PodTolerations, actual.PodTolerations, dcl.DiffInfo{ObjectFunction: compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsNewStyle, EmptyObject: EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PodTolerations")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources) + if !ok { + desiredNotPointer, ok := d.(FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources or *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources) + if !ok { + actualNotPointer, ok := a.(FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources", a) + } + actual = &actualNotPointer + } + + if 
ds, err := dcl.Diff(desired.Limits, actual.Limits, dcl.DiffInfo{ObjectFunction: compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimitsNewStyle, EmptyObject: EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Limits")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Requests, actual.Requests, dcl.DiffInfo{ObjectFunction: compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequestsNewStyle, EmptyObject: EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Requests")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimitsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits) + if !ok { + desiredNotPointer, ok := d.(FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits or *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits) + if !ok { + actualNotPointer, ok := 
a.(FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Memory, actual.Memory, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Memory")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Cpu, actual.Cpu, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Cpu")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequestsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests) + if !ok { + desiredNotPointer, ok := d.(FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests or *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests) + if !ok { + actualNotPointer, ok := a.(FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests) + if !ok { + return nil, fmt.Errorf("obj %v is not a 
FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Memory, actual.Memory, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Memory")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Cpu, actual.Cpu, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Cpu")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations) + if !ok { + desiredNotPointer, ok := d.(FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations or *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations) + if !ok { + actualNotPointer, ok := a.(FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations) + if !ok { + return nil, fmt.Errorf("obj %v is not a FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Key, actual.Key, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Key")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } 
+ diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Operator, actual.Operator, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Operator")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Value, actual.Value, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Value")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Effect, actual.Effect, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Effect")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +// urlNormalized returns a copy of the resource struct with values normalized +// for URL substitutions. For instance, it converts long-form self-links to +// short-form so they can be substituted in. +func (r *FeatureMembership) urlNormalized() *FeatureMembership { + normalized := dcl.Copy(*r).(FeatureMembership) + normalized.Project = dcl.SelfLinkToName(r.Project) + normalized.Location = dcl.SelfLinkToName(r.Location) + normalized.Feature = dcl.SelfLinkToName(r.Feature) + normalized.Membership = dcl.SelfLinkToName(r.Membership) + normalized.MembershipLocation = dcl.SelfLinkToName(r.MembershipLocation) + return &normalized +} + +func (r *FeatureMembership) updateURL(userBasePath, updateName string) (string, error) { + nr := r.urlNormalized() + if updateName == "UpdateFeatureMembership" { + fields := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "feature": dcl.ValueOrEmptyString(nr.Feature), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/features/{{ "{{" }}feature{{ "}}" }}", nr.basePath(), userBasePath, fields), nil + + } + + return "", fmt.Errorf("unknown 
update name: %s", updateName) +} + +// marshal encodes the FeatureMembership resource into JSON for a Create request, and +// performs transformations from the resource schema to the API schema if +// necessary. +func (r *FeatureMembership) marshal(c *Client) ([]byte, error) { + m, err := expandFeatureMembership(c, r) + if err != nil { + return nil, fmt.Errorf("error marshalling FeatureMembership: %w", err) + } + + return json.Marshal(m) +} + +// unmarshalFeatureMembership decodes JSON responses into the FeatureMembership resource schema. +func unmarshalFeatureMembership(b []byte, c *Client, res *FeatureMembership) (*FeatureMembership, error) { + var m map[string]interface{} + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + return unmarshalMapFeatureMembership(m, c, res) +} + +func unmarshalMapFeatureMembership(m map[string]interface{}, c *Client, res *FeatureMembership) (*FeatureMembership, error) { + + flattened := flattenFeatureMembership(c, m, res) + if flattened == nil { + return nil, fmt.Errorf("attempted to flatten empty json object") + } + return flattened, nil +} + +// expandFeatureMembership expands FeatureMembership into a JSON request object. 
+func expandFeatureMembership(c *Client, f *FeatureMembership) (map[string]interface{}, error) { + m := make(map[string]interface{}) + res := f + _ = res + if v, err := expandFeatureMembershipMesh(c, f.Mesh, res); err != nil { + return nil, fmt.Errorf("error expanding Mesh into mesh: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["mesh"] = v + } + if v, err := expandFeatureMembershipConfigmanagement(c, f.Configmanagement, res); err != nil { + return nil, fmt.Errorf("error expanding Configmanagement into configmanagement: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["configmanagement"] = v + } + if v, err := expandFeatureMembershipPolicycontroller(c, f.Policycontroller, res); err != nil { + return nil, fmt.Errorf("error expanding Policycontroller into policycontroller: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["policycontroller"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Project into project: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["project"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Location into location: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["location"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Feature into feature: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["feature"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Membership into membership: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["membership"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding MembershipLocation into membershipLocation: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["membershipLocation"] = v + } + + return m, nil +} + +// flattenFeatureMembership flattens FeatureMembership from a JSON request object into the +// FeatureMembership 
type. +func flattenFeatureMembership(c *Client, i interface{}, res *FeatureMembership) *FeatureMembership { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + if len(m) == 0 { + return nil + } + + resultRes := &FeatureMembership{} + resultRes.Mesh = flattenFeatureMembershipMesh(c, m["mesh"], res) + resultRes.Configmanagement = flattenFeatureMembershipConfigmanagement(c, m["configmanagement"], res) + resultRes.Policycontroller = flattenFeatureMembershipPolicycontroller(c, m["policycontroller"], res) + resultRes.Project = dcl.FlattenString(m["project"]) + resultRes.Location = dcl.FlattenString(m["location"]) + resultRes.Feature = dcl.FlattenString(m["feature"]) + resultRes.Membership = dcl.FlattenString(m["membership"]) + resultRes.MembershipLocation = dcl.FlattenString(m["membershipLocation"]) + + return resultRes +} + +// expandFeatureMembershipMeshMap expands the contents of FeatureMembershipMesh into a JSON +// request object. +func expandFeatureMembershipMeshMap(c *Client, f map[string]FeatureMembershipMesh, res *FeatureMembership) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandFeatureMembershipMesh(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandFeatureMembershipMeshSlice expands the contents of FeatureMembershipMesh into a JSON +// request object. +func expandFeatureMembershipMeshSlice(c *Client, f []FeatureMembershipMesh, res *FeatureMembership) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandFeatureMembershipMesh(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenFeatureMembershipMeshMap flattens the contents of FeatureMembershipMesh from a JSON +// response object. 
+func flattenFeatureMembershipMeshMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipMesh { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]FeatureMembershipMesh{} + } + + if len(a) == 0 { + return map[string]FeatureMembershipMesh{} + } + + items := make(map[string]FeatureMembershipMesh) + for k, item := range a { + items[k] = *flattenFeatureMembershipMesh(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenFeatureMembershipMeshSlice flattens the contents of FeatureMembershipMesh from a JSON +// response object. +func flattenFeatureMembershipMeshSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipMesh { + a, ok := i.([]interface{}) + if !ok { + return []FeatureMembershipMesh{} + } + + if len(a) == 0 { + return []FeatureMembershipMesh{} + } + + items := make([]FeatureMembershipMesh, 0, len(a)) + for _, item := range a { + items = append(items, *flattenFeatureMembershipMesh(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandFeatureMembershipMesh expands an instance of FeatureMembershipMesh into a JSON +// request object. +func expandFeatureMembershipMesh(c *Client, f *FeatureMembershipMesh, res *FeatureMembership) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Management; !dcl.IsEmptyValueIndirect(v) { + m["management"] = v + } + if v := f.ControlPlane; !dcl.IsEmptyValueIndirect(v) { + m["controlPlane"] = v + } + + return m, nil +} + +// flattenFeatureMembershipMesh flattens an instance of FeatureMembershipMesh from a JSON +// response object. 
+func flattenFeatureMembershipMesh(c *Client, i interface{}, res *FeatureMembership) *FeatureMembershipMesh { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &FeatureMembershipMesh{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyFeatureMembershipMesh + } + r.Management = flattenFeatureMembershipMeshManagementEnum(m["management"]) + r.ControlPlane = flattenFeatureMembershipMeshControlPlaneEnum(m["controlPlane"]) + + return r +} + +// expandFeatureMembershipConfigmanagementMap expands the contents of FeatureMembershipConfigmanagement into a JSON +// request object. +func expandFeatureMembershipConfigmanagementMap(c *Client, f map[string]FeatureMembershipConfigmanagement, res *FeatureMembership) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandFeatureMembershipConfigmanagement(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandFeatureMembershipConfigmanagementSlice expands the contents of FeatureMembershipConfigmanagement into a JSON +// request object. +func expandFeatureMembershipConfigmanagementSlice(c *Client, f []FeatureMembershipConfigmanagement, res *FeatureMembership) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandFeatureMembershipConfigmanagement(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenFeatureMembershipConfigmanagementMap flattens the contents of FeatureMembershipConfigmanagement from a JSON +// response object. 
+func flattenFeatureMembershipConfigmanagementMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipConfigmanagement { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]FeatureMembershipConfigmanagement{} + } + + if len(a) == 0 { + return map[string]FeatureMembershipConfigmanagement{} + } + + items := make(map[string]FeatureMembershipConfigmanagement) + for k, item := range a { + items[k] = *flattenFeatureMembershipConfigmanagement(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenFeatureMembershipConfigmanagementSlice flattens the contents of FeatureMembershipConfigmanagement from a JSON +// response object. +func flattenFeatureMembershipConfigmanagementSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipConfigmanagement { + a, ok := i.([]interface{}) + if !ok { + return []FeatureMembershipConfigmanagement{} + } + + if len(a) == 0 { + return []FeatureMembershipConfigmanagement{} + } + + items := make([]FeatureMembershipConfigmanagement, 0, len(a)) + for _, item := range a { + items = append(items, *flattenFeatureMembershipConfigmanagement(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandFeatureMembershipConfigmanagement expands an instance of FeatureMembershipConfigmanagement into a JSON +// request object. 
+func expandFeatureMembershipConfigmanagement(c *Client, f *FeatureMembershipConfigmanagement, res *FeatureMembership) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v, err := expandFeatureMembershipConfigmanagementConfigSync(c, f.ConfigSync, res); err != nil { + return nil, fmt.Errorf("error expanding ConfigSync into configSync: %w", err) + } else if v != nil { + m["configSync"] = v + } + if v, err := expandFeatureMembershipConfigmanagementPolicyController(c, f.PolicyController, res); err != nil { + return nil, fmt.Errorf("error expanding PolicyController into policyController: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["policyController"] = v + } + if v, err := expandHierarchyControllerConfig(c, f.HierarchyController, res); err != nil { + return nil, fmt.Errorf("error expanding HierarchyController into hierarchyController: %w", err) + } else if v != nil { + m["hierarchyController"] = v + } + if v := f.Version; !dcl.IsEmptyValueIndirect(v) { + m["version"] = v + } + if v := f.Management; !dcl.IsEmptyValueIndirect(v) { + m["management"] = v + } + + return m, nil +} + +// flattenFeatureMembershipConfigmanagement flattens an instance of FeatureMembershipConfigmanagement from a JSON +// response object. 
+func flattenFeatureMembershipConfigmanagement(c *Client, i interface{}, res *FeatureMembership) *FeatureMembershipConfigmanagement { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &FeatureMembershipConfigmanagement{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyFeatureMembershipConfigmanagement + } + r.ConfigSync = flattenFeatureMembershipConfigmanagementConfigSync(c, m["configSync"], res) + r.PolicyController = flattenFeatureMembershipConfigmanagementPolicyController(c, m["policyController"], res) + r.HierarchyController = flattenHierarchyControllerConfig(c, m["hierarchyController"], res) + r.Version = dcl.FlattenString(m["version"]) + r.Management = flattenFeatureMembershipConfigmanagementManagementEnum(m["management"]) + + return r +} + +// expandFeatureMembershipConfigmanagementConfigSyncMap expands the contents of FeatureMembershipConfigmanagementConfigSync into a JSON +// request object. +func expandFeatureMembershipConfigmanagementConfigSyncMap(c *Client, f map[string]FeatureMembershipConfigmanagementConfigSync, res *FeatureMembership) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandFeatureMembershipConfigmanagementConfigSync(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandFeatureMembershipConfigmanagementConfigSyncSlice expands the contents of FeatureMembershipConfigmanagementConfigSync into a JSON +// request object. 
+func expandFeatureMembershipConfigmanagementConfigSyncSlice(c *Client, f []FeatureMembershipConfigmanagementConfigSync, res *FeatureMembership) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandFeatureMembershipConfigmanagementConfigSync(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenFeatureMembershipConfigmanagementConfigSyncMap flattens the contents of FeatureMembershipConfigmanagementConfigSync from a JSON +// response object. +func flattenFeatureMembershipConfigmanagementConfigSyncMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipConfigmanagementConfigSync { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]FeatureMembershipConfigmanagementConfigSync{} + } + + if len(a) == 0 { + return map[string]FeatureMembershipConfigmanagementConfigSync{} + } + + items := make(map[string]FeatureMembershipConfigmanagementConfigSync) + for k, item := range a { + items[k] = *flattenFeatureMembershipConfigmanagementConfigSync(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenFeatureMembershipConfigmanagementConfigSyncSlice flattens the contents of FeatureMembershipConfigmanagementConfigSync from a JSON +// response object. 
+func flattenFeatureMembershipConfigmanagementConfigSyncSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipConfigmanagementConfigSync { + a, ok := i.([]interface{}) + if !ok { + return []FeatureMembershipConfigmanagementConfigSync{} + } + + if len(a) == 0 { + return []FeatureMembershipConfigmanagementConfigSync{} + } + + items := make([]FeatureMembershipConfigmanagementConfigSync, 0, len(a)) + for _, item := range a { + items = append(items, *flattenFeatureMembershipConfigmanagementConfigSync(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandFeatureMembershipConfigmanagementConfigSync expands an instance of FeatureMembershipConfigmanagementConfigSync into a JSON +// request object. +func expandFeatureMembershipConfigmanagementConfigSync(c *Client, f *FeatureMembershipConfigmanagementConfigSync, res *FeatureMembership) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + m := make(map[string]interface{}) + if v, err := expandFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesSlice(c, f.DeploymentOverrides, res); err != nil { + return nil, fmt.Errorf("error expanding DeploymentOverrides into deploymentOverrides: %w", err) + } else if v != nil { + m["deploymentOverrides"] = v + } + if v, err := expandFeatureMembershipConfigmanagementConfigSyncGit(c, f.Git, res); err != nil { + return nil, fmt.Errorf("error expanding Git into git: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["git"] = v + } + if v := f.SourceFormat; !dcl.IsEmptyValueIndirect(v) { + m["sourceFormat"] = v + } + if v := f.Enabled; !dcl.IsEmptyValueIndirect(v) { + m["enabled"] = v + } + if v := f.StopSyncing; !dcl.IsEmptyValueIndirect(v) { + m["stopSyncing"] = v + } + if v := f.PreventDrift; !dcl.IsEmptyValueIndirect(v) { + m["preventDrift"] = v + } + if v := f.MetricsGcpServiceAccountEmail; !dcl.IsEmptyValueIndirect(v) { + m["metricsGcpServiceAccountEmail"] = v + } + if v, err := 
expandFeatureMembershipConfigmanagementConfigSyncOci(c, f.Oci, res); err != nil { + return nil, fmt.Errorf("error expanding Oci into oci: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["oci"] = v + } + + return m, nil +} + +// flattenFeatureMembershipConfigmanagementConfigSync flattens an instance of FeatureMembershipConfigmanagementConfigSync from a JSON +// response object. +func flattenFeatureMembershipConfigmanagementConfigSync(c *Client, i interface{}, res *FeatureMembership) *FeatureMembershipConfigmanagementConfigSync { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &FeatureMembershipConfigmanagementConfigSync{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyFeatureMembershipConfigmanagementConfigSync + } + r.DeploymentOverrides = flattenFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesSlice(c, m["deploymentOverrides"], res) + r.Git = flattenFeatureMembershipConfigmanagementConfigSyncGit(c, m["git"], res) + r.SourceFormat = dcl.FlattenString(m["sourceFormat"]) + r.Enabled = dcl.FlattenBool(m["enabled"]) + r.StopSyncing = dcl.FlattenBool(m["stopSyncing"]) + r.PreventDrift = dcl.FlattenBool(m["preventDrift"]) + r.MetricsGcpServiceAccountEmail = dcl.FlattenString(m["metricsGcpServiceAccountEmail"]) + r.Oci = flattenFeatureMembershipConfigmanagementConfigSyncOci(c, m["oci"], res) + + return r +} + +// expandFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesMap expands the contents of FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides into a JSON +// request object. 
+func expandFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesMap(c *Client, f map[string]FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides, res *FeatureMembership) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesSlice expands the contents of FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides into a JSON +// request object. +func expandFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesSlice(c *Client, f []FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides, res *FeatureMembership) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesMap flattens the contents of FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides from a JSON +// response object. 
+func flattenFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides{} + } + + if len(a) == 0 { + return map[string]FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides{} + } + + items := make(map[string]FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides) + for k, item := range a { + items[k] = *flattenFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesSlice flattens the contents of FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides from a JSON +// response object. +func flattenFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides { + a, ok := i.([]interface{}) + if !ok { + return []FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides{} + } + + if len(a) == 0 { + return []FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides{} + } + + items := make([]FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides, 0, len(a)) + for _, item := range a { + items = append(items, *flattenFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides expands an instance of FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides into a JSON +// request object. 
+func expandFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides(c *Client, f *FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides, res *FeatureMembership) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.DeploymentName; !dcl.IsEmptyValueIndirect(v) { + m["deploymentName"] = v + } + if v := f.DeploymentNamespace; !dcl.IsEmptyValueIndirect(v) { + m["deploymentNamespace"] = v + } + if v, err := expandFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersSlice(c, f.Containers, res); err != nil { + return nil, fmt.Errorf("error expanding Containers into containers: %w", err) + } else if v != nil { + m["containers"] = v + } + + return m, nil +} + +// flattenFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides flattens an instance of FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides from a JSON +// response object. +func flattenFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides(c *Client, i interface{}, res *FeatureMembership) *FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides + } + r.DeploymentName = dcl.FlattenString(m["deploymentName"]) + r.DeploymentNamespace = dcl.FlattenString(m["deploymentNamespace"]) + r.Containers = flattenFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersSlice(c, m["containers"], res) + + return r +} + +// expandFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersMap expands the contents of FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers into a JSON +// request object. 
+func expandFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersMap(c *Client, f map[string]FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers, res *FeatureMembership) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersSlice expands the contents of FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers into a JSON +// request object. +func expandFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersSlice(c *Client, f []FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers, res *FeatureMembership) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersMap flattens the contents of FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers from a JSON +// response object. 
+func flattenFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers{} + } + + if len(a) == 0 { + return map[string]FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers{} + } + + items := make(map[string]FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers) + for k, item := range a { + items[k] = *flattenFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersSlice flattens the contents of FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers from a JSON +// response object. +func flattenFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers { + a, ok := i.([]interface{}) + if !ok { + return []FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers{} + } + + if len(a) == 0 { + return []FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers{} + } + + items := make([]FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers, 0, len(a)) + for _, item := range a { + items = append(items, *flattenFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers expands an instance of FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers into a JSON +// request object. 
+func expandFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers(c *Client, f *FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers, res *FeatureMembership) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.ContainerName; !dcl.IsEmptyValueIndirect(v) { + m["containerName"] = v + } + if v := f.CpuRequest; !dcl.IsEmptyValueIndirect(v) { + m["cpuRequest"] = v + } + if v := f.MemoryRequest; !dcl.IsEmptyValueIndirect(v) { + m["memoryRequest"] = v + } + if v := f.CpuLimit; !dcl.IsEmptyValueIndirect(v) { + m["cpuLimit"] = v + } + if v := f.MemoryLimit; !dcl.IsEmptyValueIndirect(v) { + m["memoryLimit"] = v + } + + return m, nil +} + +// flattenFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers flattens an instance of FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers from a JSON +// response object. +func flattenFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers(c *Client, i interface{}, res *FeatureMembership) *FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers + } + r.ContainerName = dcl.FlattenString(m["containerName"]) + r.CpuRequest = dcl.FlattenString(m["cpuRequest"]) + r.MemoryRequest = dcl.FlattenString(m["memoryRequest"]) + r.CpuLimit = dcl.FlattenString(m["cpuLimit"]) + r.MemoryLimit = dcl.FlattenString(m["memoryLimit"]) + + return r +} + +// expandFeatureMembershipConfigmanagementConfigSyncGitMap expands the contents of FeatureMembershipConfigmanagementConfigSyncGit into a JSON +// request object. 
+func expandFeatureMembershipConfigmanagementConfigSyncGitMap(c *Client, f map[string]FeatureMembershipConfigmanagementConfigSyncGit, res *FeatureMembership) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandFeatureMembershipConfigmanagementConfigSyncGit(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandFeatureMembershipConfigmanagementConfigSyncGitSlice expands the contents of FeatureMembershipConfigmanagementConfigSyncGit into a JSON +// request object. +func expandFeatureMembershipConfigmanagementConfigSyncGitSlice(c *Client, f []FeatureMembershipConfigmanagementConfigSyncGit, res *FeatureMembership) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandFeatureMembershipConfigmanagementConfigSyncGit(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenFeatureMembershipConfigmanagementConfigSyncGitMap flattens the contents of FeatureMembershipConfigmanagementConfigSyncGit from a JSON +// response object. 
+func flattenFeatureMembershipConfigmanagementConfigSyncGitMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipConfigmanagementConfigSyncGit { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]FeatureMembershipConfigmanagementConfigSyncGit{} + } + + if len(a) == 0 { + return map[string]FeatureMembershipConfigmanagementConfigSyncGit{} + } + + items := make(map[string]FeatureMembershipConfigmanagementConfigSyncGit) + for k, item := range a { + items[k] = *flattenFeatureMembershipConfigmanagementConfigSyncGit(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenFeatureMembershipConfigmanagementConfigSyncGitSlice flattens the contents of FeatureMembershipConfigmanagementConfigSyncGit from a JSON +// response object. +func flattenFeatureMembershipConfigmanagementConfigSyncGitSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipConfigmanagementConfigSyncGit { + a, ok := i.([]interface{}) + if !ok { + return []FeatureMembershipConfigmanagementConfigSyncGit{} + } + + if len(a) == 0 { + return []FeatureMembershipConfigmanagementConfigSyncGit{} + } + + items := make([]FeatureMembershipConfigmanagementConfigSyncGit, 0, len(a)) + for _, item := range a { + items = append(items, *flattenFeatureMembershipConfigmanagementConfigSyncGit(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandFeatureMembershipConfigmanagementConfigSyncGit expands an instance of FeatureMembershipConfigmanagementConfigSyncGit into a JSON +// request object. 
+func expandFeatureMembershipConfigmanagementConfigSyncGit(c *Client, f *FeatureMembershipConfigmanagementConfigSyncGit, res *FeatureMembership) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.SyncRepo; !dcl.IsEmptyValueIndirect(v) { + m["syncRepo"] = v + } + if v := f.SyncBranch; !dcl.IsEmptyValueIndirect(v) { + m["syncBranch"] = v + } + if v := f.PolicyDir; !dcl.IsEmptyValueIndirect(v) { + m["policyDir"] = v + } + if v := f.SyncWaitSecs; !dcl.IsEmptyValueIndirect(v) { + m["syncWaitSecs"] = v + } + if v := f.SyncRev; !dcl.IsEmptyValueIndirect(v) { + m["syncRev"] = v + } + if v := f.SecretType; !dcl.IsEmptyValueIndirect(v) { + m["secretType"] = v + } + if v := f.HttpsProxy; !dcl.IsEmptyValueIndirect(v) { + m["httpsProxy"] = v + } + if v := f.GcpServiceAccountEmail; !dcl.IsEmptyValueIndirect(v) { + m["gcpServiceAccountEmail"] = v + } + + return m, nil +} + +// flattenFeatureMembershipConfigmanagementConfigSyncGit flattens an instance of FeatureMembershipConfigmanagementConfigSyncGit from a JSON +// response object. 
+func flattenFeatureMembershipConfigmanagementConfigSyncGit(c *Client, i interface{}, res *FeatureMembership) *FeatureMembershipConfigmanagementConfigSyncGit { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &FeatureMembershipConfigmanagementConfigSyncGit{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyFeatureMembershipConfigmanagementConfigSyncGit + } + r.SyncRepo = dcl.FlattenString(m["syncRepo"]) + r.SyncBranch = dcl.FlattenString(m["syncBranch"]) + r.PolicyDir = dcl.FlattenString(m["policyDir"]) + r.SyncWaitSecs = dcl.FlattenString(m["syncWaitSecs"]) + r.SyncRev = dcl.FlattenString(m["syncRev"]) + r.SecretType = dcl.FlattenString(m["secretType"]) + r.HttpsProxy = dcl.FlattenString(m["httpsProxy"]) + r.GcpServiceAccountEmail = dcl.FlattenString(m["gcpServiceAccountEmail"]) + + return r +} + +// expandFeatureMembershipConfigmanagementConfigSyncOciMap expands the contents of FeatureMembershipConfigmanagementConfigSyncOci into a JSON +// request object. +func expandFeatureMembershipConfigmanagementConfigSyncOciMap(c *Client, f map[string]FeatureMembershipConfigmanagementConfigSyncOci, res *FeatureMembership) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandFeatureMembershipConfigmanagementConfigSyncOci(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandFeatureMembershipConfigmanagementConfigSyncOciSlice expands the contents of FeatureMembershipConfigmanagementConfigSyncOci into a JSON +// request object. 
+func expandFeatureMembershipConfigmanagementConfigSyncOciSlice(c *Client, f []FeatureMembershipConfigmanagementConfigSyncOci, res *FeatureMembership) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandFeatureMembershipConfigmanagementConfigSyncOci(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenFeatureMembershipConfigmanagementConfigSyncOciMap flattens the contents of FeatureMembershipConfigmanagementConfigSyncOci from a JSON +// response object. +func flattenFeatureMembershipConfigmanagementConfigSyncOciMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipConfigmanagementConfigSyncOci { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]FeatureMembershipConfigmanagementConfigSyncOci{} + } + + if len(a) == 0 { + return map[string]FeatureMembershipConfigmanagementConfigSyncOci{} + } + + items := make(map[string]FeatureMembershipConfigmanagementConfigSyncOci) + for k, item := range a { + items[k] = *flattenFeatureMembershipConfigmanagementConfigSyncOci(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenFeatureMembershipConfigmanagementConfigSyncOciSlice flattens the contents of FeatureMembershipConfigmanagementConfigSyncOci from a JSON +// response object. 
+func flattenFeatureMembershipConfigmanagementConfigSyncOciSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipConfigmanagementConfigSyncOci { + a, ok := i.([]interface{}) + if !ok { + return []FeatureMembershipConfigmanagementConfigSyncOci{} + } + + if len(a) == 0 { + return []FeatureMembershipConfigmanagementConfigSyncOci{} + } + + items := make([]FeatureMembershipConfigmanagementConfigSyncOci, 0, len(a)) + for _, item := range a { + items = append(items, *flattenFeatureMembershipConfigmanagementConfigSyncOci(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandFeatureMembershipConfigmanagementConfigSyncOci expands an instance of FeatureMembershipConfigmanagementConfigSyncOci into a JSON +// request object. +func expandFeatureMembershipConfigmanagementConfigSyncOci(c *Client, f *FeatureMembershipConfigmanagementConfigSyncOci, res *FeatureMembership) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.SyncRepo; !dcl.IsEmptyValueIndirect(v) { + m["syncRepo"] = v + } + if v := f.PolicyDir; !dcl.IsEmptyValueIndirect(v) { + m["policyDir"] = v + } + if v := f.SyncWaitSecs; !dcl.IsEmptyValueIndirect(v) { + m["syncWaitSecs"] = v + } + if v := f.SecretType; !dcl.IsEmptyValueIndirect(v) { + m["secretType"] = v + } + if v := f.GcpServiceAccountEmail; !dcl.IsEmptyValueIndirect(v) { + m["gcpServiceAccountEmail"] = v + } + + return m, nil +} + +// flattenFeatureMembershipConfigmanagementConfigSyncOci flattens an instance of FeatureMembershipConfigmanagementConfigSyncOci from a JSON +// response object. 
+func flattenFeatureMembershipConfigmanagementConfigSyncOci(c *Client, i interface{}, res *FeatureMembership) *FeatureMembershipConfigmanagementConfigSyncOci { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &FeatureMembershipConfigmanagementConfigSyncOci{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyFeatureMembershipConfigmanagementConfigSyncOci + } + r.SyncRepo = dcl.FlattenString(m["syncRepo"]) + r.PolicyDir = dcl.FlattenString(m["policyDir"]) + r.SyncWaitSecs = dcl.FlattenString(m["syncWaitSecs"]) + r.SecretType = dcl.FlattenString(m["secretType"]) + r.GcpServiceAccountEmail = dcl.FlattenString(m["gcpServiceAccountEmail"]) + + return r +} + +// expandFeatureMembershipConfigmanagementPolicyControllerMap expands the contents of FeatureMembershipConfigmanagementPolicyController into a JSON +// request object. +func expandFeatureMembershipConfigmanagementPolicyControllerMap(c *Client, f map[string]FeatureMembershipConfigmanagementPolicyController, res *FeatureMembership) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandFeatureMembershipConfigmanagementPolicyController(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandFeatureMembershipConfigmanagementPolicyControllerSlice expands the contents of FeatureMembershipConfigmanagementPolicyController into a JSON +// request object. 
+func expandFeatureMembershipConfigmanagementPolicyControllerSlice(c *Client, f []FeatureMembershipConfigmanagementPolicyController, res *FeatureMembership) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandFeatureMembershipConfigmanagementPolicyController(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenFeatureMembershipConfigmanagementPolicyControllerMap flattens the contents of FeatureMembershipConfigmanagementPolicyController from a JSON +// response object. +func flattenFeatureMembershipConfigmanagementPolicyControllerMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipConfigmanagementPolicyController { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]FeatureMembershipConfigmanagementPolicyController{} + } + + if len(a) == 0 { + return map[string]FeatureMembershipConfigmanagementPolicyController{} + } + + items := make(map[string]FeatureMembershipConfigmanagementPolicyController) + for k, item := range a { + items[k] = *flattenFeatureMembershipConfigmanagementPolicyController(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenFeatureMembershipConfigmanagementPolicyControllerSlice flattens the contents of FeatureMembershipConfigmanagementPolicyController from a JSON +// response object. 
+func flattenFeatureMembershipConfigmanagementPolicyControllerSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipConfigmanagementPolicyController { + a, ok := i.([]interface{}) + if !ok { + return []FeatureMembershipConfigmanagementPolicyController{} + } + + if len(a) == 0 { + return []FeatureMembershipConfigmanagementPolicyController{} + } + + items := make([]FeatureMembershipConfigmanagementPolicyController, 0, len(a)) + for _, item := range a { + items = append(items, *flattenFeatureMembershipConfigmanagementPolicyController(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandFeatureMembershipConfigmanagementPolicyController expands an instance of FeatureMembershipConfigmanagementPolicyController into a JSON +// request object. +func expandFeatureMembershipConfigmanagementPolicyController(c *Client, f *FeatureMembershipConfigmanagementPolicyController, res *FeatureMembership) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Enabled; !dcl.IsEmptyValueIndirect(v) { + m["enabled"] = v + } + if v := f.ExemptableNamespaces; v != nil { + m["exemptableNamespaces"] = v + } + if v := f.ReferentialRulesEnabled; !dcl.IsEmptyValueIndirect(v) { + m["referentialRulesEnabled"] = v + } + if v := f.LogDeniesEnabled; !dcl.IsEmptyValueIndirect(v) { + m["logDeniesEnabled"] = v + } + if v := f.MutationEnabled; !dcl.IsEmptyValueIndirect(v) { + m["mutationEnabled"] = v + } + if v, err := expandFeatureMembershipConfigmanagementPolicyControllerMonitoring(c, f.Monitoring, res); err != nil { + return nil, fmt.Errorf("error expanding Monitoring into monitoring: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["monitoring"] = v + } + if v := f.TemplateLibraryInstalled; !dcl.IsEmptyValueIndirect(v) { + m["templateLibraryInstalled"] = v + } + if v := f.AuditIntervalSeconds; !dcl.IsEmptyValueIndirect(v) { + m["auditIntervalSeconds"] = v + } + + 
return m, nil +} + +// flattenFeatureMembershipConfigmanagementPolicyController flattens an instance of FeatureMembershipConfigmanagementPolicyController from a JSON +// response object. +func flattenFeatureMembershipConfigmanagementPolicyController(c *Client, i interface{}, res *FeatureMembership) *FeatureMembershipConfigmanagementPolicyController { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &FeatureMembershipConfigmanagementPolicyController{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyFeatureMembershipConfigmanagementPolicyController + } + r.Enabled = dcl.FlattenBool(m["enabled"]) + r.ExemptableNamespaces = dcl.FlattenStringSlice(m["exemptableNamespaces"]) + r.ReferentialRulesEnabled = dcl.FlattenBool(m["referentialRulesEnabled"]) + r.LogDeniesEnabled = dcl.FlattenBool(m["logDeniesEnabled"]) + r.MutationEnabled = dcl.FlattenBool(m["mutationEnabled"]) + r.Monitoring = flattenFeatureMembershipConfigmanagementPolicyControllerMonitoring(c, m["monitoring"], res) + r.TemplateLibraryInstalled = dcl.FlattenBool(m["templateLibraryInstalled"]) + r.AuditIntervalSeconds = dcl.FlattenString(m["auditIntervalSeconds"]) + + return r +} + +// expandFeatureMembershipConfigmanagementPolicyControllerMonitoringMap expands the contents of FeatureMembershipConfigmanagementPolicyControllerMonitoring into a JSON +// request object. 
+func expandFeatureMembershipConfigmanagementPolicyControllerMonitoringMap(c *Client, f map[string]FeatureMembershipConfigmanagementPolicyControllerMonitoring, res *FeatureMembership) (map[string]interface{}, error) {
+	// A nil input map expands to nil so the enclosing request omits the field.
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandFeatureMembershipConfigmanagementPolicyControllerMonitoring(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandFeatureMembershipConfigmanagementPolicyControllerMonitoringSlice expands the contents of FeatureMembershipConfigmanagementPolicyControllerMonitoring into a JSON
+// request object.
+func expandFeatureMembershipConfigmanagementPolicyControllerMonitoringSlice(c *Client, f []FeatureMembershipConfigmanagementPolicyControllerMonitoring, res *FeatureMembership) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandFeatureMembershipConfigmanagementPolicyControllerMonitoring(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenFeatureMembershipConfigmanagementPolicyControllerMonitoringMap flattens the contents of FeatureMembershipConfigmanagementPolicyControllerMonitoring from a JSON
+// response object.
+func flattenFeatureMembershipConfigmanagementPolicyControllerMonitoringMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipConfigmanagementPolicyControllerMonitoring {
+	// Non-map or empty input flattens to an empty (non-nil) map.
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]FeatureMembershipConfigmanagementPolicyControllerMonitoring{}
+	}
+
+	if len(a) == 0 {
+		return map[string]FeatureMembershipConfigmanagementPolicyControllerMonitoring{}
+	}
+
+	items := make(map[string]FeatureMembershipConfigmanagementPolicyControllerMonitoring)
+	for k, item := range a {
+		// NOTE(review): unchecked type assertion — a non-object element in the
+		// response would panic. Looks like DCL-generated code; do not hand-edit.
+		items[k] = *flattenFeatureMembershipConfigmanagementPolicyControllerMonitoring(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenFeatureMembershipConfigmanagementPolicyControllerMonitoringSlice flattens the contents of FeatureMembershipConfigmanagementPolicyControllerMonitoring from a JSON
+// response object.
+func flattenFeatureMembershipConfigmanagementPolicyControllerMonitoringSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipConfigmanagementPolicyControllerMonitoring {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []FeatureMembershipConfigmanagementPolicyControllerMonitoring{}
+	}
+
+	if len(a) == 0 {
+		return []FeatureMembershipConfigmanagementPolicyControllerMonitoring{}
+	}
+
+	items := make([]FeatureMembershipConfigmanagementPolicyControllerMonitoring, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenFeatureMembershipConfigmanagementPolicyControllerMonitoring(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandFeatureMembershipConfigmanagementPolicyControllerMonitoring expands an instance of FeatureMembershipConfigmanagementPolicyControllerMonitoring into a JSON
+// request object.
+func expandFeatureMembershipConfigmanagementPolicyControllerMonitoring(c *Client, f *FeatureMembershipConfigmanagementPolicyControllerMonitoring, res *FeatureMembership) (map[string]interface{}, error) {
+	// An empty or nil struct expands to nil so the field is omitted.
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.Backends; v != nil {
+		m["backends"] = v
+	}
+
+	return m, nil
+}
+
+// flattenFeatureMembershipConfigmanagementPolicyControllerMonitoring flattens an instance of FeatureMembershipConfigmanagementPolicyControllerMonitoring from a JSON
+// response object.
+func flattenFeatureMembershipConfigmanagementPolicyControllerMonitoring(c *Client, i interface{}, res *FeatureMembership) *FeatureMembershipConfigmanagementPolicyControllerMonitoring {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &FeatureMembershipConfigmanagementPolicyControllerMonitoring{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		// Empty object collapses to the shared Empty sentinel (DCL convention).
+		return EmptyFeatureMembershipConfigmanagementPolicyControllerMonitoring
+	}
+	r.Backends = flattenFeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnumSlice(c, m["backends"], res)
+
+	return r
+}
+
+// expandFeatureMembershipConfigmanagementHierarchyControllerMap expands the contents of FeatureMembershipConfigmanagementHierarchyController into a JSON
+// request object.
+func expandFeatureMembershipConfigmanagementHierarchyControllerMap(c *Client, f map[string]FeatureMembershipConfigmanagementHierarchyController, res *FeatureMembership) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandFeatureMembershipConfigmanagementHierarchyController(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandFeatureMembershipConfigmanagementHierarchyControllerSlice expands the contents of FeatureMembershipConfigmanagementHierarchyController into a JSON
+// request object.
+func expandFeatureMembershipConfigmanagementHierarchyControllerSlice(c *Client, f []FeatureMembershipConfigmanagementHierarchyController, res *FeatureMembership) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandFeatureMembershipConfigmanagementHierarchyController(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenFeatureMembershipConfigmanagementHierarchyControllerMap flattens the contents of FeatureMembershipConfigmanagementHierarchyController from a JSON
+// response object.
+func flattenFeatureMembershipConfigmanagementHierarchyControllerMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipConfigmanagementHierarchyController {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]FeatureMembershipConfigmanagementHierarchyController{}
+	}
+
+	if len(a) == 0 {
+		return map[string]FeatureMembershipConfigmanagementHierarchyController{}
+	}
+
+	items := make(map[string]FeatureMembershipConfigmanagementHierarchyController)
+	for k, item := range a {
+		items[k] = *flattenFeatureMembershipConfigmanagementHierarchyController(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenFeatureMembershipConfigmanagementHierarchyControllerSlice flattens the contents of FeatureMembershipConfigmanagementHierarchyController from a JSON
+// response object.
+func flattenFeatureMembershipConfigmanagementHierarchyControllerSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipConfigmanagementHierarchyController {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []FeatureMembershipConfigmanagementHierarchyController{}
+	}
+
+	if len(a) == 0 {
+		return []FeatureMembershipConfigmanagementHierarchyController{}
+	}
+
+	items := make([]FeatureMembershipConfigmanagementHierarchyController, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenFeatureMembershipConfigmanagementHierarchyController(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandFeatureMembershipConfigmanagementHierarchyController expands an instance of FeatureMembershipConfigmanagementHierarchyController into a JSON
+// request object.
+func expandFeatureMembershipConfigmanagementHierarchyController(c *Client, f *FeatureMembershipConfigmanagementHierarchyController, res *FeatureMembership) (map[string]interface{}, error) {
+	// NOTE(review): unlike the sibling expanders, this checks only f == nil (not
+	// dcl.IsEmptyValueIndirect), so a non-nil struct with all-nil fields still
+	// emits an (empty) object — presumably intentional generator output; confirm
+	// before changing.
+	if f == nil {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.Enabled; v != nil {
+		m["enabled"] = v
+	}
+	if v := f.EnablePodTreeLabels; v != nil {
+		m["enablePodTreeLabels"] = v
+	}
+	if v := f.EnableHierarchicalResourceQuota; v != nil {
+		m["enableHierarchicalResourceQuota"] = v
+	}
+
+	return m, nil
+}
+
+// flattenFeatureMembershipConfigmanagementHierarchyController flattens an instance of FeatureMembershipConfigmanagementHierarchyController from a JSON
+// response object.
+func flattenFeatureMembershipConfigmanagementHierarchyController(c *Client, i interface{}, res *FeatureMembership) *FeatureMembershipConfigmanagementHierarchyController {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &FeatureMembershipConfigmanagementHierarchyController{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyFeatureMembershipConfigmanagementHierarchyController
+	}
+	r.Enabled = dcl.FlattenBool(m["enabled"])
+	r.EnablePodTreeLabels = dcl.FlattenBool(m["enablePodTreeLabels"])
+	r.EnableHierarchicalResourceQuota = dcl.FlattenBool(m["enableHierarchicalResourceQuota"])
+
+	return r
+}
+
+// expandFeatureMembershipPolicycontrollerMap expands the contents of FeatureMembershipPolicycontroller into a JSON
+// request object.
+func expandFeatureMembershipPolicycontrollerMap(c *Client, f map[string]FeatureMembershipPolicycontroller, res *FeatureMembership) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandFeatureMembershipPolicycontroller(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandFeatureMembershipPolicycontrollerSlice expands the contents of FeatureMembershipPolicycontroller into a JSON
+// request object.
+func expandFeatureMembershipPolicycontrollerSlice(c *Client, f []FeatureMembershipPolicycontroller, res *FeatureMembership) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandFeatureMembershipPolicycontroller(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenFeatureMembershipPolicycontrollerMap flattens the contents of FeatureMembershipPolicycontroller from a JSON
+// response object.
+func flattenFeatureMembershipPolicycontrollerMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipPolicycontroller {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]FeatureMembershipPolicycontroller{}
+	}
+
+	if len(a) == 0 {
+		return map[string]FeatureMembershipPolicycontroller{}
+	}
+
+	items := make(map[string]FeatureMembershipPolicycontroller)
+	for k, item := range a {
+		items[k] = *flattenFeatureMembershipPolicycontroller(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenFeatureMembershipPolicycontrollerSlice flattens the contents of FeatureMembershipPolicycontroller from a JSON
+// response object.
+func flattenFeatureMembershipPolicycontrollerSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipPolicycontroller {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []FeatureMembershipPolicycontroller{}
+	}
+
+	if len(a) == 0 {
+		return []FeatureMembershipPolicycontroller{}
+	}
+
+	items := make([]FeatureMembershipPolicycontroller, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenFeatureMembershipPolicycontroller(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandFeatureMembershipPolicycontroller expands an instance of FeatureMembershipPolicycontroller into a JSON
+// request object.
+func expandFeatureMembershipPolicycontroller(c *Client, f *FeatureMembershipPolicycontroller, res *FeatureMembership) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.Version; !dcl.IsEmptyValueIndirect(v) {
+		m["version"] = v
+	}
+	// Nested hub config is expanded recursively; errors are wrapped with field context.
+	if v, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfig(c, f.PolicyControllerHubConfig, res); err != nil {
+		return nil, fmt.Errorf("error expanding PolicyControllerHubConfig into policyControllerHubConfig: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["policyControllerHubConfig"] = v
+	}
+
+	return m, nil
+}
+
+// flattenFeatureMembershipPolicycontroller flattens an instance of FeatureMembershipPolicycontroller from a JSON
+// response object.
+func flattenFeatureMembershipPolicycontroller(c *Client, i interface{}, res *FeatureMembership) *FeatureMembershipPolicycontroller {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &FeatureMembershipPolicycontroller{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyFeatureMembershipPolicycontroller
+	}
+	r.Version = dcl.FlattenString(m["version"])
+	r.PolicyControllerHubConfig = flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfig(c, m["policyControllerHubConfig"], res)
+
+	return r
+}
+
+// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigMap expands the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfig into a JSON
+// request object.
+func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigMap(c *Client, f map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfig, res *FeatureMembership) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigSlice expands the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfig into a JSON
+// request object.
+func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigSlice(c *Client, f []FeatureMembershipPolicycontrollerPolicyControllerHubConfig, res *FeatureMembership) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfig(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigMap flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfig from a JSON
+// response object.
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfig {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfig{}
+	}
+
+	if len(a) == 0 {
+		return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfig{}
+	}
+
+	items := make(map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfig)
+	for k, item := range a {
+		items[k] = *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfig(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigSlice flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfig from a JSON
+// response object.
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipPolicycontrollerPolicyControllerHubConfig {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []FeatureMembershipPolicycontrollerPolicyControllerHubConfig{}
+	}
+
+	if len(a) == 0 {
+		return []FeatureMembershipPolicycontrollerPolicyControllerHubConfig{}
+	}
+
+	items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfig, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfig(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfig expands an instance of FeatureMembershipPolicycontrollerPolicyControllerHubConfig into a JSON
+// request object.
+func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfig(c *Client, f *FeatureMembershipPolicycontrollerPolicyControllerHubConfig, res *FeatureMembership) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	// Scalar fields use the IsEmptyValueIndirect guard; slice/map fields
+	// (ExemptableNamespaces) use a plain nil check so an explicit empty list is kept.
+	m := make(map[string]interface{})
+	if v := f.InstallSpec; !dcl.IsEmptyValueIndirect(v) {
+		m["installSpec"] = v
+	}
+	if v := f.ExemptableNamespaces; v != nil {
+		m["exemptableNamespaces"] = v
+	}
+	if v := f.ReferentialRulesEnabled; !dcl.IsEmptyValueIndirect(v) {
+		m["referentialRulesEnabled"] = v
+	}
+	if v := f.LogDeniesEnabled; !dcl.IsEmptyValueIndirect(v) {
+		m["logDeniesEnabled"] = v
+	}
+	if v := f.MutationEnabled; !dcl.IsEmptyValueIndirect(v) {
+		m["mutationEnabled"] = v
+	}
+	if v, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring(c, f.Monitoring, res); err != nil {
+		return nil, fmt.Errorf("error expanding Monitoring into monitoring: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["monitoring"] = v
+	}
+	if v := f.AuditIntervalSeconds; !dcl.IsEmptyValueIndirect(v) {
+		m["auditIntervalSeconds"] = v
+	}
+	if v := f.ConstraintViolationLimit; !dcl.IsEmptyValueIndirect(v) {
+		m["constraintViolationLimit"] = v
+	}
+	if v, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent(c, f.PolicyContent, res); err != nil {
+		return nil, fmt.Errorf("error expanding PolicyContent into policyContent: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["policyContent"] = v
+	}
+	if v, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsMap(c, f.DeploymentConfigs, res); err != nil {
+		return nil, fmt.Errorf("error expanding DeploymentConfigs into deploymentConfigs: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["deploymentConfigs"] = v
+	}
+
+	return m, nil
+}
+
+// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfig flattens an instance of FeatureMembershipPolicycontrollerPolicyControllerHubConfig from a JSON
+// response object.
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfig(c *Client, i interface{}, res *FeatureMembership) *FeatureMembershipPolicycontrollerPolicyControllerHubConfig {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &FeatureMembershipPolicycontrollerPolicyControllerHubConfig{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfig
+	}
+	r.InstallSpec = flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum(m["installSpec"])
+	r.ExemptableNamespaces = dcl.FlattenStringSlice(m["exemptableNamespaces"])
+	r.ReferentialRulesEnabled = dcl.FlattenBool(m["referentialRulesEnabled"])
+	r.LogDeniesEnabled = dcl.FlattenBool(m["logDeniesEnabled"])
+	r.MutationEnabled = dcl.FlattenBool(m["mutationEnabled"])
+	r.Monitoring = flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring(c, m["monitoring"], res)
+	r.AuditIntervalSeconds = dcl.FlattenInteger(m["auditIntervalSeconds"])
+	r.ConstraintViolationLimit = dcl.FlattenInteger(m["constraintViolationLimit"])
+	r.PolicyContent = flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent(c, m["policyContent"], res)
+	r.DeploymentConfigs = flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsMap(c, m["deploymentConfigs"], res)
+
+	return r
+}
+
+// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringMap expands the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring into a JSON
+// request object.
+func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringMap(c *Client, f map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring, res *FeatureMembership) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringSlice expands the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring into a JSON
+// request object.
+func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringSlice(c *Client, f []FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring, res *FeatureMembership) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringMap flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring from a JSON
+// response object.
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring{}
+	}
+
+	if len(a) == 0 {
+		return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring{}
+	}
+
+	items := make(map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring)
+	for k, item := range a {
+		items[k] = *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringSlice flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring from a JSON
+// response object.
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring{}
+	}
+
+	if len(a) == 0 {
+		return []FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring{}
+	}
+
+	items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring expands an instance of FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring into a JSON
+// request object.
+func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring(c *Client, f *FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring, res *FeatureMembership) (map[string]interface{}, error) {
+	// Empty/nil struct expands to nil so the "monitoring" field is omitted upstream.
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.Backends; v != nil {
+		m["backends"] = v
+	}
+
+	return m, nil
+}
+
+// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring flattens an instance of FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring from a JSON
+// response object.
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring(c *Client, i interface{}, res *FeatureMembership) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring
+	}
+	r.Backends = flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnumSlice(c, m["backends"], res)
+
+	return r
+}
+
+// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentMap expands the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent into a JSON
+// request object.
+func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentMap(c *Client, f map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent, res *FeatureMembership) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentSlice expands the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent into a JSON
+// request object.
+func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentSlice(c *Client, f []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent, res *FeatureMembership) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentMap flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent from a JSON
+// response object.
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent{}
+	}
+
+	if len(a) == 0 {
+		return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent{}
+	}
+
+	items := make(map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent)
+	for k, item := range a {
+		items[k] = *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentSlice flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent from a JSON
+// response object.
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent{}
+	}
+
+	if len(a) == 0 {
+		return []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent{}
+	}
+
+	items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent expands an instance of FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent into a JSON
+// request object.
+func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent(c *Client, f *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent, res *FeatureMembership) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary(c, f.TemplateLibrary, res); err != nil {
+		return nil, fmt.Errorf("error expanding TemplateLibrary into templateLibrary: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["templateLibrary"] = v
+	}
+	// NOTE(review): Bundles deliberately bypasses the generated
+	// expand...BundlesMap helper in favor of alsoExpandEmptyBundlesInMap (a
+	// hand-written helper defined elsewhere in this package) — presumably so
+	// bundles with empty bodies are still sent; confirm against that helper
+	// before regenerating this file.
+	if v, err := alsoExpandEmptyBundlesInMap(c, f.Bundles, res); err != nil {
+		return nil, fmt.Errorf("error expanding Bundles into bundles: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["bundles"] = v
+	}
+
+	return m, nil
+}
+
+// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent flattens an instance of FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent from a JSON
+// response object.
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent(c *Client, i interface{}, res *FeatureMembership) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent
+	}
+	r.TemplateLibrary = flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary(c, m["templateLibrary"], res)
+	r.Bundles = flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesMap(c, m["bundles"], res)
+
+	return r
+}
+
+// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryMap expands the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary into a JSON
+// request object.
+func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryMap(c *Client, f map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary, res *FeatureMembership) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrarySlice expands the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary into a JSON
+// request object.
+func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrarySlice(c *Client, f []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary, res *FeatureMembership) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryMap flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary from a JSON
+// response object.
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary{}
+	}
+
+	if len(a) == 0 {
+		return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary{}
+	}
+
+	items := make(map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary)
+	for k, item := range a {
+		items[k] = *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrarySlice flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary from a JSON
+// response object.
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrarySlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary{}
+	}
+
+	if len(a) == 0 {
+		return []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary{}
+	}
+
+	items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary expands an instance of FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary into a JSON
+// request object.
+func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary(c *Client, f *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary, res *FeatureMembership) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	// Installation is an enum; only one field exists on this type.
+	m := make(map[string]interface{})
+	if v := f.Installation; !dcl.IsEmptyValueIndirect(v) {
+		m["installation"] = v
+	}
+
+	return m, nil
+}
+
+// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary flattens an instance of FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary from a JSON
+// response object.
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary(c *Client, i interface{}, res *FeatureMembership) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary
+	}
+	r.Installation = flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnum(m["installation"])
+
+	return r
+}
+
+// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesMap expands the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles into a JSON
+// request object.
+func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesMap(c *Client, f map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles, res *FeatureMembership) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesSlice expands the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles into a JSON
+// request object.
+func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesSlice(c *Client, f []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles, res *FeatureMembership) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesMap flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles from a JSON
+// response object.
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles{}
+	}
+
+	if len(a) == 0 {
+		return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles{}
+	}
+
+	items := make(map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles)
+	for k, item := range a {
+		// NOTE(review): unchecked assertion — panics if a map value is not a JSON object
+		// (uniform across the generated Map flatteners in this file).
+		items[k] = *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesSlice flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles from a JSON
+// response object.
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles{}
+	}
+
+	if len(a) == 0 {
+		return []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles{}
+	}
+
+	items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles expands an instance of FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles into a JSON
+// request object.
+func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles(c *Client, f *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles, res *FeatureMembership) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	// NOTE(review): plain nil-check (not IsEmptyValueIndirect), so a non-nil empty
+	// slice IS serialized — lets a config send an explicitly empty exemption list.
+	if v := f.ExemptedNamespaces; v != nil {
+		m["exemptedNamespaces"] = v
+	}
+
+	return m, nil
+}
+
+// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles flattens an instance of FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles from a JSON
+// response object.
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles(c *Client, i interface{}, res *FeatureMembership) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles
+	}
+	r.ExemptedNamespaces = dcl.FlattenStringSlice(m["exemptedNamespaces"])
+
+	return r
+}
+
+// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsMap expands the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs into a JSON
+// request object.
+func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsMap(c *Client, f map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs, res *FeatureMembership) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsSlice expands the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs into a JSON
+// request object.
+func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsSlice(c *Client, f []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs, res *FeatureMembership) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsMap flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs from a JSON
+// response object.
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs{}
+	}
+
+	if len(a) == 0 {
+		return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs{}
+	}
+
+	items := make(map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs)
+	for k, item := range a {
+		items[k] = *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsSlice flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs from a JSON
+// response object.
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs{}
+	}
+
+	if len(a) == 0 {
+		return []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs{}
+	}
+
+	items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs expands an instance of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs into a JSON
+// request object.
+func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs(c *Client, f *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs, res *FeatureMembership) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.ReplicaCount; !dcl.IsEmptyValueIndirect(v) {
+		m["replicaCount"] = v
+	}
+	if v, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources(c, f.ContainerResources, res); err != nil {
+		return nil, fmt.Errorf("error expanding ContainerResources into containerResources: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["containerResources"] = v
+	}
+	if v := f.PodAffinity; !dcl.IsEmptyValueIndirect(v) {
+		m["podAffinity"] = v
+	}
+	// NOTE(review): the slice result is gated on v != nil (not IsEmptyValueIndirect),
+	// consistent with the other list fields — an empty-but-non-nil list is serialized.
+	if v, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsSlice(c, f.PodTolerations, res); err != nil {
+		return nil, fmt.Errorf("error expanding PodTolerations into podTolerations: %w", err)
+	} else if v != nil {
+		m["podTolerations"] = v
+	}
+
+	return m, nil
+}
+
+// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs flattens an instance of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs from a JSON
+// response object.
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs(c *Client, i interface{}, res *FeatureMembership) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs
+	}
+	r.ReplicaCount = dcl.FlattenInteger(m["replicaCount"])
+	r.ContainerResources = flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources(c, m["containerResources"], res)
+	r.PodAffinity = flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnum(m["podAffinity"])
+	r.PodTolerations = flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsSlice(c, m["podTolerations"], res)
+
+	return r
+}
+
+// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesMap expands the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources into a JSON
+// request object.
+func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesMap(c *Client, f map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources, res *FeatureMembership) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesSlice expands the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources into a JSON
+// request object.
+func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesSlice(c *Client, f []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources, res *FeatureMembership) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesMap flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources from a JSON
+// response object.
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources{}
+	}
+
+	if len(a) == 0 {
+		return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources{}
+	}
+
+	items := make(map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources)
+	for k, item := range a {
+		items[k] = *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesSlice flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources from a JSON
+// response object.
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources{}
+	}
+
+	if len(a) == 0 {
+		return []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources{}
+	}
+
+	items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources expands an instance of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources into a JSON
+// request object.
+func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources(c *Client, f *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources, res *FeatureMembership) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits(c, f.Limits, res); err != nil {
+		return nil, fmt.Errorf("error expanding Limits into limits: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["limits"] = v
+	}
+	if v, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests(c, f.Requests, res); err != nil {
+		return nil, fmt.Errorf("error expanding Requests into requests: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["requests"] = v
+	}
+
+	return m, nil
+}
+
+// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources flattens an instance of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources from a JSON
+// response object.
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources(c *Client, i interface{}, res *FeatureMembership) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources
+	}
+	r.Limits = flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits(c, m["limits"], res)
+	r.Requests = flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests(c, m["requests"], res)
+
+	return r
+}
+
+// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimitsMap expands the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits into a JSON
+// request object.
+func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimitsMap(c *Client, f map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits, res *FeatureMembership) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimitsSlice expands the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits into a JSON
+// request object.
+func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimitsSlice(c *Client, f []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits, res *FeatureMembership) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimitsMap flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits from a JSON
+// response object.
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimitsMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits{}
+	}
+
+	if len(a) == 0 {
+		return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits{}
+	}
+
+	items := make(map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits)
+	for k, item := range a {
+		items[k] = *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimitsSlice flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits from a JSON
+// response object.
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimitsSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits{}
+	}
+
+	if len(a) == 0 {
+		return []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits{}
+	}
+
+	items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits expands an instance of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits into a JSON
+// request object.
+func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits(c *Client, f *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits, res *FeatureMembership) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.Memory; !dcl.IsEmptyValueIndirect(v) {
+		m["memory"] = v
+	}
+	if v := f.Cpu; !dcl.IsEmptyValueIndirect(v) {
+		m["cpu"] = v
+	}
+
+	return m, nil
+}
+
+// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits flattens an instance of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits from a JSON
+// response object.
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits(c *Client, i interface{}, res *FeatureMembership) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits
+	}
+	r.Memory = dcl.FlattenString(m["memory"])
+	r.Cpu = dcl.FlattenString(m["cpu"])
+
+	return r
+}
+
+// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequestsMap expands the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests into a JSON
+// request object.
+func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequestsMap(c *Client, f map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests, res *FeatureMembership) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequestsSlice expands the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests into a JSON
+// request object.
+func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequestsSlice(c *Client, f []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests, res *FeatureMembership) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequestsMap flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests from a JSON
+// response object.
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequestsMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests{}
+	}
+
+	if len(a) == 0 {
+		return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests{}
+	}
+
+	items := make(map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests)
+	for k, item := range a {
+		items[k] = *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequestsSlice flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests from a JSON
+// response object.
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequestsSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests{}
+	}
+
+	if len(a) == 0 {
+		return []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests{}
+	}
+
+	items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests expands an instance of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests into a JSON
+// request object.
+func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests(c *Client, f *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests, res *FeatureMembership) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.Memory; !dcl.IsEmptyValueIndirect(v) {
+		m["memory"] = v
+	}
+	if v := f.Cpu; !dcl.IsEmptyValueIndirect(v) {
+		m["cpu"] = v
+	}
+
+	return m, nil
+}
+
+// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests flattens an instance of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests from a JSON
+// response object.
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests(c *Client, i interface{}, res *FeatureMembership) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests
+	}
+	r.Memory = dcl.FlattenString(m["memory"])
+	r.Cpu = dcl.FlattenString(m["cpu"])
+
+	return r
+}
+
+// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsMap expands the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations into a JSON
+// request object.
+func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsMap(c *Client, f map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations, res *FeatureMembership) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsSlice expands the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations into a JSON
+// request object.
+func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsSlice(c *Client, f []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations, res *FeatureMembership) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsMap flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations from a JSON
+// response object.
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations{}
+	}
+
+	if len(a) == 0 {
+		return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations{}
+	}
+
+	items := make(map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations)
+	for k, item := range a {
+		items[k] = *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsSlice flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations from a JSON
+// response object.
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations{}
+	}
+
+	if len(a) == 0 {
+		return []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations{}
+	}
+
+	items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations expands an instance of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations into a JSON
+// request object.
+func expandFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations(c *Client, f *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations, res *FeatureMembership) (map[string]interface{}, error) {
+	// NOTE(review): guards on f == nil rather than dcl.IsEmptyValueIndirect(f), unlike
+	// the sibling expanders above — a non-nil all-zero toleration still serializes to {}.
+	// This asymmetry matches other DCL list-element expanders; presumably intentional.
+	if f == nil {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.Key; !dcl.IsEmptyValueIndirect(v) {
+		m["key"] = v
+	}
+	if v := f.Operator; !dcl.IsEmptyValueIndirect(v) {
+		m["operator"] = v
+	}
+	if v := f.Value; !dcl.IsEmptyValueIndirect(v) {
+		m["value"] = v
+	}
+	if v := f.Effect; !dcl.IsEmptyValueIndirect(v) {
+		m["effect"] = v
+	}
+
+	return m, nil
+}
+
+// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations flattens an instance of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations from a JSON
+// response object.
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations(c *Client, i interface{}, res *FeatureMembership) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations
+	}
+	r.Key = dcl.FlattenString(m["key"])
+	r.Operator = dcl.FlattenString(m["operator"])
+	r.Value = dcl.FlattenString(m["value"])
+	r.Effect = dcl.FlattenString(m["effect"])
+
+	return r
+}
+
+// flattenFeatureMembershipMeshManagementEnumMap flattens the contents of FeatureMembershipMeshManagementEnum from a JSON
+// response object.
+func flattenFeatureMembershipMeshManagementEnumMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipMeshManagementEnum {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]FeatureMembershipMeshManagementEnum{}
+	}
+
+	if len(a) == 0 {
+		return map[string]FeatureMembershipMeshManagementEnum{}
+	}
+
+	items := make(map[string]FeatureMembershipMeshManagementEnum)
+	for k, item := range a {
+		// NOTE(review): item.(interface{}) is a no-op assertion, and the inner
+		// flatten returns nil for non-string elements, which this dereference
+		// would panic on. Standard DCL-generated pattern; change in the
+		// generator, not here.
+		items[k] = *flattenFeatureMembershipMeshManagementEnum(item.(interface{}))
+	}
+
+	return items
+}
+
+// flattenFeatureMembershipMeshManagementEnumSlice flattens the contents of FeatureMembershipMeshManagementEnum from a JSON
+// response object.
+func flattenFeatureMembershipMeshManagementEnumSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipMeshManagementEnum {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []FeatureMembershipMeshManagementEnum{}
+	}
+
+	if len(a) == 0 {
+		return []FeatureMembershipMeshManagementEnum{}
+	}
+
+	items := make([]FeatureMembershipMeshManagementEnum, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenFeatureMembershipMeshManagementEnum(item.(interface{})))
+	}
+
+	return items
+}
+
+// flattenFeatureMembershipMeshManagementEnum asserts that an interface is a string, and returns a
+// pointer to a *FeatureMembershipMeshManagementEnum with the same value as that string.
+func flattenFeatureMembershipMeshManagementEnum(i interface{}) *FeatureMembershipMeshManagementEnum {
+	s, ok := i.(string)
+	if !ok {
+		return nil
+	}
+
+	return FeatureMembershipMeshManagementEnumRef(s)
+}
+
+// flattenFeatureMembershipMeshControlPlaneEnumMap flattens the contents of FeatureMembershipMeshControlPlaneEnum from a JSON
+// response object.
+func flattenFeatureMembershipMeshControlPlaneEnumMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipMeshControlPlaneEnum {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]FeatureMembershipMeshControlPlaneEnum{}
+	}
+
+	if len(a) == 0 {
+		return map[string]FeatureMembershipMeshControlPlaneEnum{}
+	}
+
+	items := make(map[string]FeatureMembershipMeshControlPlaneEnum)
+	for k, item := range a {
+		items[k] = *flattenFeatureMembershipMeshControlPlaneEnum(item.(interface{}))
+	}
+
+	return items
+}
+
+// flattenFeatureMembershipMeshControlPlaneEnumSlice flattens the contents of FeatureMembershipMeshControlPlaneEnum from a JSON
+// response object.
+func flattenFeatureMembershipMeshControlPlaneEnumSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipMeshControlPlaneEnum {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []FeatureMembershipMeshControlPlaneEnum{}
+	}
+
+	if len(a) == 0 {
+		return []FeatureMembershipMeshControlPlaneEnum{}
+	}
+
+	items := make([]FeatureMembershipMeshControlPlaneEnum, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenFeatureMembershipMeshControlPlaneEnum(item.(interface{})))
+	}
+
+	return items
+}
+
+// flattenFeatureMembershipMeshControlPlaneEnum asserts that an interface is a string, and returns a
+// pointer to a *FeatureMembershipMeshControlPlaneEnum with the same value as that string.
+func flattenFeatureMembershipMeshControlPlaneEnum(i interface{}) *FeatureMembershipMeshControlPlaneEnum {
+	s, ok := i.(string)
+	if !ok {
+		return nil
+	}
+
+	return FeatureMembershipMeshControlPlaneEnumRef(s)
+}
+
+// flattenFeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnumMap flattens the contents of FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum from a JSON
+// response object.
+func flattenFeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnumMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum{}
+	}
+
+	if len(a) == 0 {
+		return map[string]FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum{}
+	}
+
+	items := make(map[string]FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum)
+	for k, item := range a {
+		// NOTE(review): dereferences the inner flatten's result without a nil
+		// check — panics if an element is not a string. DCL-generated pattern.
+		items[k] = *flattenFeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum(item.(interface{}))
+	}
+
+	return items
+}
+
+// flattenFeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnumSlice flattens the contents of FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum from a JSON
+// response object.
+func flattenFeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnumSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum{}
+	}
+
+	if len(a) == 0 {
+		return []FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum{}
+	}
+
+	items := make([]FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenFeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum(item.(interface{})))
+	}
+
+	return items
+}
+
+// flattenFeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum asserts that an interface is a string, and returns a
+// pointer to a *FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum with the same value as that string.
+func flattenFeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum(i interface{}) *FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum {
+	s, ok := i.(string)
+	if !ok {
+		return nil
+	}
+
+	return FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnumRef(s)
+}
+
+// flattenFeatureMembershipConfigmanagementManagementEnumMap flattens the contents of FeatureMembershipConfigmanagementManagementEnum from a JSON
+// response object.
+func flattenFeatureMembershipConfigmanagementManagementEnumMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipConfigmanagementManagementEnum {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]FeatureMembershipConfigmanagementManagementEnum{}
+	}
+
+	if len(a) == 0 {
+		return map[string]FeatureMembershipConfigmanagementManagementEnum{}
+	}
+
+	items := make(map[string]FeatureMembershipConfigmanagementManagementEnum)
+	for k, item := range a {
+		items[k] = *flattenFeatureMembershipConfigmanagementManagementEnum(item.(interface{}))
+	}
+
+	return items
+}
+
+// flattenFeatureMembershipConfigmanagementManagementEnumSlice flattens the contents of FeatureMembershipConfigmanagementManagementEnum from a JSON
+// response object.
+func flattenFeatureMembershipConfigmanagementManagementEnumSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipConfigmanagementManagementEnum {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []FeatureMembershipConfigmanagementManagementEnum{}
+	}
+
+	if len(a) == 0 {
+		return []FeatureMembershipConfigmanagementManagementEnum{}
+	}
+
+	items := make([]FeatureMembershipConfigmanagementManagementEnum, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenFeatureMembershipConfigmanagementManagementEnum(item.(interface{})))
+	}
+
+	return items
+}
+
+// flattenFeatureMembershipConfigmanagementManagementEnum asserts that an interface is a string, and returns a
+// pointer to a *FeatureMembershipConfigmanagementManagementEnum with the same value as that string.
+func flattenFeatureMembershipConfigmanagementManagementEnum(i interface{}) *FeatureMembershipConfigmanagementManagementEnum {
+	s, ok := i.(string)
+	if !ok {
+		return nil
+	}
+
+	return FeatureMembershipConfigmanagementManagementEnumRef(s)
+}
+
+// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnumMap flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum from a JSON
+// response object.
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnumMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum{}
+	}
+
+	if len(a) == 0 {
+		return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum{}
+	}
+
+	items := make(map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum)
+	for k, item := range a {
+		// NOTE(review): no-op item.(interface{}) assertion; non-string elements
+		// cause a nil dereference. DCL-generated pattern.
+		items[k] = *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum(item.(interface{}))
+	}
+
+	return items
+}
+
+// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnumSlice flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum from a JSON
+// response object.
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnumSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum{}
+	}
+
+	if len(a) == 0 {
+		return []FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum{}
+	}
+
+	items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum(item.(interface{})))
+	}
+
+	return items
+}
+
+// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum asserts that an interface is a string, and returns a
+// pointer to a *FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum with the same value as that string.
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum(i interface{}) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum {
+	s, ok := i.(string)
+	if !ok {
+		return nil
+	}
+
+	return FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnumRef(s)
+}
+
+// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnumMap flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum from a JSON
+// response object.
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnumMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum{}
+	}
+
+	if len(a) == 0 {
+		return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum{}
+	}
+
+	items := make(map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum)
+	for k, item := range a {
+		items[k] = *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum(item.(interface{}))
+	}
+
+	return items
+}
+
+// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnumSlice flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum from a JSON
+// response object.
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnumSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum{}
+	}
+
+	if len(a) == 0 {
+		return []FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum{}
+	}
+
+	items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum(item.(interface{})))
+	}
+
+	return items
+}
+
+// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum asserts that an interface is a string, and returns a
+// pointer to a *FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum with the same value as that string.
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum(i interface{}) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum {
+	s, ok := i.(string)
+	if !ok {
+		return nil
+	}
+
+	return FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnumRef(s)
+}
+
+// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnumMap flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnum from a JSON
+// response object.
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnumMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnum {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnum{}
+	}
+
+	if len(a) == 0 {
+		return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnum{}
+	}
+
+	items := make(map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnum)
+	for k, item := range a {
+		// NOTE(review): nil-dereference risk for non-string elements, as in the
+		// other generated enum-map flatteners in this file.
+		items[k] = *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnum(item.(interface{}))
+	}
+
+	return items
+}
+
+// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnumSlice flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnum from a JSON
+// response object.
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnumSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnum {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnum{}
+	}
+
+	if len(a) == 0 {
+		return []FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnum{}
+	}
+
+	items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnum, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnum(item.(interface{})))
+	}
+
+	return items
+}
+
+// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnum asserts that an interface is a string, and returns a
+// pointer to a *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnum with the same value as that string.
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnum(i interface{}) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnum {
+	s, ok := i.(string)
+	if !ok {
+		return nil
+	}
+
+	return FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnumRef(s)
+}
+
+// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnumMap flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnum from a JSON
+// response object.
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnumMap(c *Client, i interface{}, res *FeatureMembership) map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnum {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnum{}
+	}
+
+	if len(a) == 0 {
+		return map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnum{}
+	}
+
+	items := make(map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnum)
+	for k, item := range a {
+		// NOTE(review): nil-dereference risk for non-string elements; same
+		// generated pattern as the other enum flatteners in this file.
+		items[k] = *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnum(item.(interface{}))
+	}
+
+	return items
+}
+
+// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnumSlice flattens the contents of FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnum from a JSON
+// response object.
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnumSlice(c *Client, i interface{}, res *FeatureMembership) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnum {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnum{}
+	}
+
+	if len(a) == 0 {
+		return []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnum{}
+	}
+
+	items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnum, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnum(item.(interface{})))
+	}
+
+	return items
+}
+
+// flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnum asserts that an interface is a string, and returns a
+// pointer to a *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnum with the same value as that string.
+func flattenFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnum(i interface{}) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnum {
+	s, ok := i.(string)
+	if !ok {
+		return nil
+	}
+
+	return FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnumRef(s)
+}
+
+// This function returns a matcher that checks whether a serialized resource matches this resource
+// in its parameters (as defined by the fields in a Get, which definitionally define resource
+// identity). This is useful in extracting the element from a List call.
+func (r *FeatureMembership) matcher(c *Client) func([]byte) bool {
+	return func(b []byte) bool {
+		cr, err := unmarshalFeatureMembership(b, c, r)
+		if err != nil {
+			c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.")
+			return false
+		}
+		nr := r.urlNormalized()
+		ncr := cr.urlNormalized()
+		c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr)
+
+		// Identity fields (Project, Location, Feature) are compared pairwise;
+		// both-nil counts as equal, one-nil as unequal.
+		if nr.Project == nil && ncr.Project == nil {
+			c.Config.Logger.Info("Both Project fields null - considering equal.")
+		} else if nr.Project == nil || ncr.Project == nil {
+			c.Config.Logger.Info("Only one Project field is null - considering unequal.")
+			return false
+		} else if *nr.Project != *ncr.Project {
+			return false
+		}
+		if nr.Location == nil && ncr.Location == nil {
+			c.Config.Logger.Info("Both Location fields null - considering equal.")
+		} else if nr.Location == nil || ncr.Location == nil {
+			c.Config.Logger.Info("Only one Location field is null - considering unequal.")
+			return false
+		} else if *nr.Location != *ncr.Location {
+			return false
+		}
+		if nr.Feature == nil && ncr.Feature == nil {
+			c.Config.Logger.Info("Both Feature fields null - considering equal.")
+		} else if nr.Feature == nil || ncr.Feature == nil {
+			c.Config.Logger.Info("Only one Feature field is null - considering unequal.")
+			return false
+		} else if *nr.Feature != *ncr.Feature {
+			return false
+		}
+		return true
+	}
+}
+
+type featureMembershipDiff struct {
+	// The diff should include one or the other of RequiresRecreate or UpdateOp.
+	RequiresRecreate bool
+	UpdateOp         featureMembershipApiOperation
+	FieldName        string // used for error logging
+}
+
+func convertFieldDiffsToFeatureMembershipDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]featureMembershipDiff, error) {
+	opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff)
+	// Map each operation name to the field diffs associated with it.
+	for _, fd := range fds {
+		for _, ro := range fd.ResultingOperation {
+			if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok {
+				fieldDiffs = append(fieldDiffs, fd)
+				opNamesToFieldDiffs[ro] = fieldDiffs
+			} else {
+				config.Logger.Infof("%s required due to diff: %v", ro, fd)
+				opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd}
+			}
+		}
+	}
+	var diffs []featureMembershipDiff
+	// For each operation name, create a featureMembershipDiff which contains the operation.
+	for opName, fieldDiffs := range opNamesToFieldDiffs {
+		// Use the first field diff's field name for logging required recreate error.
+		diff := featureMembershipDiff{FieldName: fieldDiffs[0].FieldName}
+		if opName == "Recreate" {
+			diff.RequiresRecreate = true
+		} else {
+			apiOp, err := convertOpNameToFeatureMembershipApiOperation(opName, fieldDiffs, opts...)
+			if err != nil {
+				return diffs, err
+			}
+			diff.UpdateOp = apiOp
+		}
+		diffs = append(diffs, diff)
+	}
+	return diffs, nil
+}
+
+func convertOpNameToFeatureMembershipApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (featureMembershipApiOperation, error) {
+	switch opName {
+
+	case "updateFeatureMembershipUpdateFeatureMembershipOperation":
+		return &updateFeatureMembershipUpdateFeatureMembershipOperation{FieldDiffs: fieldDiffs}, nil
+
+	default:
+		return nil, fmt.Errorf("no such operation with name: %v", opName)
+	}
+}
+
+// extractFeatureMembershipFields and the helpers below normalize nested
+// sub-objects in place before diffing: each nil sub-object is temporarily
+// replaced with a fresh value so nested extract hooks can run, and is only
+// written back when it is non-empty afterwards.
+func extractFeatureMembershipFields(r *FeatureMembership) error {
+	vMesh := r.Mesh
+	if vMesh == nil {
+		// note: explicitly not the empty object.
+		vMesh = &FeatureMembershipMesh{}
+	}
+	if err := extractFeatureMembershipMeshFields(r, vMesh); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vMesh) {
+		r.Mesh = vMesh
+	}
+	vConfigmanagement := r.Configmanagement
+	if vConfigmanagement == nil {
+		// note: explicitly not the empty object.
+		vConfigmanagement = &FeatureMembershipConfigmanagement{}
+	}
+	if err := extractFeatureMembershipConfigmanagementFields(r, vConfigmanagement); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vConfigmanagement) {
+		r.Configmanagement = vConfigmanagement
+	}
+	vPolicycontroller := r.Policycontroller
+	if vPolicycontroller == nil {
+		// note: explicitly not the empty object.
+		vPolicycontroller = &FeatureMembershipPolicycontroller{}
+	}
+	if err := extractFeatureMembershipPolicycontrollerFields(r, vPolicycontroller); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vPolicycontroller) {
+		r.Policycontroller = vPolicycontroller
+	}
+	return nil
+}
+func extractFeatureMembershipMeshFields(r *FeatureMembership, o *FeatureMembershipMesh) error {
+	return nil
+}
+func extractFeatureMembershipConfigmanagementFields(r *FeatureMembership, o *FeatureMembershipConfigmanagement) error {
+	vConfigSync := o.ConfigSync
+	if vConfigSync == nil {
+		// note: explicitly not the empty object.
+		vConfigSync = &FeatureMembershipConfigmanagementConfigSync{}
+	}
+	if err := extractFeatureMembershipConfigmanagementConfigSyncFields(r, vConfigSync); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vConfigSync) {
+		o.ConfigSync = vConfigSync
+	}
+	vPolicyController := o.PolicyController
+	if vPolicyController == nil {
+		// note: explicitly not the empty object.
+		vPolicyController = &FeatureMembershipConfigmanagementPolicyController{}
+	}
+	if err := extractFeatureMembershipConfigmanagementPolicyControllerFields(r, vPolicyController); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vPolicyController) {
+		o.PolicyController = vPolicyController
+	}
+	vHierarchyController := o.HierarchyController
+	if vHierarchyController == nil {
+		// note: explicitly not the empty object.
+		vHierarchyController = &FeatureMembershipConfigmanagementHierarchyController{}
+	}
+	if err := extractFeatureMembershipConfigmanagementHierarchyControllerFields(r, vHierarchyController); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vHierarchyController) {
+		o.HierarchyController = vHierarchyController
+	}
+	return nil
+}
+func extractFeatureMembershipConfigmanagementConfigSyncFields(r *FeatureMembership, o *FeatureMembershipConfigmanagementConfigSync) error {
+	vGit := o.Git
+	if vGit == nil {
+		// note: explicitly not the empty object.
+		vGit = &FeatureMembershipConfigmanagementConfigSyncGit{}
+	}
+	if err := extractFeatureMembershipConfigmanagementConfigSyncGitFields(r, vGit); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vGit) {
+		o.Git = vGit
+	}
+	vOci := o.Oci
+	if vOci == nil {
+		// note: explicitly not the empty object.
+		vOci = &FeatureMembershipConfigmanagementConfigSyncOci{}
+	}
+	if err := extractFeatureMembershipConfigmanagementConfigSyncOciFields(r, vOci); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vOci) {
+		o.Oci = vOci
+	}
+	return nil
+}
+func extractFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesFields(r *FeatureMembership, o *FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides) error {
+	return nil
+}
+func extractFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersFields(r *FeatureMembership, o *FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers) error {
+	return nil
+}
+func extractFeatureMembershipConfigmanagementConfigSyncGitFields(r *FeatureMembership, o *FeatureMembershipConfigmanagementConfigSyncGit) error {
+	return nil
+}
+func extractFeatureMembershipConfigmanagementConfigSyncOciFields(r *FeatureMembership, o *FeatureMembershipConfigmanagementConfigSyncOci) error {
+	return nil
+}
+func extractFeatureMembershipConfigmanagementPolicyControllerFields(r *FeatureMembership, o *FeatureMembershipConfigmanagementPolicyController) error {
+	vMonitoring := o.Monitoring
+	if vMonitoring == nil {
+		// note: explicitly not the empty object.
+		vMonitoring = &FeatureMembershipConfigmanagementPolicyControllerMonitoring{}
+	}
+	if err := extractFeatureMembershipConfigmanagementPolicyControllerMonitoringFields(r, vMonitoring); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vMonitoring) {
+		o.Monitoring = vMonitoring
+	}
+	return nil
+}
+func extractFeatureMembershipConfigmanagementPolicyControllerMonitoringFields(r *FeatureMembership, o *FeatureMembershipConfigmanagementPolicyControllerMonitoring) error {
+	return nil
+}
+func extractFeatureMembershipConfigmanagementHierarchyControllerFields(r *FeatureMembership, o *FeatureMembershipConfigmanagementHierarchyController) error {
+	return nil
+}
+func extractFeatureMembershipPolicycontrollerFields(r *FeatureMembership, o *FeatureMembershipPolicycontroller) error {
+	vPolicyControllerHubConfig := o.PolicyControllerHubConfig
+	if vPolicyControllerHubConfig == nil {
+		// note: explicitly not the empty object.
+		vPolicyControllerHubConfig = &FeatureMembershipPolicycontrollerPolicyControllerHubConfig{}
+	}
+	if err := extractFeatureMembershipPolicycontrollerPolicyControllerHubConfigFields(r, vPolicyControllerHubConfig); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vPolicyControllerHubConfig) {
+		o.PolicyControllerHubConfig = vPolicyControllerHubConfig
+	}
+	return nil
+}
+func extractFeatureMembershipPolicycontrollerPolicyControllerHubConfigFields(r *FeatureMembership, o *FeatureMembershipPolicycontrollerPolicyControllerHubConfig) error {
+	vMonitoring := o.Monitoring
+	if vMonitoring == nil {
+		// note: explicitly not the empty object.
+		vMonitoring = &FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring{}
+	}
+	if err := extractFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringFields(r, vMonitoring); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vMonitoring) {
+		o.Monitoring = vMonitoring
+	}
+	vPolicyContent := o.PolicyContent
+	if vPolicyContent == nil {
+		// note: explicitly not the empty object.
+		vPolicyContent = &FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent{}
+	}
+	if err := extractFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentFields(r, vPolicyContent); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vPolicyContent) {
+		o.PolicyContent = vPolicyContent
+	}
+	return nil
+}
+func extractFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringFields(r *FeatureMembership, o *FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring) error {
+	return nil
+}
+func extractFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentFields(r *FeatureMembership, o *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent) error {
+	vTemplateLibrary := o.TemplateLibrary
+	if vTemplateLibrary == nil {
+		// note: explicitly not the empty object.
+		vTemplateLibrary = &FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary{}
+	}
+	if err := extractFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryFields(r, vTemplateLibrary); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vTemplateLibrary) {
+		o.TemplateLibrary = vTemplateLibrary
+	}
+	return nil
+}
+func extractFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryFields(r *FeatureMembership, o *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary) error {
+	return nil
+}
+func extractFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesFields(r *FeatureMembership, o *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles) error {
+	return nil
+}
+func extractFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsFields(r *FeatureMembership, o *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs) error {
+	vContainerResources := o.ContainerResources
+	if vContainerResources == nil {
+		// note: explicitly not the empty object.
+		vContainerResources = &FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources{}
+	}
+	if err := extractFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesFields(r, vContainerResources); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vContainerResources) {
+		o.ContainerResources = vContainerResources
+	}
+	return nil
+}
+func extractFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesFields(r *FeatureMembership, o *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources) error {
+	vLimits := o.Limits
+	if vLimits == nil {
+		// note: explicitly not the empty object.
+		vLimits = &FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits{}
+	}
+	if err := extractFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimitsFields(r, vLimits); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vLimits) {
+		o.Limits = vLimits
+	}
+	vRequests := o.Requests
+	if vRequests == nil {
+		// note: explicitly not the empty object.
+		vRequests = &FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests{}
+	}
+	if err := extractFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequestsFields(r, vRequests); err != nil {
+		return err
+	}
+	if !dcl.IsEmptyValueIndirect(vRequests) {
+		o.Requests = vRequests
+	}
+	return nil
+}
+func extractFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimitsFields(r *FeatureMembership, o *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits) error {
+	return nil
+}
+func extractFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequestsFields(r *FeatureMembership, o *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests) error {
+	return nil
+}
+func extractFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsFields(r *FeatureMembership, o *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations) error {
+	return nil
+}
+
+func postReadExtractFeatureMembershipFields(r *FeatureMembership) error {
+	vMesh := r.Mesh
+	if vMesh == nil {
+		// note: explicitly not the empty object.
+ vMesh = &FeatureMembershipMesh{} + } + if err := postReadExtractFeatureMembershipMeshFields(r, vMesh); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMesh) { + r.Mesh = vMesh + } + vConfigmanagement := r.Configmanagement + if vConfigmanagement == nil { + // note: explicitly not the empty object. + vConfigmanagement = &FeatureMembershipConfigmanagement{} + } + if err := postReadExtractFeatureMembershipConfigmanagementFields(r, vConfigmanagement); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vConfigmanagement) { + r.Configmanagement = vConfigmanagement + } + vPolicycontroller := r.Policycontroller + if vPolicycontroller == nil { + // note: explicitly not the empty object. + vPolicycontroller = &FeatureMembershipPolicycontroller{} + } + if err := postReadExtractFeatureMembershipPolicycontrollerFields(r, vPolicycontroller); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vPolicycontroller) { + r.Policycontroller = vPolicycontroller + } + return nil +} +func postReadExtractFeatureMembershipMeshFields(r *FeatureMembership, o *FeatureMembershipMesh) error { + return nil +} +func postReadExtractFeatureMembershipConfigmanagementFields(r *FeatureMembership, o *FeatureMembershipConfigmanagement) error { + vConfigSync := o.ConfigSync + if vConfigSync == nil { + // note: explicitly not the empty object. + vConfigSync = &FeatureMembershipConfigmanagementConfigSync{} + } + if err := extractFeatureMembershipConfigmanagementConfigSyncFields(r, vConfigSync); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vConfigSync) { + o.ConfigSync = vConfigSync + } + vPolicyController := o.PolicyController + if vPolicyController == nil { + // note: explicitly not the empty object. 
+ vPolicyController = &FeatureMembershipConfigmanagementPolicyController{} + } + if err := extractFeatureMembershipConfigmanagementPolicyControllerFields(r, vPolicyController); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vPolicyController) { + o.PolicyController = vPolicyController + } + vHierarchyController := o.HierarchyController + if vHierarchyController == nil { + // note: explicitly not the empty object. + vHierarchyController = &FeatureMembershipConfigmanagementHierarchyController{} + } + if err := extractFeatureMembershipConfigmanagementHierarchyControllerFields(r, vHierarchyController); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vHierarchyController) { + o.HierarchyController = vHierarchyController + } + return nil +} +func postReadExtractFeatureMembershipConfigmanagementConfigSyncFields(r *FeatureMembership, o *FeatureMembershipConfigmanagementConfigSync) error { + vGit := o.Git + if vGit == nil { + // note: explicitly not the empty object. + vGit = &FeatureMembershipConfigmanagementConfigSyncGit{} + } + if err := extractFeatureMembershipConfigmanagementConfigSyncGitFields(r, vGit); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vGit) { + o.Git = vGit + } + vOci := o.Oci + if vOci == nil { + // note: explicitly not the empty object. 
+ vOci = &FeatureMembershipConfigmanagementConfigSyncOci{} + } + if err := extractFeatureMembershipConfigmanagementConfigSyncOciFields(r, vOci); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vOci) { + o.Oci = vOci + } + return nil +} +func postReadExtractFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesFields(r *FeatureMembership, o *FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides) error { + return nil +} +func postReadExtractFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersFields(r *FeatureMembership, o *FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers) error { + return nil +} +func postReadExtractFeatureMembershipConfigmanagementConfigSyncGitFields(r *FeatureMembership, o *FeatureMembershipConfigmanagementConfigSyncGit) error { + return nil +} +func postReadExtractFeatureMembershipConfigmanagementConfigSyncOciFields(r *FeatureMembership, o *FeatureMembershipConfigmanagementConfigSyncOci) error { + return nil +} +func postReadExtractFeatureMembershipConfigmanagementPolicyControllerFields(r *FeatureMembership, o *FeatureMembershipConfigmanagementPolicyController) error { + vMonitoring := o.Monitoring + if vMonitoring == nil { + // note: explicitly not the empty object. 
+ vMonitoring = &FeatureMembershipConfigmanagementPolicyControllerMonitoring{} + } + if err := extractFeatureMembershipConfigmanagementPolicyControllerMonitoringFields(r, vMonitoring); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMonitoring) { + o.Monitoring = vMonitoring + } + return nil +} +func postReadExtractFeatureMembershipConfigmanagementPolicyControllerMonitoringFields(r *FeatureMembership, o *FeatureMembershipConfigmanagementPolicyControllerMonitoring) error { + return nil +} +func postReadExtractFeatureMembershipConfigmanagementHierarchyControllerFields(r *FeatureMembership, o *FeatureMembershipConfigmanagementHierarchyController) error { + return nil +} +func postReadExtractFeatureMembershipPolicycontrollerFields(r *FeatureMembership, o *FeatureMembershipPolicycontroller) error { + vPolicyControllerHubConfig := o.PolicyControllerHubConfig + if vPolicyControllerHubConfig == nil { + // note: explicitly not the empty object. + vPolicyControllerHubConfig = &FeatureMembershipPolicycontrollerPolicyControllerHubConfig{} + } + if err := extractFeatureMembershipPolicycontrollerPolicyControllerHubConfigFields(r, vPolicyControllerHubConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vPolicyControllerHubConfig) { + o.PolicyControllerHubConfig = vPolicyControllerHubConfig + } + return nil +} +func postReadExtractFeatureMembershipPolicycontrollerPolicyControllerHubConfigFields(r *FeatureMembership, o *FeatureMembershipPolicycontrollerPolicyControllerHubConfig) error { + vMonitoring := o.Monitoring + if vMonitoring == nil { + // note: explicitly not the empty object. 
+ vMonitoring = &FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring{} + } + if err := extractFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringFields(r, vMonitoring); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vMonitoring) { + o.Monitoring = vMonitoring + } + vPolicyContent := o.PolicyContent + if vPolicyContent == nil { + // note: explicitly not the empty object. + vPolicyContent = &FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent{} + } + if err := extractFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentFields(r, vPolicyContent); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vPolicyContent) { + o.PolicyContent = vPolicyContent + } + return nil +} +func postReadExtractFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringFields(r *FeatureMembership, o *FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring) error { + return nil +} +func postReadExtractFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentFields(r *FeatureMembership, o *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent) error { + vTemplateLibrary := o.TemplateLibrary + if vTemplateLibrary == nil { + // note: explicitly not the empty object. 
+ vTemplateLibrary = &FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary{} + } + if err := extractFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryFields(r, vTemplateLibrary); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vTemplateLibrary) { + o.TemplateLibrary = vTemplateLibrary + } + return nil +} +func postReadExtractFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryFields(r *FeatureMembership, o *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary) error { + return nil +} +func postReadExtractFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesFields(r *FeatureMembership, o *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles) error { + return nil +} +func postReadExtractFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsFields(r *FeatureMembership, o *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs) error { + vContainerResources := o.ContainerResources + if vContainerResources == nil { + // note: explicitly not the empty object. + vContainerResources = &FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources{} + } + if err := extractFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesFields(r, vContainerResources); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vContainerResources) { + o.ContainerResources = vContainerResources + } + return nil +} +func postReadExtractFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesFields(r *FeatureMembership, o *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources) error { + vLimits := o.Limits + if vLimits == nil { + // note: explicitly not the empty object. 
+ vLimits = &FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits{} + } + if err := extractFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimitsFields(r, vLimits); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vLimits) { + o.Limits = vLimits + } + vRequests := o.Requests + if vRequests == nil { + // note: explicitly not the empty object. + vRequests = &FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests{} + } + if err := extractFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequestsFields(r, vRequests); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vRequests) { + o.Requests = vRequests + } + return nil +} +func postReadExtractFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimitsFields(r *FeatureMembership, o *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits) error { + return nil +} +func postReadExtractFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequestsFields(r *FeatureMembership, o *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests) error { + return nil +} +func postReadExtractFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsFields(r *FeatureMembership, o *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations) error { + return nil +} diff --git a/mmv1/third_party/terraform/services/gkehub/feature_membership_schema.go.tmpl b/mmv1/third_party/terraform/services/gkehub/feature_membership_schema.go.tmpl new file mode 100644 index 000000000000..fcc410a0896e --- /dev/null +++ b/mmv1/third_party/terraform/services/gkehub/feature_membership_schema.go.tmpl @@ -0,0 +1,807 @@ +package 
gkehub + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +func DCLFeatureMembershipSchema() *dcl.Schema { + return &dcl.Schema{ + Info: &dcl.Info{ + Title: "GkeHub/FeatureMembership", + Description: "The GkeHub FeatureMembership resource", + StructName: "FeatureMembership", + Mutex: "{{ "{{" }}project{{ "}}" }}/{{ "{{" }}location{{ "}}" }}/{{ "{{" }}feature{{ "}}" }}", + }, + Paths: &dcl.Paths{ + Get: &dcl.Path{ + Description: "The function used to get information about a FeatureMembership", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "featureMembership", + Required: true, + Description: "A full instance of a FeatureMembership", + }, + }, + }, + Apply: &dcl.Path{ + Description: "The function used to apply information about a FeatureMembership", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "featureMembership", + Required: true, + Description: "A full instance of a FeatureMembership", + }, + }, + }, + Delete: &dcl.Path{ + Description: "The function used to delete a FeatureMembership", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "featureMembership", + Required: true, + Description: "A full instance of a FeatureMembership", + }, + }, + }, + DeleteAll: &dcl.Path{ + Description: "The function used to delete all FeatureMembership", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "project", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + dcl.PathParameters{ + Name: "location", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + dcl.PathParameters{ + Name: "feature", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + }, + }, + List: &dcl.Path{ + Description: "The function used to list information about many FeatureMembership", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "project", + Required: true, + Schema: 
&dcl.PathParametersSchema{ + Type: "string", + }, + }, + dcl.PathParameters{ + Name: "location", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + dcl.PathParameters{ + Name: "feature", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + }, + }, + }, + Components: &dcl.Components{ + Schemas: map[string]*dcl.Component{ + "FeatureMembership": &dcl.Component{ + Title: "FeatureMembership", + ID: "projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/features/{{ "{{" }}feature{{ "}}" }}/memberships/{{ "{{" }}membership{{ "}}" }}", + ParentContainer: "project", + HasCreate: true, + SchemaProperty: dcl.Property{ + Type: "object", + Required: []string{ + "project", + "location", + "feature", + "membership", + }, + Properties: map[string]*dcl.Property{ + "configmanagement": &dcl.Property{ + Type: "object", + GoName: "Configmanagement", + GoType: "FeatureMembershipConfigmanagement", + Description: "Config Management-specific spec.", + Properties: map[string]*dcl.Property{ + "configSync": &dcl.Property{ + Type: "object", + GoName: "ConfigSync", + GoType: "FeatureMembershipConfigmanagementConfigSync", + Description: "Config Sync configuration for the cluster.", + SendEmpty: true, + Properties: map[string]*dcl.Property{ + "deploymentOverrides": &dcl.Property{ + Type: "array", + GoName: "DeploymentOverrides", + Description: "The override configurations for the Config Sync Deployments.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "object", + GoType: "FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides", + Properties: map[string]*dcl.Property{ + "containers": &dcl.Property{ + Type: "array", + GoName: "Containers", + Description: "The override configurations for the containers in the Deployment.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "object", + GoType: 
"FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers", + Properties: map[string]*dcl.Property{ + "containerName": &dcl.Property{ + Type: "string", + GoName: "ContainerName", + Description: "The name of the container.", + }, + "cpuLimit": &dcl.Property{ + Type: "string", + GoName: "CpuLimit", + Description: "The CPU limit of the container.", + }, + "cpuRequest": &dcl.Property{ + Type: "string", + GoName: "CpuRequest", + Description: "The CPU request of the container.", + }, + "memoryLimit": &dcl.Property{ + Type: "string", + GoName: "MemoryLimit", + Description: "The memory limit of the container.", + }, + "memoryRequest": &dcl.Property{ + Type: "string", + GoName: "MemoryRequest", + Description: "The memory request of the container.", + }, + }, + }, + }, + "deploymentName": &dcl.Property{ + Type: "string", + GoName: "DeploymentName", + Description: "The name of the Deployment.", + }, + "deploymentNamespace": &dcl.Property{ + Type: "string", + GoName: "DeploymentNamespace", + Description: "The namespace of the Deployment.", + }, + }, + }, + }, + "enabled": &dcl.Property{ + Type: "boolean", + GoName: "Enabled", + Description: "Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. 
If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field.", + }, + "git": &dcl.Property{ + Type: "object", + GoName: "Git", + GoType: "FeatureMembershipConfigmanagementConfigSyncGit", + Properties: map[string]*dcl.Property{ + "gcpServiceAccountEmail": &dcl.Property{ + Type: "string", + GoName: "GcpServiceAccountEmail", + Description: "The GCP Service Account Email used for auth when secretType is gcpServiceAccount.", + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Iam/ServiceAccount", + Field: "email", + }, + }, + }, + "httpsProxy": &dcl.Property{ + Type: "string", + GoName: "HttpsProxy", + Description: "URL for the HTTPS proxy to be used when communicating with the Git repo.", + }, + "policyDir": &dcl.Property{ + Type: "string", + GoName: "PolicyDir", + Description: "The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository.", + }, + "secretType": &dcl.Property{ + Type: "string", + GoName: "SecretType", + Description: "Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount or none. The validation of this is case-sensitive.", + }, + "syncBranch": &dcl.Property{ + Type: "string", + GoName: "SyncBranch", + Description: "The branch of the repository to sync from. Default: master.", + }, + "syncRepo": &dcl.Property{ + Type: "string", + GoName: "SyncRepo", + Description: "The URL of the Git repository to use as the source of truth.", + }, + "syncRev": &dcl.Property{ + Type: "string", + GoName: "SyncRev", + Description: "Git revision (tag or hash) to check out. Default HEAD.", + }, + "syncWaitSecs": &dcl.Property{ + Type: "string", + GoName: "SyncWaitSecs", + Description: "Period in seconds between consecutive syncs. 
Default: 15.", + }, + }, + }, + "metricsGcpServiceAccountEmail": &dcl.Property{ + Type: "string", + GoName: "MetricsGcpServiceAccountEmail", + Description: "Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring.", + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Iam/ServiceAccount", + Field: "email", + }, + }, + Deprecated: true, + }, + "oci": &dcl.Property{ + Type: "object", + GoName: "Oci", + GoType: "FeatureMembershipConfigmanagementConfigSyncOci", + Properties: map[string]*dcl.Property{ + "gcpServiceAccountEmail": &dcl.Property{ + Type: "string", + GoName: "GcpServiceAccountEmail", + Description: "The GCP Service Account Email used for auth when secret_type is gcpserviceaccount. ", + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Iam/ServiceAccount", + Field: "email", + }, + }, + }, + "policyDir": &dcl.Property{ + Type: "string", + GoName: "PolicyDir", + Description: "The absolute path of the directory that contains the local resources. Default: the root directory of the image.", + }, + "secretType": &dcl.Property{ + Type: "string", + GoName: "SecretType", + Description: "Type of secret configured for access to the OCI Image. Must be one of gcenode, gcpserviceaccount or none. The validation of this is case-sensitive.", + }, + "syncRepo": &dcl.Property{ + Type: "string", + GoName: "SyncRepo", + Description: "The OCI image repository URL for the package to sync from. e.g. LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME.", + }, + "syncWaitSecs": &dcl.Property{ + Type: "string", + GoName: "SyncWaitSecs", + Description: "Period in seconds(int64 format) between consecutive syncs. 
Default: 15.", + }, + }, + }, + "preventDrift": &dcl.Property{ + Type: "boolean", + GoName: "PreventDrift", + Description: "Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts.", + ServerDefault: true, + }, + "sourceFormat": &dcl.Property{ + Type: "string", + GoName: "SourceFormat", + Description: "Specifies whether the Config Sync Repo is in \"hierarchical\" or \"unstructured\" mode.", + }, + "stopSyncing": &dcl.Property{ + Type: "boolean", + GoName: "StopSyncing", + Description: "Set to true to stop syncing configs for a single cluster. Default: false.", + }, + }, + }, + "hierarchyController": &dcl.Property{ + Type: "object", + GoName: "HierarchyController", + GoType: "FeatureMembershipConfigmanagementHierarchyController", + Description: "Hierarchy Controller configuration for the cluster.", + SendEmpty: true, + Properties: map[string]*dcl.Property{ + "enableHierarchicalResourceQuota": &dcl.Property{ + Type: "boolean", + GoName: "EnableHierarchicalResourceQuota", + Description: "Whether hierarchical resource quota is enabled in this cluster.", + SendEmpty: true, + }, + "enablePodTreeLabels": &dcl.Property{ + Type: "boolean", + GoName: "EnablePodTreeLabels", + Description: "Whether pod tree labels are enabled in this cluster.", + SendEmpty: true, + }, + "enabled": &dcl.Property{ + Type: "boolean", + GoName: "Enabled", + Description: "**DEPRECATED** Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. 
Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead.", + SendEmpty: true, + }, + }, + }, + "management": &dcl.Property{ + Type: "string", + GoName: "Management", + GoType: "FeatureMembershipConfigmanagementManagementEnum", + Description: "Set this field to MANAGEMENT_AUTOMATIC to enable Config Sync auto-upgrades, and set this field to MANAGEMENT_MANUAL or MANAGEMENT_UNSPECIFIED to disable Config Sync auto-upgrades.", + ServerDefault: true, + Enum: []string{ + "MANAGEMENT_UNSPECIFIED", + "MANAGEMENT_AUTOMATIC", + "MANAGEMENT_MANUAL", + }, + }, + "policyController": &dcl.Property{ + Type: "object", + GoName: "PolicyController", + GoType: "FeatureMembershipConfigmanagementPolicyController", + Description: "**DEPRECATED** Configuring Policy Controller through the configmanagement feature is no longer recommended. Use the policycontroller feature instead.", + Properties: map[string]*dcl.Property{ + "auditIntervalSeconds": &dcl.Property{ + Type: "string", + GoName: "AuditIntervalSeconds", + Description: "Sets the interval for Policy Controller Audit Scans (in seconds). When set to 0, this disables audit functionality altogether.", + }, + "enabled": &dcl.Property{ + Type: "boolean", + GoName: "Enabled", + Description: "Enables the installation of Policy Controller. If false, the rest of PolicyController fields take no effect.", + }, + "exemptableNamespaces": &dcl.Property{ + Type: "array", + GoName: "ExemptableNamespaces", + Description: "The set of namespaces that are excluded from Policy Controller checks. 
Namespaces do not need to currently exist on the cluster.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + "logDeniesEnabled": &dcl.Property{ + Type: "boolean", + GoName: "LogDeniesEnabled", + Description: "Logs all denies and dry run failures.", + }, + "monitoring": &dcl.Property{ + Type: "object", + GoName: "Monitoring", + GoType: "FeatureMembershipConfigmanagementPolicyControllerMonitoring", + Description: "Specifies the backends Policy Controller should export metrics to. For example, to specify metrics should be exported to Cloud Monitoring and Prometheus, specify backends: [\"cloudmonitoring\", \"prometheus\"]. Default: [\"cloudmonitoring\", \"prometheus\"]", + ServerDefault: true, + Properties: map[string]*dcl.Property{ + "backends": &dcl.Property{ + Type: "array", + GoName: "Backends", + Description: " Specifies the list of backends Policy Controller will export to. Specifying an empty value `[]` disables metrics export.", + ServerDefault: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum", + Enum: []string{ + "MONITORING_BACKEND_UNSPECIFIED", + "PROMETHEUS", + "CLOUD_MONITORING", + }, + }, + }, + }, + }, + "mutationEnabled": &dcl.Property{ + Type: "boolean", + GoName: "MutationEnabled", + Description: "Enable or disable mutation in policy controller. 
If true, mutation CRDs, webhook and controller deployment will be deployed to the cluster.", + }, + "referentialRulesEnabled": &dcl.Property{ + Type: "boolean", + GoName: "ReferentialRulesEnabled", + Description: "Enables the ability to use Constraint Templates that reference to objects other than the object currently being evaluated.", + }, + "templateLibraryInstalled": &dcl.Property{ + Type: "boolean", + GoName: "TemplateLibraryInstalled", + Description: "Installs the default template library along with Policy Controller.", + }, + }, + }, + "version": &dcl.Property{ + Type: "string", + GoName: "Version", + Description: "Optional. Version of ACM to install. Defaults to the latest version.", + ServerDefault: true, + }, + }, + }, + "feature": &dcl.Property{ + Type: "string", + GoName: "Feature", + Description: "The name of the feature", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Gkehub/Feature", + Field: "name", + Parent: true, + }, + }, + Parameter: true, + }, + "location": &dcl.Property{ + Type: "string", + GoName: "Location", + Description: "The location of the feature", + Immutable: true, + Parameter: true, + }, + "membership": &dcl.Property{ + Type: "string", + GoName: "Membership", + Description: "The name of the membership", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Gkehub/Membership", + Field: "name", + }, + }, + Parameter: true, + }, + "membershipLocation": &dcl.Property{ + Type: "string", + GoName: "MembershipLocation", + Description: "The location of the membership", + Immutable: true, + Parameter: true, + }, + "mesh": &dcl.Property{ + Type: "object", + GoName: "Mesh", + GoType: "FeatureMembershipMesh", + Description: "Manage Mesh Features", + Properties: map[string]*dcl.Property{ + "controlPlane": &dcl.Property{ + Type: "string", + GoName: "ControlPlane", + GoType: 
"FeatureMembershipMeshControlPlaneEnum", + Description: "**DEPRECATED** Whether to automatically manage Service Mesh control planes. Possible values: CONTROL_PLANE_MANAGEMENT_UNSPECIFIED, AUTOMATIC, MANUAL", + Enum: []string{ + "CONTROL_PLANE_MANAGEMENT_UNSPECIFIED", + "AUTOMATIC", + "MANUAL", + }, + }, + "management": &dcl.Property{ + Type: "string", + GoName: "Management", + GoType: "FeatureMembershipMeshManagementEnum", + Description: "Whether to automatically manage Service Mesh. Possible values: MANAGEMENT_UNSPECIFIED, MANAGEMENT_AUTOMATIC, MANAGEMENT_MANUAL", + Enum: []string{ + "MANAGEMENT_UNSPECIFIED", + "MANAGEMENT_AUTOMATIC", + "MANAGEMENT_MANUAL", + }, + }, + }, + }, + "policycontroller": &dcl.Property{ + Type: "object", + GoName: "Policycontroller", + GoType: "FeatureMembershipPolicycontroller", + Description: "Policy Controller-specific spec.", + Required: []string{ + "policyControllerHubConfig", + }, + Properties: map[string]*dcl.Property{ + "policyControllerHubConfig": &dcl.Property{ + Type: "object", + GoName: "PolicyControllerHubConfig", + GoType: "FeatureMembershipPolicycontrollerPolicyControllerHubConfig", + Description: "Policy Controller configuration for the cluster.", + Properties: map[string]*dcl.Property{ + "auditIntervalSeconds": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "AuditIntervalSeconds", + Description: "Sets the interval for Policy Controller Audit Scans (in seconds). When set to 0, this disables audit functionality altogether.", + }, + "constraintViolationLimit": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "ConstraintViolationLimit", + Description: "The maximum number of audit violations to be stored in a constraint. 
If not set, the internal default of 20 will be used.", + }, + "deploymentConfigs": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "object", + GoType: "FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs", + Properties: map[string]*dcl.Property{ + "containerResources": &dcl.Property{ + Type: "object", + GoName: "ContainerResources", + GoType: "FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources", + Description: "Container resource requirements.", + Conflicts: []string{ + "replicaCount", + "podAffinity", + "podTolerations", + }, + Properties: map[string]*dcl.Property{ + "limits": &dcl.Property{ + Type: "object", + GoName: "Limits", + GoType: "FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits", + Description: "Limits describes the maximum amount of compute resources allowed for use by the running container.", + Properties: map[string]*dcl.Property{ + "cpu": &dcl.Property{ + Type: "string", + GoName: "Cpu", + Description: "CPU requirement expressed in Kubernetes resource units.", + }, + "memory": &dcl.Property{ + Type: "string", + GoName: "Memory", + Description: "Memory requirement expressed in Kubernetes resource units.", + }, + }, + }, + "requests": &dcl.Property{ + Type: "object", + GoName: "Requests", + GoType: "FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests", + Description: "Requests describes the amount of compute resources reserved for the container by the kube-scheduler.", + Properties: map[string]*dcl.Property{ + "cpu": &dcl.Property{ + Type: "string", + GoName: "Cpu", + Description: "CPU requirement expressed in Kubernetes resource units.", + }, + "memory": &dcl.Property{ + Type: "string", + GoName: "Memory", + Description: "Memory requirement expressed in Kubernetes resource units.", + }, + }, + }, + }, + }, + "podAffinity": &dcl.Property{ + Type: "string", 
+ GoName: "PodAffinity", + GoType: "FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnum", + Description: "Pod affinity configuration. Possible values: AFFINITY_UNSPECIFIED, NO_AFFINITY, ANTI_AFFINITY", + Conflicts: []string{ + "replicaCount", + "containerResources", + "podTolerations", + }, + Enum: []string{ + "AFFINITY_UNSPECIFIED", + "NO_AFFINITY", + "ANTI_AFFINITY", + }, + }, + "podTolerations": &dcl.Property{ + Type: "array", + GoName: "PodTolerations", + Description: "Pod tolerations of node taints.", + Conflicts: []string{ + "replicaCount", + "containerResources", + "podAffinity", + }, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "object", + GoType: "FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations", + Properties: map[string]*dcl.Property{ + "effect": &dcl.Property{ + Type: "string", + GoName: "Effect", + Description: "Matches a taint effect.", + }, + "key": &dcl.Property{ + Type: "string", + GoName: "Key", + Description: "Matches a taint key (not necessarily unique).", + }, + "operator": &dcl.Property{ + Type: "string", + GoName: "Operator", + Description: "Matches a taint operator.", + }, + "value": &dcl.Property{ + Type: "string", + GoName: "Value", + Description: "Matches a taint value.", + }, + }, + }, + }, + "replicaCount": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "ReplicaCount", + Description: "Pod replica count.", + Conflicts: []string{ + "containerResources", + "podAffinity", + "podTolerations", + }, + }, + }, + }, + GoName: "DeploymentConfigs", + Description: "Map of deployment configs to deployments (\"admission\", \"audit\", \"mutation\").", + ServerDefault: true, + }, + "exemptableNamespaces": &dcl.Property{ + Type: "array", + GoName: "ExemptableNamespaces", + Description: "The set of namespaces that are excluded from Policy Controller checks. 
Namespaces do not need to currently exist on the cluster.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + "installSpec": &dcl.Property{ + Type: "string", + GoName: "InstallSpec", + GoType: "FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum", + Description: "Configures the mode of the Policy Controller installation. Possible values: INSTALL_SPEC_UNSPECIFIED, INSTALL_SPEC_NOT_INSTALLED, INSTALL_SPEC_ENABLED, INSTALL_SPEC_SUSPENDED, INSTALL_SPEC_DETACHED", + Enum: []string{ + "INSTALL_SPEC_UNSPECIFIED", + "INSTALL_SPEC_NOT_INSTALLED", + "INSTALL_SPEC_ENABLED", + "INSTALL_SPEC_SUSPENDED", + "INSTALL_SPEC_DETACHED", + }, + }, + "logDeniesEnabled": &dcl.Property{ + Type: "boolean", + GoName: "LogDeniesEnabled", + Description: "Logs all denies and dry run failures.", + }, + "monitoring": &dcl.Property{ + Type: "object", + GoName: "Monitoring", + GoType: "FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring", + Description: "Specifies the backends Policy Controller should export metrics to. For example, to specify metrics should be exported to Cloud Monitoring and Prometheus, specify backends: [\"cloudmonitoring\", \"prometheus\"]. Default: [\"cloudmonitoring\", \"prometheus\"]", + ServerDefault: true, + Properties: map[string]*dcl.Property{ + "backends": &dcl.Property{ + Type: "array", + GoName: "Backends", + Description: " Specifies the list of backends Policy Controller will export to. 
Specifying an empty value `[]` disables metrics export.", + ServerDefault: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum", + Enum: []string{ + "MONITORING_BACKEND_UNSPECIFIED", + "PROMETHEUS", + "CLOUD_MONITORING", + }, + }, + }, + }, + }, + "mutationEnabled": &dcl.Property{ + Type: "boolean", + GoName: "MutationEnabled", + Description: "Enables the ability to mutate resources using Policy Controller.", + }, + "policyContent": &dcl.Property{ + Type: "object", + GoName: "PolicyContent", + GoType: "FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent", + Description: "Specifies the desired policy content on the cluster.", + ServerDefault: true, + Properties: map[string]*dcl.Property{ + "bundles": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "object", + GoType: "FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles", + Properties: map[string]*dcl.Property{ + "exemptedNamespaces": &dcl.Property{ + Type: "array", + GoName: "ExemptedNamespaces", + Description: "The set of namespaces to be exempted from the bundle.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + }, + }, + GoName: "Bundles", + Description: "map of bundle name to BundleInstallSpec. 
The bundle name maps to the `bundleName` key in the `policycontroller.gke.io/constraintData` annotation on a constraint.", + }, + "templateLibrary": &dcl.Property{ + Type: "object", + GoName: "TemplateLibrary", + GoType: "FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary", + Description: "Configures the installation of the Template Library.", + ServerDefault: true, + Properties: map[string]*dcl.Property{ + "installation": &dcl.Property{ + Type: "string", + GoName: "Installation", + GoType: "FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnum", + Description: "Configures the manner in which the template library is installed on the cluster. Possible values: INSTALLATION_UNSPECIFIED, NOT_INSTALLED, ALL", + Enum: []string{ + "INSTALLATION_UNSPECIFIED", + "NOT_INSTALLED", + "ALL", + }, + }, + }, + }, + }, + }, + "referentialRulesEnabled": &dcl.Property{ + Type: "boolean", + GoName: "ReferentialRulesEnabled", + Description: "Enables the ability to use Constraint Templates that reference to objects other than the object currently being evaluated.", + }, + }, + }, + "version": &dcl.Property{ + Type: "string", + GoName: "Version", + Description: "Optional. Version of Policy Controller to install. 
Defaults to the latest version.", + ServerDefault: true, + }, + }, + }, + "project": &dcl.Property{ + Type: "string", + GoName: "Project", + Description: "The project of the feature", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Cloudresourcemanager/Project", + Field: "name", + Parent: true, + }, + }, + Parameter: true, + }, + }, + }, + }, + }, + }, + } +} diff --git a/mmv1/third_party/terraform/services/gkehub/feature_schema.go.tmpl b/mmv1/third_party/terraform/services/gkehub/feature_schema.go.tmpl new file mode 100644 index 000000000000..9b8b04f8d265 --- /dev/null +++ b/mmv1/third_party/terraform/services/gkehub/feature_schema.go.tmpl @@ -0,0 +1,331 @@ +package gkehub + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +func DCLFeatureSchema() *dcl.Schema { + return &dcl.Schema{ + Info: &dcl.Info{ + Title: "GkeHub/Feature", + Description: "The GkeHub Feature resource", + StructName: "Feature", + Mutex: "{{ "{{" }}project{{ "}}" }}/{{ "{{" }}location{{ "}}" }}/{{ "{{" }}feature{{ "}}" }}", + }, + Paths: &dcl.Paths{ + Get: &dcl.Path{ + Description: "The function used to get information about a Feature", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "feature", + Required: true, + Description: "A full instance of a Feature", + }, + }, + }, + Apply: &dcl.Path{ + Description: "The function used to apply information about a Feature", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "feature", + Required: true, + Description: "A full instance of a Feature", + }, + }, + }, + Delete: &dcl.Path{ + Description: "The function used to delete a Feature", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "feature", + Required: true, + Description: "A full instance of a Feature", + }, + }, + }, + DeleteAll: &dcl.Path{ + Description: "The function used to delete all Feature", + Parameters: []dcl.PathParameters{ 
+ dcl.PathParameters{ + Name: "project", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + dcl.PathParameters{ + Name: "location", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + }, + }, + List: &dcl.Path{ + Description: "The function used to list information about many Feature", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "project", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + dcl.PathParameters{ + Name: "location", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + }, + }, + }, + Components: &dcl.Components{ + Schemas: map[string]*dcl.Component{ + "Feature": &dcl.Component{ + Title: "Feature", + ID: "projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/features/{{ "{{" }}name{{ "}}" }}", + ParentContainer: "project", + LabelsField: "labels", + HasCreate: true, + SchemaProperty: dcl.Property{ + Type: "object", + Required: []string{ + "project", + "location", + }, + Properties: map[string]*dcl.Property{ + "createTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "CreateTime", + ReadOnly: true, + Description: "Output only. When the Feature resource was created.", + Immutable: true, + }, + "deleteTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "DeleteTime", + ReadOnly: true, + Description: "Output only. 
When the Feature resource was deleted.", + Immutable: true, + }, + "labels": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "Labels", + Description: "GCP labels for this Feature.", + }, + "location": &dcl.Property{ + Type: "string", + GoName: "Location", + Description: "The location for the resource", + Immutable: true, + Parameter: true, + }, + "name": &dcl.Property{ + Type: "string", + GoName: "Name", + Description: "The full, unique name of this Feature resource", + Immutable: true, + HasLongForm: true, + }, + "project": &dcl.Property{ + Type: "string", + GoName: "Project", + Description: "The project for the resource", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Cloudresourcemanager/Project", + Field: "name", + Parent: true, + }, + }, + Parameter: true, + }, + "resourceState": &dcl.Property{ + Type: "object", + GoName: "ResourceState", + GoType: "FeatureResourceState", + ReadOnly: true, + Description: "State of the Feature resource itself.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "hasResources": &dcl.Property{ + Type: "boolean", + GoName: "HasResources", + ReadOnly: true, + Description: "Whether this Feature has outstanding resources that need to be cleaned up before it can be disabled.", + Immutable: true, + }, + "state": &dcl.Property{ + Type: "string", + GoName: "State", + GoType: "FeatureResourceStateStateEnum", + ReadOnly: true, + Description: "The current state of the Feature resource in the Hub API. Possible values: STATE_UNSPECIFIED, ENABLING, ACTIVE, DISABLING, UPDATING, SERVICE_UPDATING", + Immutable: true, + Enum: []string{ + "STATE_UNSPECIFIED", + "ENABLING", + "ACTIVE", + "DISABLING", + "UPDATING", + "SERVICE_UPDATING", + }, + }, + }, + }, + "spec": &dcl.Property{ + Type: "object", + GoName: "Spec", + GoType: "FeatureSpec", + Description: "Optional. Hub-wide Feature configuration. 
If this Feature does not support any Hub-wide configuration, this field may be unused.", + Properties: map[string]*dcl.Property{ + "fleetobservability": &dcl.Property{ + Type: "object", + GoName: "Fleetobservability", + GoType: "FeatureSpecFleetobservability", + Description: "Fleet Observability spec.", + Properties: map[string]*dcl.Property{ + "loggingConfig": &dcl.Property{ + Type: "object", + GoName: "LoggingConfig", + GoType: "FeatureSpecFleetobservabilityLoggingConfig", + Description: "Fleet Observability Logging-specific spec.", + Properties: map[string]*dcl.Property{ + "defaultConfig": &dcl.Property{ + Type: "object", + GoName: "DefaultConfig", + GoType: "FeatureSpecFleetobservabilityLoggingConfigDefaultConfig", + Description: "Specified if applying the default routing config to logs not specified in other configs.", + Properties: map[string]*dcl.Property{ + "mode": &dcl.Property{ + Type: "string", + GoName: "Mode", + GoType: "FeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum", + Description: "The logs routing mode Possible values: MODE_UNSPECIFIED, COPY, MOVE", + Enum: []string{ + "MODE_UNSPECIFIED", + "COPY", + "MOVE", + }, + }, + }, + }, + "fleetScopeLogsConfig": &dcl.Property{ + Type: "object", + GoName: "FleetScopeLogsConfig", + GoType: "FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig", + Description: "Specified if applying the routing config to all logs for all fleet scopes.", + Properties: map[string]*dcl.Property{ + "mode": &dcl.Property{ + Type: "string", + GoName: "Mode", + GoType: "FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum", + Description: "The logs routing mode Possible values: MODE_UNSPECIFIED, COPY, MOVE", + Enum: []string{ + "MODE_UNSPECIFIED", + "COPY", + "MOVE", + }, + }, + }, + }, + }, + }, + }, + }, + "multiclusteringress": &dcl.Property{ + Type: "object", + GoName: "Multiclusteringress", + GoType: "FeatureSpecMulticlusteringress", + Description: "Multicluster Ingress-specific 
spec.", + Required: []string{ + "configMembership", + }, + Properties: map[string]*dcl.Property{ + "configMembership": &dcl.Property{ + Type: "string", + GoName: "ConfigMembership", + Description: "Fully-qualified Membership name which hosts the MultiClusterIngress CRD. Example: `projects/foo-proj/locations/global/memberships/bar`", + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Gkehub/Membership", + Field: "name", + }, + }, + }, + }, + }, + }, + }, + "state": &dcl.Property{ + Type: "object", + GoName: "State", + GoType: "FeatureState", + ReadOnly: true, + Description: "Output only. The Hub-wide Feature state", + Immutable: true, + Properties: map[string]*dcl.Property{ + "state": &dcl.Property{ + Type: "object", + GoName: "State", + GoType: "FeatureStateState", + ReadOnly: true, + Description: "Output only. The \"running state\" of the Feature in this Hub.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "code": &dcl.Property{ + Type: "string", + GoName: "Code", + GoType: "FeatureStateStateCodeEnum", + ReadOnly: true, + Description: "The high-level, machine-readable status of this Feature. Possible values: CODE_UNSPECIFIED, OK, WARNING, ERROR", + Immutable: true, + Enum: []string{ + "CODE_UNSPECIFIED", + "OK", + "WARNING", + "ERROR", + }, + }, + "description": &dcl.Property{ + Type: "string", + GoName: "Description", + ReadOnly: true, + Description: "A human-readable description of the current status.", + Immutable: true, + }, + "updateTime": &dcl.Property{ + Type: "string", + GoName: "UpdateTime", + ReadOnly: true, + Description: "The time this status and any related Feature-specific details were updated. A timestamp in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits. 
Examples: \"2014-10-02T15:01:23Z\" and \"2014-10-02T15:01:23.045123456Z\"", + Immutable: true, + }, + }, + }, + }, + }, + "updateTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "UpdateTime", + ReadOnly: true, + Description: "Output only. When the Feature resource was last updated.", + Immutable: true, + }, + }, + }, + }, + }, + }, + } +} diff --git a/mmv1/third_party/terraform/services/gkehub/hub_beta_utils.go.tmpl b/mmv1/third_party/terraform/services/gkehub/hub_beta_utils.go.tmpl new file mode 100644 index 000000000000..16fed25491ff --- /dev/null +++ b/mmv1/third_party/terraform/services/gkehub/hub_beta_utils.go.tmpl @@ -0,0 +1,122 @@ +package gkehub + +import ( + "bytes" + "context" + "strings" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource/operations" +) + +func expandHubReferenceLink(_ *Client, val *string, _ *Membership) (interface{}, error) { + if val == nil { + return nil, nil + } + + v := *val + + if strings.HasPrefix(v, "https:") { + return strings.Replace(strings.Replace(strings.Replace(*val, "https:", "", 1), "v1/", "", 1), "v1beta1/", "", 1), nil + } else if strings.HasPrefix(v, "//container.googleapis.com") { + return v, nil + } + return "//container.googleapis.com/" + v, nil +} + +func flattenHubReferenceLink(_ *Client, config interface{}, _ *Membership) *string { + v, ok := config.(string) + if !ok { + return nil + } + + v = strings.Replace(v, "//container.googleapis.com/", "", 1) + + return &v +} + +// Feature has custom url methods because it uses v1beta endpoints instead of v1beta1. 
+func (r *Feature) getURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/features/{{ "{{" }}name{{ "}}" }}", "https://gkehub.googleapis.com/v1beta/", userBasePath, params), nil +} + +func (r *Feature) listURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/features", "https://gkehub.googleapis.com/v1beta/", userBasePath, params), nil + +} + +func (r *Feature) createURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "name": dcl.ValueOrEmptyString(nr.Name), + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/features?featureId={{ "{{" }}name{{ "}}" }}", "https://gkehub.googleapis.com/v1beta/", userBasePath, params), nil + +} + +func (r *Feature) deleteURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "name": dcl.ValueOrEmptyString(nr.Name), + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/features/{{ "{{" }}name{{ "}}" }}", "https://gkehub.googleapis.com/v1beta/", userBasePath, params), nil +} + +func (op *updateFeatureUpdateFeatureOperation) do(ctx context.Context, r *Feature, c *Client) error { + _, err := c.GetFeature(ctx, r) + 
if err != nil { + return err + } + + u, err := r.updateURL(c.Config.BasePath, "UpdateFeature") + if err != nil { + return err + } + u = strings.Replace(u, "v1beta1", "v1beta", 1) + u, err = dcl.AddQueryParams(u, map[string]string{"updateMask": "labels,spec"}) + if err != nil { + return err + } + + req, err := newUpdateFeatureUpdateFeatureRequest(ctx, r, c) + if err != nil { + return err + } + + c.Config.Logger.Infof("Created update: %#v", req) + body, err := marshalUpdateFeatureUpdateFeatureRequest(c, req) + if err != nil { + return err + } + resp, err := dcl.SendRequest(ctx, c.Config, "PATCH", u, bytes.NewBuffer(body), c.Config.RetryProvider) + if err != nil { + return err + } + + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + err = o.Wait(ctx, c.Config, "https://gkehub.googleapis.com/v1beta/", "GET") + + if err != nil { + return err + } + + return nil +} diff --git a/mmv1/third_party/terraform/services/gkehub/hub_utils.go.tmpl b/mmv1/third_party/terraform/services/gkehub/hub_utils.go.tmpl new file mode 100644 index 000000000000..deed358e280c --- /dev/null +++ b/mmv1/third_party/terraform/services/gkehub/hub_utils.go.tmpl @@ -0,0 +1,368 @@ +package gkehub + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "strings" + +{{- if ne $.TargetVersionName "ga" }} + "google.golang.org/api/googleapi" +{{- end }} + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource/operations" +{{- if eq $.TargetVersionName "ga" }} + "google.golang.org/api/googleapi" +{{- end }} +) + +// getMembershipSpecs returns a map of membership specs taken from the get response of the feature membership's feature object. 
+func getMembershipSpecs(ctx context.Context, r *FeatureMembership, c *Client) (map[string]any, error) { + u, err := r.getURL(c.Config.BasePath) + if err != nil { + return nil, err + } + u = strings.Replace(u, "v1beta1", "v1beta", 1) + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + b, err := io.ReadAll(resp.Response.Body) + if err != nil { + return nil, err + } + var m map[string]any + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + membershipSpecs, ok := m["membershipSpecs"].(map[string]any) + if !ok { + return map[string]any{}, nil + } + return membershipSpecs, nil +} + +// Return the value if it exists, default otherwise +func valueOrDefaultString(val *string, def string) string { + if dcl.ValueOrEmptyString(val) == "" { + return def + } + return dcl.ValueOrEmptyString(val) +} + +// Return the full key for a given FeatureMembership's entry in the membershipSpecs field. +func membershipSpecKey(r *FeatureMembership) string { + params := map[string]any{ + "project": dcl.ValueOrEmptyString(r.Project), + "location": valueOrDefaultString(r.MembershipLocation, "global"), + "membership": dcl.ValueOrEmptyString(r.Membership), + } + + return dcl.Nprintf("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/memberships/{{ "{{" }}membership{{ "}}" }}", params) +} + +// Find and return the key and value in membershipSpecs matching the given membership. 
+func findMembershipSpec(membership string, membershipLocation string, membershipSpecs map[string]any) (string, map[string]any, error) { + for key, value := range membershipSpecs { + if strings.HasSuffix(key, fmt.Sprintf("%s/memberships/%s", membershipLocation, membership)) { + spec, ok := value.(map[string]any) + if !ok { + return "", nil, errors.New("membership spec was not of map type") + } + return key, spec, nil + } + } + return "", nil, &googleapi.Error{ + Code: 404, + Message: "feature membership not found in feature membership specs", + } +} + +func sendFeatureUpdate(ctx context.Context, req map[string]any, c *Client, u string) error { + c.Config.Logger.Infof("Created update: %#v", req) + body, err := json.Marshal(req) + if err != nil { + return err + } + u, err = dcl.AddQueryParams(u, map[string]string{"updateMask": "membershipSpecs"}) + if err != nil { + return err + } + resp, err := dcl.SendRequest(ctx, c.Config, "PATCH", u, bytes.NewBuffer(body), c.Config.RetryProvider) + if err != nil { + return err + } + + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + err = o.Wait(ctx, c.Config, "https://gkehub.googleapis.com/v1beta/", "GET") + + if err != nil { + return err + } + + return nil +} + +func (op *createFeatureMembershipOperation) do(ctx context.Context, r *FeatureMembership, c *Client) error { + u, err := r.createURL(c.Config.BasePath) + if err != nil { + return err + } + u = strings.Replace(u, "v1beta1", "v1beta", 1) + + nr := r.urlNormalized() + membershipSpecs, err := getMembershipSpecs(ctx, nr, c) + if err != nil { + return err + } + m, err := expandFeatureMembership(c, nr) + if err != nil { + return err + } + if err := dcl.PutMapEntry(membershipSpecs, []string{membershipSpecKey(nr)}, m); err != nil { + return err + } + req := map[string]any{ + "membershipSpecs": membershipSpecs, + } + return sendFeatureUpdate(ctx, req, c, u) +} + +// GetFeatureMembership returns a feature 
membership object retrieved from the membershipSpecs field of a feature. +func (c *Client) GetFeatureMembership(ctx context.Context, r *FeatureMembership) (*FeatureMembership, error) { + nr := r.urlNormalized() + membershipSpecs, err := getMembershipSpecs(ctx, nr, c) + if err != nil { + return nil, err + } + _, spec, err := findMembershipSpec(dcl.ValueOrEmptyString(nr.Membership), valueOrDefaultString(nr.MembershipLocation, "global"), membershipSpecs) + if err != nil { + return nil, err + } + result, err := unmarshalMapFeatureMembership(spec, c, r) + if err != nil { + return nil, err + } + result.Project = r.Project + result.Location = r.Location + result.Feature = r.Feature + result.Membership = r.Membership + + c.Config.Logger.Infof("Retrieved raw result state: %v", result) + c.Config.Logger.Infof("Canonicalizing with specified state: %v", r) + result, err = canonicalizeFeatureMembershipNewState(c, result, r) + if err != nil { + return nil, err + } + c.Config.Logger.Infof("Created result state: %v", result) + + return result, nil +} + +// HasNext always returns false because a feature membership list never has a next page. +func (l *FeatureMembershipList) HasNext() bool { + return false +} + +// Next returns nil because it will never be called. +func (l *FeatureMembershipList) Next(_ context.Context, _ *Client) error { + return nil +} + +// ListFeatureMembership returns a list of feature memberships retrieved from the membershipSpecs field of a feature. 
+func (c *Client) ListFeatureMembership(ctx context.Context, project, location, feature string) (*FeatureMembershipList, error) { + r := &FeatureMembership{ + Project: &project, + Location: &location, + Feature: &feature, + } + membershipSpecs, err := getMembershipSpecs(ctx, r, c) + if err != nil { + return nil, err + } + var list *FeatureMembershipList + for key, spec := range membershipSpecs { + m, ok := spec.(map[string]any) + if !ok { + return nil, errors.New("membership spec was not of map type") + } + ri, err := unmarshalMapFeatureMembership(m, c, r) + if err != nil { + return nil, err + } + ri.Project = r.Project + ri.Location = r.Location + ri.Feature = r.Feature + ri.Membership = dcl.SelfLinkToName(&key) + list.Items = append(list.Items, ri) + } + return list, nil +} + +func (op *updateFeatureMembershipUpdateFeatureMembershipOperation) do(ctx context.Context, r *FeatureMembership, c *Client) error { + nr := r.urlNormalized() + u, err := r.updateURL(c.Config.BasePath, "UpdateFeatureMembership") + if err != nil { + return err + } + u = strings.Replace(u, "v1beta1", "v1beta", 1) + + membershipSpecs, err := getMembershipSpecs(ctx, r, c) + if err != nil { + return err + } + key, _, err := findMembershipSpec(dcl.ValueOrEmptyString(nr.Membership), valueOrDefaultString(nr.MembershipLocation, "global"), membershipSpecs) + if err != nil { + return err + } + m, err := expandFeatureMembership(c, r) + if err != nil { + return err + } + if err := dcl.PutMapEntry(membershipSpecs, []string{key}, m); err != nil { + return err + } + req := map[string]any{ + "membershipSpecs": membershipSpecs, + } + return sendFeatureUpdate(ctx, req, c, u) +} + +func (op *deleteFeatureMembershipOperation) do(ctx context.Context, r *FeatureMembership, c *Client) error { + nr := r.urlNormalized() + u, err := nr.deleteURL(c.Config.BasePath) + if err != nil { + return err + } + u = strings.Replace(u, "v1beta1", "v1beta", 1) + + membershipSpecs, err := getMembershipSpecs(ctx, nr, c) + if err != 
nil { + return err + } + key, _, err := findMembershipSpec(dcl.ValueOrEmptyString(nr.Membership), valueOrDefaultString(nr.MembershipLocation, "global"), membershipSpecs) + if err != nil { + return err + } + membershipSpecs[key] = map[string]any{} + req := map[string]any{ + "membershipSpecs": membershipSpecs, + } + return sendFeatureUpdate(ctx, req, c, u) +} + +// CompareFeatureMembershipConfigmanagementHierarchyControllerNewStyle exists only for unit-testing the diff library. +func CompareFeatureMembershipConfigmanagementHierarchyControllerNewStyle(d, a any, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + return compareFeatureMembershipConfigmanagementHierarchyControllerNewStyle(d, a, fn) +} + +// This function behaves the same way as the generated diff function, except that it explicitly +// checks for emptiness as well. +func emptyHNCSameAsAllFalse(d, a any) bool { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*FeatureMembershipConfigmanagementHierarchyController) + if !ok { + desiredNotPointer, ok := d.(FeatureMembershipConfigmanagementHierarchyController) + if !ok { + fmt.Printf("obj %v is not a FeatureMembershipConfigmanagementHierarchyController or *FeatureMembershipConfigmanagementHierarchyController\n", d) + return false + } + desired = &desiredNotPointer + } + actual, ok := a.(*FeatureMembershipConfigmanagementHierarchyController) + if !ok { + actualNotPointer, ok := a.(FeatureMembershipConfigmanagementHierarchyController) + if !ok { + fmt.Printf("obj %v is not a FeatureMembershipConfigmanagementHierarchyController\n", a) + return false + } + actual = &actualNotPointer + } + + if actual == nil && desired == nil { + return true + } + if actual == nil || desired == nil { + return false + } + + if ds, err := dcl.Diff(desired.Enabled, actual.Enabled, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, dcl.FieldName{FieldName: "Enabled"}); len(ds) != 0 || err != nil { + if err != nil { + fmt.Print(err) + return false + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.EnablePodTreeLabels, actual.EnablePodTreeLabels, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, dcl.FieldName{FieldName: "EnablePodTreeLabels"}); len(ds) != 0 || err != nil { + if err != nil { + fmt.Print(err) + return false + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.EnableHierarchicalResourceQuota, actual.EnableHierarchicalResourceQuota, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, dcl.FieldName{FieldName: "EnableHierarchicalResourceQuota"}); len(ds) != 0 || err != nil { + if err != nil { + fmt.Print(err) + return false + } + diffs = append(diffs, ds...) + } + + if len(diffs) > 0 { + return false + } + + if desired.Empty() != actual.Empty() { + return false + } + return true +} + +func flattenHierarchyControllerConfig(c *Client, i any, v *FeatureMembership) *FeatureMembershipConfigmanagementHierarchyController { + m, ok := i.(map[string]any) + if !ok { + return nil + } + + r := &FeatureMembershipConfigmanagementHierarchyController{} + + // Compared to the generated code, we removed the part where we skip flattening the API response + // if the return value is empty (i.e. HNC = {}). This is because the Hub API returns the same + // empty object for both {} (empty config) and {fieldA: false, fieldB: false, fieldC: false}. We + // always flatten the response into the latter form i.e. explicitly stating false values, so that + // it fits more easily into the declarative pattern and avoids a permadiff bug. 
+ r.Enabled = dcl.FlattenBool(m["enabled"]) + r.EnablePodTreeLabels = dcl.FlattenBool(m["enablePodTreeLabels"]) + r.EnableHierarchicalResourceQuota = dcl.FlattenBool(m["enableHierarchicalResourceQuota"]) + + return r +} + +func expandHierarchyControllerConfig(c *Client, f *FeatureMembershipConfigmanagementHierarchyController, res *FeatureMembership) (map[string]any, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]any) + if v := f.Enabled; !dcl.IsEmptyValueIndirect(v) { + m["enabled"] = v + } + if v := f.EnablePodTreeLabels; !dcl.IsEmptyValueIndirect(v) { + m["enablePodTreeLabels"] = v + } + if v := f.EnableHierarchicalResourceQuota; !dcl.IsEmptyValueIndirect(v) { + m["enableHierarchicalResourceQuota"] = v + } + + return m, nil +} diff --git a/mmv1/third_party/terraform/services/gkehub/membership.go.tmpl b/mmv1/third_party/terraform/services/gkehub/membership.go.tmpl new file mode 100644 index 000000000000..e66ec693cda0 --- /dev/null +++ b/mmv1/third_party/terraform/services/gkehub/membership.go.tmpl @@ -0,0 +1,902 @@ +package gkehub + +import ( + "bytes" + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "time" + + "google.golang.org/api/googleapi" + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +type Membership struct { + Endpoint *MembershipEndpoint `json:"endpoint"` + Name *string `json:"name"` + Labels map[string]string `json:"labels"` + Description *string `json:"description"` + State *MembershipState `json:"state"` + CreateTime *string `json:"createTime"` + UpdateTime *string `json:"updateTime"` + DeleteTime *string `json:"deleteTime"` + ExternalId *string `json:"externalId"` + LastConnectionTime *string `json:"lastConnectionTime"` + UniqueId *string `json:"uniqueId"` + Authority *MembershipAuthority `json:"authority"` + InfrastructureType *MembershipInfrastructureTypeEnum `json:"infrastructureType"` + Project *string `json:"project"` + Location *string `json:"location"` +} + 
+func (r *Membership) String() string { + return dcl.SprintResource(r) +} + +// The enum MembershipStateCodeEnum. +type MembershipStateCodeEnum string + +// MembershipStateCodeEnumRef returns a *MembershipStateCodeEnum with the value of string s +// If the empty string is provided, nil is returned. +func MembershipStateCodeEnumRef(s string) *MembershipStateCodeEnum { + v := MembershipStateCodeEnum(s) + return &v +} + +func (v MembershipStateCodeEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"CODE_UNSPECIFIED", "CREATING", "READY", "DELETING", "UPDATING", "SERVICE_UPDATING"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "MembershipStateCodeEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum MembershipInfrastructureTypeEnum. +type MembershipInfrastructureTypeEnum string + +// MembershipInfrastructureTypeEnumRef returns a *MembershipInfrastructureTypeEnum with the value of string s +// If the empty string is provided, nil is returned. +func MembershipInfrastructureTypeEnumRef(s string) *MembershipInfrastructureTypeEnum { + v := MembershipInfrastructureTypeEnum(s) + return &v +} + +func (v MembershipInfrastructureTypeEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. 
+ return nil + } + for _, s := range []string{"INFRASTRUCTURE_TYPE_UNSPECIFIED", "ON_PREM", "MULTI_CLOUD"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "MembershipInfrastructureTypeEnum", + Value: string(v), + Valid: []string{}, + } +} + +type MembershipEndpoint struct { + empty bool `json:"-"` + GkeCluster *MembershipEndpointGkeCluster `json:"gkeCluster"` + KubernetesMetadata *MembershipEndpointKubernetesMetadata `json:"kubernetesMetadata"` + KubernetesResource *MembershipEndpointKubernetesResource `json:"kubernetesResource"` +} + +type jsonMembershipEndpoint MembershipEndpoint + +func (r *MembershipEndpoint) UnmarshalJSON(data []byte) error { + var res jsonMembershipEndpoint + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyMembershipEndpoint + } else { + + r.GkeCluster = res.GkeCluster + + r.KubernetesMetadata = res.KubernetesMetadata + + r.KubernetesResource = res.KubernetesResource + + } + return nil +} + +// This object is used to assert a desired state where this MembershipEndpoint is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyMembershipEndpoint *MembershipEndpoint = &MembershipEndpoint{empty: true} + +func (r *MembershipEndpoint) Empty() bool { + return r.empty +} + +func (r *MembershipEndpoint) String() string { + return dcl.SprintResource(r) +} + +func (r *MembershipEndpoint) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type MembershipEndpointGkeCluster struct { + empty bool `json:"-"` + ResourceLink *string `json:"resourceLink"` +} + +type jsonMembershipEndpointGkeCluster MembershipEndpointGkeCluster + +func (r *MembershipEndpointGkeCluster) UnmarshalJSON(data []byte) error { + var res jsonMembershipEndpointGkeCluster + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyMembershipEndpointGkeCluster + } else { + + r.ResourceLink = res.ResourceLink + + } + return nil +} + +// This object is used to assert a desired state where this MembershipEndpointGkeCluster is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyMembershipEndpointGkeCluster *MembershipEndpointGkeCluster = &MembershipEndpointGkeCluster{empty: true} + +func (r *MembershipEndpointGkeCluster) Empty() bool { + return r.empty +} + +func (r *MembershipEndpointGkeCluster) String() string { + return dcl.SprintResource(r) +} + +func (r *MembershipEndpointGkeCluster) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type MembershipEndpointKubernetesMetadata struct { + empty bool `json:"-"` + KubernetesApiServerVersion *string `json:"kubernetesApiServerVersion"` + NodeProviderId *string `json:"nodeProviderId"` + NodeCount *int64 `json:"nodeCount"` + VcpuCount *int64 `json:"vcpuCount"` + MemoryMb *int64 `json:"memoryMb"` + UpdateTime *string `json:"updateTime"` +} + +type jsonMembershipEndpointKubernetesMetadata MembershipEndpointKubernetesMetadata + +func (r *MembershipEndpointKubernetesMetadata) UnmarshalJSON(data []byte) error { + var res jsonMembershipEndpointKubernetesMetadata + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyMembershipEndpointKubernetesMetadata + } else { + + r.KubernetesApiServerVersion = res.KubernetesApiServerVersion + + r.NodeProviderId = res.NodeProviderId + + r.NodeCount = res.NodeCount + + r.VcpuCount = res.VcpuCount + + r.MemoryMb = res.MemoryMb + + r.UpdateTime = res.UpdateTime + + } + return nil +} + +// This object is used to assert a desired state where this MembershipEndpointKubernetesMetadata is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyMembershipEndpointKubernetesMetadata *MembershipEndpointKubernetesMetadata = &MembershipEndpointKubernetesMetadata{empty: true} + +func (r *MembershipEndpointKubernetesMetadata) Empty() bool { + return r.empty +} + +func (r *MembershipEndpointKubernetesMetadata) String() string { + return dcl.SprintResource(r) +} + +func (r *MembershipEndpointKubernetesMetadata) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type MembershipEndpointKubernetesResource struct { + empty bool `json:"-"` + MembershipCrManifest *string `json:"membershipCrManifest"` + MembershipResources []MembershipEndpointKubernetesResourceMembershipResources `json:"membershipResources"` + ConnectResources []MembershipEndpointKubernetesResourceConnectResources `json:"connectResources"` + ResourceOptions *MembershipEndpointKubernetesResourceResourceOptions `json:"resourceOptions"` +} + +type jsonMembershipEndpointKubernetesResource MembershipEndpointKubernetesResource + +func (r *MembershipEndpointKubernetesResource) UnmarshalJSON(data []byte) error { + var res jsonMembershipEndpointKubernetesResource + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyMembershipEndpointKubernetesResource + } else { + + r.MembershipCrManifest = res.MembershipCrManifest + + r.MembershipResources = res.MembershipResources + + r.ConnectResources = res.ConnectResources + + r.ResourceOptions = res.ResourceOptions + + } + return nil +} + +// This object is used to assert a desired state where this MembershipEndpointKubernetesResource is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyMembershipEndpointKubernetesResource *MembershipEndpointKubernetesResource = &MembershipEndpointKubernetesResource{empty: true} + +func (r *MembershipEndpointKubernetesResource) Empty() bool { + return r.empty +} + +func (r *MembershipEndpointKubernetesResource) String() string { + return dcl.SprintResource(r) +} + +func (r *MembershipEndpointKubernetesResource) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type MembershipEndpointKubernetesResourceMembershipResources struct { + empty bool `json:"-"` + Manifest *string `json:"manifest"` + ClusterScoped *bool `json:"clusterScoped"` +} + +type jsonMembershipEndpointKubernetesResourceMembershipResources MembershipEndpointKubernetesResourceMembershipResources + +func (r *MembershipEndpointKubernetesResourceMembershipResources) UnmarshalJSON(data []byte) error { + var res jsonMembershipEndpointKubernetesResourceMembershipResources + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyMembershipEndpointKubernetesResourceMembershipResources + } else { + + r.Manifest = res.Manifest + + r.ClusterScoped = res.ClusterScoped + + } + return nil +} + +// This object is used to assert a desired state where this MembershipEndpointKubernetesResourceMembershipResources is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyMembershipEndpointKubernetesResourceMembershipResources *MembershipEndpointKubernetesResourceMembershipResources = &MembershipEndpointKubernetesResourceMembershipResources{empty: true} + +func (r *MembershipEndpointKubernetesResourceMembershipResources) Empty() bool { + return r.empty +} + +func (r *MembershipEndpointKubernetesResourceMembershipResources) String() string { + return dcl.SprintResource(r) +} + +func (r *MembershipEndpointKubernetesResourceMembershipResources) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type MembershipEndpointKubernetesResourceConnectResources struct { + empty bool `json:"-"` + Manifest *string `json:"manifest"` + ClusterScoped *bool `json:"clusterScoped"` +} + +type jsonMembershipEndpointKubernetesResourceConnectResources MembershipEndpointKubernetesResourceConnectResources + +func (r *MembershipEndpointKubernetesResourceConnectResources) UnmarshalJSON(data []byte) error { + var res jsonMembershipEndpointKubernetesResourceConnectResources + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyMembershipEndpointKubernetesResourceConnectResources + } else { + + r.Manifest = res.Manifest + + r.ClusterScoped = res.ClusterScoped + + } + return nil +} + +// This object is used to assert a desired state where this MembershipEndpointKubernetesResourceConnectResources is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyMembershipEndpointKubernetesResourceConnectResources *MembershipEndpointKubernetesResourceConnectResources = &MembershipEndpointKubernetesResourceConnectResources{empty: true} + +func (r *MembershipEndpointKubernetesResourceConnectResources) Empty() bool { + return r.empty +} + +func (r *MembershipEndpointKubernetesResourceConnectResources) String() string { + return dcl.SprintResource(r) +} + +func (r *MembershipEndpointKubernetesResourceConnectResources) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type MembershipEndpointKubernetesResourceResourceOptions struct { + empty bool `json:"-"` + ConnectVersion *string `json:"connectVersion"` + V1Beta1Crd *bool `json:"v1beta1Crd"` +} + +type jsonMembershipEndpointKubernetesResourceResourceOptions MembershipEndpointKubernetesResourceResourceOptions + +func (r *MembershipEndpointKubernetesResourceResourceOptions) UnmarshalJSON(data []byte) error { + var res jsonMembershipEndpointKubernetesResourceResourceOptions + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyMembershipEndpointKubernetesResourceResourceOptions + } else { + + r.ConnectVersion = res.ConnectVersion + + r.V1Beta1Crd = res.V1Beta1Crd + + } + return nil +} + +// This object is used to assert a desired state where this MembershipEndpointKubernetesResourceResourceOptions is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyMembershipEndpointKubernetesResourceResourceOptions *MembershipEndpointKubernetesResourceResourceOptions = &MembershipEndpointKubernetesResourceResourceOptions{empty: true} + +func (r *MembershipEndpointKubernetesResourceResourceOptions) Empty() bool { + return r.empty +} + +func (r *MembershipEndpointKubernetesResourceResourceOptions) String() string { + return dcl.SprintResource(r) +} + +func (r *MembershipEndpointKubernetesResourceResourceOptions) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type MembershipState struct { + empty bool `json:"-"` + Code *MembershipStateCodeEnum `json:"code"` +} + +type jsonMembershipState MembershipState + +func (r *MembershipState) UnmarshalJSON(data []byte) error { + var res jsonMembershipState + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyMembershipState + } else { + + r.Code = res.Code + + } + return nil +} + +// This object is used to assert a desired state where this MembershipState is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyMembershipState *MembershipState = &MembershipState{empty: true} + +func (r *MembershipState) Empty() bool { + return r.empty +} + +func (r *MembershipState) String() string { + return dcl.SprintResource(r) +} + +func (r *MembershipState) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type MembershipAuthority struct { + empty bool `json:"-"` + Issuer *string `json:"issuer"` + WorkloadIdentityPool *string `json:"workloadIdentityPool"` + IdentityProvider *string `json:"identityProvider"` +} + +type jsonMembershipAuthority MembershipAuthority + +func (r *MembershipAuthority) UnmarshalJSON(data []byte) error { + var res jsonMembershipAuthority + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyMembershipAuthority + } else { + + r.Issuer = res.Issuer + + r.WorkloadIdentityPool = res.WorkloadIdentityPool + + r.IdentityProvider = res.IdentityProvider + + } + return nil +} + +// This object is used to assert a desired state where this MembershipAuthority is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyMembershipAuthority *MembershipAuthority = &MembershipAuthority{empty: true} + +func (r *MembershipAuthority) Empty() bool { + return r.empty +} + +func (r *MembershipAuthority) String() string { + return dcl.SprintResource(r) +} + +func (r *MembershipAuthority) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +// Describe returns a simple description of this resource to ensure that automated tools +// can identify it. +func (r *Membership) Describe() dcl.ServiceTypeVersion { + return dcl.ServiceTypeVersion{ + Service: "gke_hub", + Type: "Membership", + Version: "beta", + } +} + +func (r *Membership) ID() (string, error) { + if err := extractMembershipFields(r); err != nil { + return "", err + } + nr := r.urlNormalized() + params := map[string]interface{}{ + "endpoint": dcl.ValueOrEmptyString(nr.Endpoint), + "name": dcl.ValueOrEmptyString(nr.Name), + "labels": dcl.ValueOrEmptyString(nr.Labels), + "description": dcl.ValueOrEmptyString(nr.Description), + "state": dcl.ValueOrEmptyString(nr.State), + "create_time": dcl.ValueOrEmptyString(nr.CreateTime), + "update_time": dcl.ValueOrEmptyString(nr.UpdateTime), + "delete_time": dcl.ValueOrEmptyString(nr.DeleteTime), + "external_id": dcl.ValueOrEmptyString(nr.ExternalId), + "last_connection_time": dcl.ValueOrEmptyString(nr.LastConnectionTime), + "unique_id": dcl.ValueOrEmptyString(nr.UniqueId), + "authority": dcl.ValueOrEmptyString(nr.Authority), + "infrastructure_type": dcl.ValueOrEmptyString(nr.InfrastructureType), + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + } + return dcl.Nprintf("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/memberships/{{ "{{" }}name{{ "}}" }}", params), nil +} + +const MembershipMaxPage = -1 + +type MembershipList struct { + 
Items []*Membership
+
+	nextToken string
+
+	pageSize int32
+
+	resource *Membership
+}
+
+func (l *MembershipList) HasNext() bool {
+	return l.nextToken != ""
+}
+
+func (l *MembershipList) Next(ctx context.Context, c *Client) error {
+	ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second))
+	defer cancel()
+
+	if !l.HasNext() {
+		return fmt.Errorf("no next page")
+	}
+	items, token, err := c.listMembership(ctx, l.resource, l.nextToken, l.pageSize)
+	if err != nil {
+		return err
+	}
+	l.Items = items
+	l.nextToken = token
+	return err
+}
+
+func (c *Client) ListMembership(ctx context.Context, project, location string) (*MembershipList, error) {
+	ctx = dcl.ContextWithRequestID(ctx)
+	ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second))
+	defer cancel()
+
+	return c.ListMembershipWithMaxResults(ctx, project, location, MembershipMaxPage)
+
+}
+
+func (c *Client) ListMembershipWithMaxResults(ctx context.Context, project, location string, pageSize int32) (*MembershipList, error) {
+	ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second))
+	defer cancel()
+
+	// Create a resource object so that we can use proper url normalization methods.
+	r := &Membership{
+		Project:  &project,
+		Location: &location,
+	}
+	items, token, err := c.listMembership(ctx, r, "", pageSize)
+	if err != nil {
+		return nil, err
+	}
+	return &MembershipList{
+		Items:     items,
+		nextToken: token,
+		pageSize:  pageSize,
+		resource:  r,
+	}, nil
+}
+
+func (c *Client) GetMembership(ctx context.Context, r *Membership) (*Membership, error) {
+	ctx = dcl.ContextWithRequestID(ctx)
+	ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second))
+	defer cancel()
+
+	// This is *purposefully* suppressing errors.
+	// This function is used with url-normalized values + not URL normalized values.
+	// URL Normalized values will throw unintentional errors, since those values are not of the proper parent form.
+ extractMembershipFields(r) + + b, err := c.getMembershipRaw(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + return nil, &googleapi.Error{ + Code: 404, + Message: err.Error(), + } + } + return nil, err + } + result, err := unmarshalMembership(b, c, r) + if err != nil { + return nil, err + } + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name + + c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) + result, err = canonicalizeMembershipNewState(c, result, r) + if err != nil { + return nil, err + } + if err := postReadExtractMembershipFields(result); err != nil { + return result, err + } + c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result) + + return result, nil +} + +func (c *Client) DeleteMembership(ctx context.Context, r *Membership) error { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if r == nil { + return fmt.Errorf("Membership resource is nil") + } + c.Config.Logger.InfoWithContext(ctx, "Deleting Membership...") + deleteOp := deleteMembershipOperation{} + return deleteOp.do(ctx, r, c) +} + +// DeleteAllMembership deletes all resources that the filter functions returns true on. 
+func (c *Client) DeleteAllMembership(ctx context.Context, project, location string, filter func(*Membership) bool) error { + listObj, err := c.ListMembership(ctx, project, location) + if err != nil { + return err + } + + err = c.deleteAllMembership(ctx, filter, listObj.Items) + if err != nil { + return err + } + for listObj.HasNext() { + err = listObj.Next(ctx, c) + if err != nil { + return nil + } + err = c.deleteAllMembership(ctx, filter, listObj.Items) + if err != nil { + return err + } + } + return nil +} + +func (c *Client) ApplyMembership(ctx context.Context, rawDesired *Membership, opts ...dcl.ApplyOption) (*Membership, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + ctx = dcl.ContextWithRequestID(ctx) + var resultNewState *Membership + err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + newState, err := applyMembershipHelper(c, ctx, rawDesired, opts...) + resultNewState = newState + if err != nil { + // If the error is 409, there is conflict in resource update. + // Here we want to apply changes based on latest state. + if dcl.IsConflictError(err) { + return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} + } + return nil, err + } + return nil, nil + }, c.Config.RetryProvider) + return resultNewState, err +} + +func applyMembershipHelper(c *Client, ctx context.Context, rawDesired *Membership, opts ...dcl.ApplyOption) (*Membership, error) { + c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyMembership...") + c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) + + // 1.1: Validation of user-specified fields in desired state. + if err := rawDesired.validate(); err != nil { + return nil, err + } + + if err := extractMembershipFields(rawDesired); err != nil { + return nil, err + } + + initial, desired, fieldDiffs, err := c.membershipDiffsForRawDesired(ctx, rawDesired, opts...) 
+ if err != nil { + return nil, fmt.Errorf("failed to create a diff: %w", err) + } + + diffs, err := convertFieldDiffsToMembershipDiffs(c.Config, fieldDiffs, opts) + if err != nil { + return nil, err + } + + // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). + + // 2.3: Lifecycle Directive Check + var create bool + lp := dcl.FetchLifecycleParams(opts) + if initial == nil { + if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} + } + create = true + } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), + } + } else { + for _, d := range diffs { + if d.RequiresRecreate { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), + } + } + if dcl.HasLifecycleParam(lp, dcl.BlockModification) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} + } + } + } + + // 2.4 Imperative Request Planning + var ops []membershipApiOperation + if create { + ops = append(ops, &createMembershipOperation{}) + } else { + for _, d := range diffs { + ops = append(ops, d.UpdateOp) + } + } + c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) + + // 2.5 Request Actuation + for _, op := range ops { + c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) + if err := op.do(ctx, desired, c); err != nil { + c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", op, op, err) + return nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) + } + return applyMembershipDiff(c, ctx, desired, rawDesired, ops, opts...) 
+} + +func applyMembershipDiff(c *Client, ctx context.Context, desired *Membership, rawDesired *Membership, ops []membershipApiOperation, opts ...dcl.ApplyOption) (*Membership, error) { + // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") + rawNew, err := c.GetMembership(ctx, desired) + if err != nil { + return nil, err + } + // Get additional values from the first response. + // These values should be merged into the newState above. + if len(ops) > 0 { + lastOp := ops[len(ops)-1] + if o, ok := lastOp.(*createMembershipOperation); ok { + if r, hasR := o.FirstResponse(); hasR { + + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") + + fullResp, err := unmarshalMapMembership(r, c, rawDesired) + if err != nil { + return nil, err + } + + rawNew, err = canonicalizeMembershipNewState(c, rawNew, fullResp) + if err != nil { + return nil, err + } + } + } + } + + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) + // 3.2b Canonicalization of raw new state using raw desired state + newState, err := canonicalizeMembershipNewState(c, rawNew, rawDesired) + if err != nil { + return rawNew, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) + // 3.3 Comparison of the new state and raw desired state. + // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE + newDesired, err := canonicalizeMembershipDesiredState(rawDesired, newState) + if err != nil { + return newState, err + } + + if err := postReadExtractMembershipFields(newState); err != nil { + return newState, err + } + + // Need to ensure any transformations made here match acceptably in differ. 
+ if err := postReadExtractMembershipFields(newDesired); err != nil { + return newState, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) + newDiffs, err := diffMembership(c, newDesired, newState) + if err != nil { + return newState, err + } + + if len(newDiffs) == 0 { + c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") + } else { + c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) + diffMessages := make([]string, len(newDiffs)) + for i, d := range newDiffs { + diffMessages[i] = fmt.Sprintf("%v", d) + } + return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} + } + c.Config.Logger.InfoWithContext(ctx, "Done Apply.") + return newState, nil +} + +func (r *Membership) GetPolicy(basePath string) (string, string, *bytes.Buffer, error) { + u := r.getPolicyURL(basePath) + body := &bytes.Buffer{} + u, err := dcl.AddQueryParams(u, map[string]string{"optionsRequestedPolicyVersion": fmt.Sprintf("%d", r.IAMPolicyVersion())}) + if err != nil { + return "", "", nil, err + } + return u, "", body, nil +} diff --git a/mmv1/third_party/terraform/services/gkehub/membership_internal.go.tmpl b/mmv1/third_party/terraform/services/gkehub/membership_internal.go.tmpl new file mode 100644 index 000000000000..d0f34246b2d8 --- /dev/null +++ b/mmv1/third_party/terraform/services/gkehub/membership_internal.go.tmpl @@ -0,0 +1,3830 @@ +package gkehub + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "strings" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource/operations" +) + +func (r *Membership) validate() error { + + if err := dcl.Required(r, "name"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Project, "Project"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Location, "Location"); err != nil { + return err + } + if 
!dcl.IsEmptyValueIndirect(r.Endpoint) { + if err := r.Endpoint.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.State) { + if err := r.State.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.Authority) { + if err := r.Authority.validate(); err != nil { + return err + } + } + return nil +} +func (r *MembershipEndpoint) validate() error { + if !dcl.IsEmptyValueIndirect(r.GkeCluster) { + if err := r.GkeCluster.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.KubernetesMetadata) { + if err := r.KubernetesMetadata.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.KubernetesResource) { + if err := r.KubernetesResource.validate(); err != nil { + return err + } + } + return nil +} +func (r *MembershipEndpointGkeCluster) validate() error { + return nil +} +func (r *MembershipEndpointKubernetesMetadata) validate() error { + return nil +} +func (r *MembershipEndpointKubernetesResource) validate() error { + if !dcl.IsEmptyValueIndirect(r.ResourceOptions) { + if err := r.ResourceOptions.validate(); err != nil { + return err + } + } + return nil +} +func (r *MembershipEndpointKubernetesResourceMembershipResources) validate() error { + return nil +} +func (r *MembershipEndpointKubernetesResourceConnectResources) validate() error { + return nil +} +func (r *MembershipEndpointKubernetesResourceResourceOptions) validate() error { + return nil +} +func (r *MembershipState) validate() error { + return nil +} +func (r *MembershipAuthority) validate() error { + return nil +} +func (r *Membership) basePath() string { + params := map[string]interface{}{} + return dcl.Nprintf("https://gkehub.googleapis.com/v1beta1/", params) +} + +func (r *Membership) getURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": 
dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/memberships/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +func (r *Membership) listURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/memberships", nr.basePath(), userBasePath, params), nil + +} + +func (r *Membership) createURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/memberships?membershipId={{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil + +} + +func (r *Membership) deleteURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/memberships/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +func (r *Membership) SetPolicyURL(userBasePath string) string { + nr := r.urlNormalized() + fields := map[string]interface{}{} + return dcl.URL("", nr.basePath(), userBasePath, fields) +} + +func (r *Membership) SetPolicyVerb() string { + return "" +} + +func (r *Membership) getPolicyURL(userBasePath string) string { + nr := r.urlNormalized() + fields := map[string]interface{}{} + return dcl.URL("", nr.basePath(), userBasePath, fields) +} + +func (r 
*Membership) IAMPolicyVersion() int { + return 3 +} + +// membershipApiOperation represents a mutable operation in the underlying REST +// API such as Create, Update, or Delete. +type membershipApiOperation interface { + do(context.Context, *Membership, *Client) error +} + +// newUpdateMembershipUpdateMembershipRequest creates a request for an +// Membership resource's UpdateMembership update type by filling in the update +// fields based on the intended state of the resource. +func newUpdateMembershipUpdateMembershipRequest(ctx context.Context, f *Membership, c *Client) (map[string]interface{}, error) { + req := map[string]interface{}{} + res := f + _ = res + + if v, err := expandMembershipEndpoint(c, f.Endpoint, res); err != nil { + return nil, fmt.Errorf("error expanding Endpoint into endpoint: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["endpoint"] = v + } + if v := f.Labels; !dcl.IsEmptyValueIndirect(v) { + req["labels"] = v + } + if v := f.Description; !dcl.IsEmptyValueIndirect(v) { + req["description"] = v + } + if v := f.ExternalId; !dcl.IsEmptyValueIndirect(v) { + req["externalId"] = v + } + if v, err := expandMembershipAuthority(c, f.Authority, res); err != nil { + return nil, fmt.Errorf("error expanding Authority into authority: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["authority"] = v + } + if v := f.InfrastructureType; !dcl.IsEmptyValueIndirect(v) { + req["infrastructureType"] = v + } + return req, nil +} + +// marshalUpdateMembershipUpdateMembershipRequest converts the update into +// the final JSON request body. +func marshalUpdateMembershipUpdateMembershipRequest(c *Client, m map[string]interface{}) ([]byte, error) { + + return json.Marshal(m) +} + +type updateMembershipUpdateMembershipOperation struct { + // If the update operation has the REQUIRES_APPLY_OPTIONS trait, this will be populated. 
+ // Usually it will be nil - this is to prevent us from accidentally depending on apply + // options, which should usually be unnecessary. + ApplyOptions []dcl.ApplyOption + FieldDiffs []*dcl.FieldDiff +} + +// do creates a request and sends it to the appropriate URL. In most operations, +// do will transcribe a subset of the resource into a request object and send a +// PUT request to a single URL. + +func (op *updateMembershipUpdateMembershipOperation) do(ctx context.Context, r *Membership, c *Client) error { + _, err := c.GetMembership(ctx, r) + if err != nil { + return err + } + + u, err := r.updateURL(c.Config.BasePath, "UpdateMembership") + if err != nil { + return err + } + mask := dcl.UpdateMask(op.FieldDiffs) + u, err = dcl.AddQueryParams(u, map[string]string{"updateMask": mask}) + if err != nil { + return err + } + + req, err := newUpdateMembershipUpdateMembershipRequest(ctx, r, c) + if err != nil { + return err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created update: %#v", req) + body, err := marshalUpdateMembershipUpdateMembershipRequest(c, req) + if err != nil { + return err + } + resp, err := dcl.SendRequest(ctx, c.Config, "PATCH", u, bytes.NewBuffer(body), c.Config.RetryProvider) + if err != nil { + return err + } + + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + err = o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET") + + if err != nil { + return err + } + + return nil +} + +func (c *Client) listMembershipRaw(ctx context.Context, r *Membership, pageToken string, pageSize int32) ([]byte, error) { + u, err := r.urlNormalized().listURL(c.Config.BasePath) + if err != nil { + return nil, err + } + + m := make(map[string]string) + if pageToken != "" { + m["pageToken"] = pageToken + } + + if pageSize != MembershipMaxPage { + m["pageSize"] = fmt.Sprintf("%v", pageSize) + } + + u, err = dcl.AddQueryParams(u, m) + if err != nil { + return 
nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + return ioutil.ReadAll(resp.Response.Body) +} + +type listMembershipOperation struct { + Resources []map[string]interface{} `json:"resources"` + Token string `json:"nextPageToken"` +} + +func (c *Client) listMembership(ctx context.Context, r *Membership, pageToken string, pageSize int32) ([]*Membership, string, error) { + b, err := c.listMembershipRaw(ctx, r, pageToken, pageSize) + if err != nil { + return nil, "", err + } + + var m listMembershipOperation + if err := json.Unmarshal(b, &m); err != nil { + return nil, "", err + } + + var l []*Membership + for _, v := range m.Resources { + res, err := unmarshalMapMembership(v, c, r) + if err != nil { + return nil, m.Token, err + } + res.Project = r.Project + res.Location = r.Location + l = append(l, res) + } + + return l, m.Token, nil +} + +func (c *Client) deleteAllMembership(ctx context.Context, f func(*Membership) bool, resources []*Membership) error { + var errors []string + for _, res := range resources { + if f(res) { + // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. + err := c.DeleteMembership(ctx, res) + if err != nil { + errors = append(errors, err.Error()) + } + } + } + if len(errors) > 0 { + return fmt.Errorf("%v", strings.Join(errors, "\n")) + } else { + return nil + } +} + +type deleteMembershipOperation struct{} + +func (op *deleteMembershipOperation) do(ctx context.Context, r *Membership, c *Client) error { + r, err := c.GetMembership(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + c.Config.Logger.InfoWithContextf(ctx, "Membership not found, returning. Original error: %v", err) + return nil + } + c.Config.Logger.WarningWithContextf(ctx, "GetMembership checking for existence. 
error: %v", err) + return err + } + + u, err := r.deleteURL(c.Config.BasePath) + if err != nil { + return err + } + + // Delete should never have a body + body := &bytes.Buffer{} + resp, err := dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider) + if err != nil { + return err + } + + // wait for object to be deleted. + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { + return err + } + + // We saw a race condition where for some successful delete operation, the Get calls returned resources for a short duration. + // This is the reason we are adding retry to handle that case. + retriesRemaining := 10 + dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + _, err := c.GetMembership(ctx, r) + if dcl.IsNotFound(err) { + return nil, nil + } + if retriesRemaining > 0 { + retriesRemaining-- + return &dcl.RetryDetails{}, dcl.OperationNotDone{} + } + return nil, dcl.NotDeletedError{ExistingResource: r} + }, c.Config.RetryProvider) + return nil +} + +// Create operations are similar to Update operations, although they do not have +// specific request objects. The Create request object is the json encoding of +// the resource, which is modified by res.marshal to form the base request body. 
+type createMembershipOperation struct { + response map[string]interface{} +} + +func (op *createMembershipOperation) FirstResponse() (map[string]interface{}, bool) { + return op.response, len(op.response) > 0 +} + +func (op *createMembershipOperation) do(ctx context.Context, r *Membership, c *Client) error { + c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r) + u, err := r.createURL(c.Config.BasePath) + if err != nil { + return err + } + + req, err := r.marshal(c) + if err != nil { + return err + } + resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider) + if err != nil { + return err + } + // wait for object to be created. + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { + c.Config.Logger.Warningf("Creation failed after waiting for operation: %v", err) + return err + } + c.Config.Logger.InfoWithContextf(ctx, "Successfully waited for operation") + op.response, _ = o.FirstResponse() + + if _, err := c.GetMembership(ctx, r); err != nil { + c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) + return err + } + + return nil +} + +func (c *Client) getMembershipRaw(ctx context.Context, r *Membership) ([]byte, error) { + + u, err := r.getURL(c.Config.BasePath) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + b, err := ioutil.ReadAll(resp.Response.Body) + if err != nil { + return nil, err + } + + return b, nil +} + +func (c *Client) membershipDiffsForRawDesired(ctx context.Context, rawDesired *Membership, opts ...dcl.ApplyOption) (initial, desired *Membership, diffs []*dcl.FieldDiff, err error) { + c.Config.Logger.InfoWithContext(ctx, 
"Fetching initial state...") + // First, let us see if the user provided a state hint. If they did, we will start fetching based on that. + var fetchState *Membership + if sh := dcl.FetchStateHint(opts); sh != nil { + if r, ok := sh.(*Membership); !ok { + c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected Membership, got %T", sh) + } else { + fetchState = r + } + } + if fetchState == nil { + fetchState = rawDesired + } + + // 1.2: Retrieval of raw initial state from API + rawInitial, err := c.GetMembership(ctx, fetchState) + if rawInitial == nil { + if !dcl.IsNotFound(err) { + c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a Membership resource already exists: %s", err) + return nil, nil, nil, fmt.Errorf("failed to retrieve Membership resource: %v", err) + } + c.Config.Logger.InfoWithContext(ctx, "Found that Membership resource did not exist.") + // Perform canonicalization to pick up defaults. + desired, err = canonicalizeMembershipDesiredState(rawDesired, rawInitial) + return nil, desired, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Found initial state for Membership: %v", rawInitial) + c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for Membership: %v", rawDesired) + + // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. + if err := extractMembershipFields(rawInitial); err != nil { + return nil, nil, nil, err + } + + // 1.3: Canonicalize raw initial state into initial state. + initial, err = canonicalizeMembershipInitialState(rawInitial, rawDesired) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for Membership: %v", initial) + + // 1.4: Canonicalize raw desired state into desired state. + desired, err = canonicalizeMembershipDesiredState(rawDesired, rawInitial, opts...) 
+ if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for Membership: %v", desired) + + // 2.1: Comparison of initial and desired state. + diffs, err = diffMembership(c, desired, initial, opts...) + return initial, desired, diffs, err +} + +func canonicalizeMembershipInitialState(rawInitial, rawDesired *Membership) (*Membership, error) { + // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. + return rawInitial, nil +} + +/* +* Canonicalizers +* +* These are responsible for converting either a user-specified config or a +* GCP API response to a standard format that can be used for difference checking. +* */ + +func canonicalizeMembershipDesiredState(rawDesired, rawInitial *Membership, opts ...dcl.ApplyOption) (*Membership, error) { + + if rawInitial == nil { + // Since the initial state is empty, the desired state is all we have. + // We canonicalize the remaining nested objects with nil to pick up defaults. + rawDesired.Endpoint = canonicalizeMembershipEndpoint(rawDesired.Endpoint, nil, opts...) + rawDesired.State = canonicalizeMembershipState(rawDesired.State, nil, opts...) + rawDesired.Authority = canonicalizeMembershipAuthority(rawDesired.Authority, nil, opts...) + + return rawDesired, nil + } + canonicalDesired := &Membership{} + canonicalDesired.Endpoint = canonicalizeMembershipEndpoint(rawDesired.Endpoint, rawInitial.Endpoint, opts...) + if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawInitial.Name) { + canonicalDesired.Name = rawInitial.Name + } else { + canonicalDesired.Name = rawDesired.Name + } + if dcl.IsZeroValue(rawDesired.Labels) || (dcl.IsEmptyValueIndirect(rawDesired.Labels) && dcl.IsEmptyValueIndirect(rawInitial.Labels)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ canonicalDesired.Labels = rawInitial.Labels + } else { + canonicalDesired.Labels = rawDesired.Labels + } + if dcl.StringCanonicalize(rawDesired.Description, rawInitial.Description) { + canonicalDesired.Description = rawInitial.Description + } else { + canonicalDesired.Description = rawDesired.Description + } + if dcl.StringCanonicalize(rawDesired.ExternalId, rawInitial.ExternalId) { + canonicalDesired.ExternalId = rawInitial.ExternalId + } else { + canonicalDesired.ExternalId = rawDesired.ExternalId + } + canonicalDesired.Authority = canonicalizeMembershipAuthority(rawDesired.Authority, rawInitial.Authority, opts...) + if dcl.IsZeroValue(rawDesired.InfrastructureType) || (dcl.IsEmptyValueIndirect(rawDesired.InfrastructureType) && dcl.IsEmptyValueIndirect(rawInitial.InfrastructureType)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + canonicalDesired.InfrastructureType = rawInitial.InfrastructureType + } else { + canonicalDesired.InfrastructureType = rawDesired.InfrastructureType + } + if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) { + canonicalDesired.Project = rawInitial.Project + } else { + canonicalDesired.Project = rawDesired.Project + } + if dcl.NameToSelfLink(rawDesired.Location, rawInitial.Location) { + canonicalDesired.Location = rawInitial.Location + } else { + canonicalDesired.Location = rawDesired.Location + } + return canonicalDesired, nil +} + +func canonicalizeMembershipNewState(c *Client, rawNew, rawDesired *Membership) (*Membership, error) { + + if dcl.IsEmptyValueIndirect(rawNew.Endpoint) && dcl.IsEmptyValueIndirect(rawDesired.Endpoint) { + rawNew.Endpoint = rawDesired.Endpoint + } else { + rawNew.Endpoint = canonicalizeNewMembershipEndpoint(c, rawDesired.Endpoint, rawNew.Endpoint) + } + + if dcl.IsEmptyValueIndirect(rawNew.Name) && dcl.IsEmptyValueIndirect(rawDesired.Name) { + rawNew.Name = rawDesired.Name + } else { + if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, 
rawNew.Name) { + rawNew.Name = rawDesired.Name + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Labels) && dcl.IsEmptyValueIndirect(rawDesired.Labels) { + rawNew.Labels = rawDesired.Labels + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.Description) && dcl.IsEmptyValueIndirect(rawDesired.Description) { + rawNew.Description = rawDesired.Description + } else { + if dcl.StringCanonicalize(rawDesired.Description, rawNew.Description) { + rawNew.Description = rawDesired.Description + } + } + + if dcl.IsEmptyValueIndirect(rawNew.State) && dcl.IsEmptyValueIndirect(rawDesired.State) { + rawNew.State = rawDesired.State + } else { + rawNew.State = canonicalizeNewMembershipState(c, rawDesired.State, rawNew.State) + } + + if dcl.IsEmptyValueIndirect(rawNew.CreateTime) && dcl.IsEmptyValueIndirect(rawDesired.CreateTime) { + rawNew.CreateTime = rawDesired.CreateTime + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.UpdateTime) && dcl.IsEmptyValueIndirect(rawDesired.UpdateTime) { + rawNew.UpdateTime = rawDesired.UpdateTime + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.DeleteTime) && dcl.IsEmptyValueIndirect(rawDesired.DeleteTime) { + rawNew.DeleteTime = rawDesired.DeleteTime + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.ExternalId) && dcl.IsEmptyValueIndirect(rawDesired.ExternalId) { + rawNew.ExternalId = rawDesired.ExternalId + } else { + if dcl.StringCanonicalize(rawDesired.ExternalId, rawNew.ExternalId) { + rawNew.ExternalId = rawDesired.ExternalId + } + } + + if dcl.IsEmptyValueIndirect(rawNew.LastConnectionTime) && dcl.IsEmptyValueIndirect(rawDesired.LastConnectionTime) { + rawNew.LastConnectionTime = rawDesired.LastConnectionTime + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.UniqueId) && dcl.IsEmptyValueIndirect(rawDesired.UniqueId) { + rawNew.UniqueId = rawDesired.UniqueId + } else { + if dcl.StringCanonicalize(rawDesired.UniqueId, rawNew.UniqueId) { + rawNew.UniqueId = rawDesired.UniqueId + } + } + + if 
dcl.IsEmptyValueIndirect(rawNew.Authority) && dcl.IsEmptyValueIndirect(rawDesired.Authority) { + rawNew.Authority = rawDesired.Authority + } else { + rawNew.Authority = canonicalizeNewMembershipAuthority(c, rawDesired.Authority, rawNew.Authority) + } + + if dcl.IsEmptyValueIndirect(rawNew.InfrastructureType) && dcl.IsEmptyValueIndirect(rawDesired.InfrastructureType) { + rawNew.InfrastructureType = rawDesired.InfrastructureType + } else { + } + + rawNew.Project = rawDesired.Project + + rawNew.Location = rawDesired.Location + + return rawNew, nil +} + +func canonicalizeMembershipEndpoint(des, initial *MembershipEndpoint, opts ...dcl.ApplyOption) *MembershipEndpoint { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &MembershipEndpoint{} + + cDes.GkeCluster = canonicalizeMembershipEndpointGkeCluster(des.GkeCluster, initial.GkeCluster, opts...) + cDes.KubernetesResource = canonicalizeMembershipEndpointKubernetesResource(des.KubernetesResource, initial.KubernetesResource, opts...) + + return cDes +} + +func canonicalizeMembershipEndpointSlice(des, initial []MembershipEndpoint, opts ...dcl.ApplyOption) []MembershipEndpoint { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]MembershipEndpoint, 0, len(des)) + for _, d := range des { + cd := canonicalizeMembershipEndpoint(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]MembershipEndpoint, 0, len(des)) + for i, d := range des { + cd := canonicalizeMembershipEndpoint(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewMembershipEndpoint(c *Client, des, nw *MembershipEndpoint) *MembershipEndpoint { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for MembershipEndpoint while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + nw.GkeCluster = canonicalizeNewMembershipEndpointGkeCluster(c, des.GkeCluster, nw.GkeCluster) + nw.KubernetesMetadata = canonicalizeNewMembershipEndpointKubernetesMetadata(c, des.KubernetesMetadata, nw.KubernetesMetadata) + nw.KubernetesResource = canonicalizeNewMembershipEndpointKubernetesResource(c, des.KubernetesResource, nw.KubernetesResource) + + return nw +} + +func canonicalizeNewMembershipEndpointSet(c *Client, des, nw []MembershipEndpoint) []MembershipEndpoint { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []MembershipEndpoint + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareMembershipEndpointNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewMembershipEndpoint(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewMembershipEndpointSlice(c *Client, des, nw []MembershipEndpoint) []MembershipEndpoint { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []MembershipEndpoint + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewMembershipEndpoint(c, &d, &n)) + } + + return items +} + +func canonicalizeMembershipEndpointGkeCluster(des, initial *MembershipEndpointGkeCluster, opts ...dcl.ApplyOption) *MembershipEndpointGkeCluster { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &MembershipEndpointGkeCluster{} + + if dcl.IsZeroValue(des.ResourceLink) || (dcl.IsEmptyValueIndirect(des.ResourceLink) && dcl.IsEmptyValueIndirect(initial.ResourceLink)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.ResourceLink = initial.ResourceLink + } else { + cDes.ResourceLink = des.ResourceLink + } + + return cDes +} + +func canonicalizeMembershipEndpointGkeClusterSlice(des, initial []MembershipEndpointGkeCluster, opts ...dcl.ApplyOption) []MembershipEndpointGkeCluster { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]MembershipEndpointGkeCluster, 0, len(des)) + for _, d := range des { + cd := canonicalizeMembershipEndpointGkeCluster(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]MembershipEndpointGkeCluster, 0, len(des)) + for i, d := range des { + cd := canonicalizeMembershipEndpointGkeCluster(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewMembershipEndpointGkeCluster(c *Client, des, nw *MembershipEndpointGkeCluster) *MembershipEndpointGkeCluster { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for MembershipEndpointGkeCluster while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewMembershipEndpointGkeClusterSet(c *Client, des, nw []MembershipEndpointGkeCluster) []MembershipEndpointGkeCluster { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []MembershipEndpointGkeCluster + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareMembershipEndpointGkeClusterNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewMembershipEndpointGkeCluster(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewMembershipEndpointGkeClusterSlice(c *Client, des, nw []MembershipEndpointGkeCluster) []MembershipEndpointGkeCluster { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []MembershipEndpointGkeCluster + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewMembershipEndpointGkeCluster(c, &d, &n)) + } + + return items +} + +func canonicalizeMembershipEndpointKubernetesMetadata(des, initial *MembershipEndpointKubernetesMetadata, opts ...dcl.ApplyOption) *MembershipEndpointKubernetesMetadata { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &MembershipEndpointKubernetesMetadata{} + + return cDes +} + +func canonicalizeMembershipEndpointKubernetesMetadataSlice(des, initial []MembershipEndpointKubernetesMetadata, opts ...dcl.ApplyOption) []MembershipEndpointKubernetesMetadata { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]MembershipEndpointKubernetesMetadata, 0, len(des)) + for _, d := range des { + cd := canonicalizeMembershipEndpointKubernetesMetadata(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]MembershipEndpointKubernetesMetadata, 0, len(des)) + for i, d := range des { + cd := canonicalizeMembershipEndpointKubernetesMetadata(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewMembershipEndpointKubernetesMetadata(c *Client, des, nw *MembershipEndpointKubernetesMetadata) *MembershipEndpointKubernetesMetadata { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for MembershipEndpointKubernetesMetadata while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.KubernetesApiServerVersion, nw.KubernetesApiServerVersion) { + nw.KubernetesApiServerVersion = des.KubernetesApiServerVersion + } + if dcl.StringCanonicalize(des.NodeProviderId, nw.NodeProviderId) { + nw.NodeProviderId = des.NodeProviderId + } + + return nw +} + +func canonicalizeNewMembershipEndpointKubernetesMetadataSet(c *Client, des, nw []MembershipEndpointKubernetesMetadata) []MembershipEndpointKubernetesMetadata { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []MembershipEndpointKubernetesMetadata + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareMembershipEndpointKubernetesMetadataNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewMembershipEndpointKubernetesMetadata(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewMembershipEndpointKubernetesMetadataSlice(c *Client, des, nw []MembershipEndpointKubernetesMetadata) []MembershipEndpointKubernetesMetadata { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []MembershipEndpointKubernetesMetadata + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewMembershipEndpointKubernetesMetadata(c, &d, &n)) + } + + return items +} + +func canonicalizeMembershipEndpointKubernetesResource(des, initial *MembershipEndpointKubernetesResource, opts ...dcl.ApplyOption) *MembershipEndpointKubernetesResource { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &MembershipEndpointKubernetesResource{} + + if dcl.StringCanonicalize(des.MembershipCrManifest, initial.MembershipCrManifest) || dcl.IsZeroValue(des.MembershipCrManifest) { + cDes.MembershipCrManifest = initial.MembershipCrManifest + } else { + cDes.MembershipCrManifest = des.MembershipCrManifest + } + cDes.ResourceOptions = canonicalizeMembershipEndpointKubernetesResourceResourceOptions(des.ResourceOptions, initial.ResourceOptions, opts...) + + return cDes +} + +func canonicalizeMembershipEndpointKubernetesResourceSlice(des, initial []MembershipEndpointKubernetesResource, opts ...dcl.ApplyOption) []MembershipEndpointKubernetesResource { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]MembershipEndpointKubernetesResource, 0, len(des)) + for _, d := range des { + cd := canonicalizeMembershipEndpointKubernetesResource(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]MembershipEndpointKubernetesResource, 0, len(des)) + for i, d := range des { + cd := canonicalizeMembershipEndpointKubernetesResource(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewMembershipEndpointKubernetesResource(c *Client, des, nw *MembershipEndpointKubernetesResource) *MembershipEndpointKubernetesResource { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for MembershipEndpointKubernetesResource while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + nw.MembershipCrManifest = des.MembershipCrManifest + nw.MembershipResources = canonicalizeNewMembershipEndpointKubernetesResourceMembershipResourcesSlice(c, des.MembershipResources, nw.MembershipResources) + nw.ConnectResources = canonicalizeNewMembershipEndpointKubernetesResourceConnectResourcesSlice(c, des.ConnectResources, nw.ConnectResources) + nw.ResourceOptions = canonicalizeNewMembershipEndpointKubernetesResourceResourceOptions(c, des.ResourceOptions, nw.ResourceOptions) + + return nw +} + +func canonicalizeNewMembershipEndpointKubernetesResourceSet(c *Client, des, nw []MembershipEndpointKubernetesResource) []MembershipEndpointKubernetesResource { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []MembershipEndpointKubernetesResource + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareMembershipEndpointKubernetesResourceNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewMembershipEndpointKubernetesResource(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) 
+ + return items +} + +func canonicalizeNewMembershipEndpointKubernetesResourceSlice(c *Client, des, nw []MembershipEndpointKubernetesResource) []MembershipEndpointKubernetesResource { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []MembershipEndpointKubernetesResource + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewMembershipEndpointKubernetesResource(c, &d, &n)) + } + + return items +} + +func canonicalizeMembershipEndpointKubernetesResourceMembershipResources(des, initial *MembershipEndpointKubernetesResourceMembershipResources, opts ...dcl.ApplyOption) *MembershipEndpointKubernetesResourceMembershipResources { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &MembershipEndpointKubernetesResourceMembershipResources{} + + if dcl.StringCanonicalize(des.Manifest, initial.Manifest) || dcl.IsZeroValue(des.Manifest) { + cDes.Manifest = initial.Manifest + } else { + cDes.Manifest = des.Manifest + } + if dcl.BoolCanonicalize(des.ClusterScoped, initial.ClusterScoped) || dcl.IsZeroValue(des.ClusterScoped) { + cDes.ClusterScoped = initial.ClusterScoped + } else { + cDes.ClusterScoped = des.ClusterScoped + } + + return cDes +} + +func canonicalizeMembershipEndpointKubernetesResourceMembershipResourcesSlice(des, initial []MembershipEndpointKubernetesResourceMembershipResources, opts ...dcl.ApplyOption) []MembershipEndpointKubernetesResourceMembershipResources { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]MembershipEndpointKubernetesResourceMembershipResources, 0, len(des)) + for _, d := range des { + cd := canonicalizeMembershipEndpointKubernetesResourceMembershipResources(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]MembershipEndpointKubernetesResourceMembershipResources, 0, len(des)) + for i, d := range des { + cd := canonicalizeMembershipEndpointKubernetesResourceMembershipResources(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewMembershipEndpointKubernetesResourceMembershipResources(c *Client, des, nw *MembershipEndpointKubernetesResourceMembershipResources) *MembershipEndpointKubernetesResourceMembershipResources { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for MembershipEndpointKubernetesResourceMembershipResources while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Manifest, nw.Manifest) { + nw.Manifest = des.Manifest + } + if dcl.BoolCanonicalize(des.ClusterScoped, nw.ClusterScoped) { + nw.ClusterScoped = des.ClusterScoped + } + + return nw +} + +func canonicalizeNewMembershipEndpointKubernetesResourceMembershipResourcesSet(c *Client, des, nw []MembershipEndpointKubernetesResourceMembershipResources) []MembershipEndpointKubernetesResourceMembershipResources { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []MembershipEndpointKubernetesResourceMembershipResources + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareMembershipEndpointKubernetesResourceMembershipResourcesNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewMembershipEndpointKubernetesResourceMembershipResources(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) 
+ } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewMembershipEndpointKubernetesResourceMembershipResourcesSlice(c *Client, des, nw []MembershipEndpointKubernetesResourceMembershipResources) []MembershipEndpointKubernetesResourceMembershipResources { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []MembershipEndpointKubernetesResourceMembershipResources + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewMembershipEndpointKubernetesResourceMembershipResources(c, &d, &n)) + } + + return items +} + +func canonicalizeMembershipEndpointKubernetesResourceConnectResources(des, initial *MembershipEndpointKubernetesResourceConnectResources, opts ...dcl.ApplyOption) *MembershipEndpointKubernetesResourceConnectResources { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &MembershipEndpointKubernetesResourceConnectResources{} + + if dcl.StringCanonicalize(des.Manifest, initial.Manifest) || dcl.IsZeroValue(des.Manifest) { + cDes.Manifest = initial.Manifest + } else { + cDes.Manifest = des.Manifest + } + if dcl.BoolCanonicalize(des.ClusterScoped, initial.ClusterScoped) || dcl.IsZeroValue(des.ClusterScoped) { + cDes.ClusterScoped = initial.ClusterScoped + } else { + cDes.ClusterScoped = des.ClusterScoped + } + + return cDes +} + +func canonicalizeMembershipEndpointKubernetesResourceConnectResourcesSlice(des, initial []MembershipEndpointKubernetesResourceConnectResources, opts ...dcl.ApplyOption) []MembershipEndpointKubernetesResourceConnectResources { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]MembershipEndpointKubernetesResourceConnectResources, 0, len(des)) + 
for _, d := range des { + cd := canonicalizeMembershipEndpointKubernetesResourceConnectResources(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]MembershipEndpointKubernetesResourceConnectResources, 0, len(des)) + for i, d := range des { + cd := canonicalizeMembershipEndpointKubernetesResourceConnectResources(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewMembershipEndpointKubernetesResourceConnectResources(c *Client, des, nw *MembershipEndpointKubernetesResourceConnectResources) *MembershipEndpointKubernetesResourceConnectResources { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for MembershipEndpointKubernetesResourceConnectResources while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Manifest, nw.Manifest) { + nw.Manifest = des.Manifest + } + if dcl.BoolCanonicalize(des.ClusterScoped, nw.ClusterScoped) { + nw.ClusterScoped = des.ClusterScoped + } + + return nw +} + +func canonicalizeNewMembershipEndpointKubernetesResourceConnectResourcesSet(c *Client, des, nw []MembershipEndpointKubernetesResourceConnectResources) []MembershipEndpointKubernetesResourceConnectResources { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []MembershipEndpointKubernetesResourceConnectResources + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareMembershipEndpointKubernetesResourceConnectResourcesNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewMembershipEndpointKubernetesResourceConnectResources(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewMembershipEndpointKubernetesResourceConnectResourcesSlice(c *Client, des, nw []MembershipEndpointKubernetesResourceConnectResources) []MembershipEndpointKubernetesResourceConnectResources { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []MembershipEndpointKubernetesResourceConnectResources + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewMembershipEndpointKubernetesResourceConnectResources(c, &d, &n)) + } + + return items +} + +func canonicalizeMembershipEndpointKubernetesResourceResourceOptions(des, initial *MembershipEndpointKubernetesResourceResourceOptions, opts ...dcl.ApplyOption) *MembershipEndpointKubernetesResourceResourceOptions { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &MembershipEndpointKubernetesResourceResourceOptions{} + + if dcl.StringCanonicalize(des.ConnectVersion, initial.ConnectVersion) || dcl.IsZeroValue(des.ConnectVersion) { + cDes.ConnectVersion = initial.ConnectVersion + } else { + cDes.ConnectVersion = des.ConnectVersion + } + if dcl.BoolCanonicalize(des.V1Beta1Crd, initial.V1Beta1Crd) || dcl.IsZeroValue(des.V1Beta1Crd) { + 
cDes.V1Beta1Crd = initial.V1Beta1Crd + } else { + cDes.V1Beta1Crd = des.V1Beta1Crd + } + + return cDes +} + +func canonicalizeMembershipEndpointKubernetesResourceResourceOptionsSlice(des, initial []MembershipEndpointKubernetesResourceResourceOptions, opts ...dcl.ApplyOption) []MembershipEndpointKubernetesResourceResourceOptions { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]MembershipEndpointKubernetesResourceResourceOptions, 0, len(des)) + for _, d := range des { + cd := canonicalizeMembershipEndpointKubernetesResourceResourceOptions(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]MembershipEndpointKubernetesResourceResourceOptions, 0, len(des)) + for i, d := range des { + cd := canonicalizeMembershipEndpointKubernetesResourceResourceOptions(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewMembershipEndpointKubernetesResourceResourceOptions(c *Client, des, nw *MembershipEndpointKubernetesResourceResourceOptions) *MembershipEndpointKubernetesResourceResourceOptions { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for MembershipEndpointKubernetesResourceResourceOptions while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.ConnectVersion, nw.ConnectVersion) { + nw.ConnectVersion = des.ConnectVersion + } + if dcl.BoolCanonicalize(des.V1Beta1Crd, nw.V1Beta1Crd) { + nw.V1Beta1Crd = des.V1Beta1Crd + } + + return nw +} + +func canonicalizeNewMembershipEndpointKubernetesResourceResourceOptionsSet(c *Client, des, nw []MembershipEndpointKubernetesResourceResourceOptions) []MembershipEndpointKubernetesResourceResourceOptions { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []MembershipEndpointKubernetesResourceResourceOptions + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareMembershipEndpointKubernetesResourceResourceOptionsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewMembershipEndpointKubernetesResourceResourceOptions(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewMembershipEndpointKubernetesResourceResourceOptionsSlice(c *Client, des, nw []MembershipEndpointKubernetesResourceResourceOptions) []MembershipEndpointKubernetesResourceResourceOptions { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []MembershipEndpointKubernetesResourceResourceOptions + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewMembershipEndpointKubernetesResourceResourceOptions(c, &d, &n)) + } + + return items +} + +func canonicalizeMembershipState(des, initial *MembershipState, opts ...dcl.ApplyOption) *MembershipState { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &MembershipState{} + + return cDes +} + +func canonicalizeMembershipStateSlice(des, initial []MembershipState, opts ...dcl.ApplyOption) []MembershipState { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]MembershipState, 0, len(des)) + for _, d := range des { + cd := canonicalizeMembershipState(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]MembershipState, 0, len(des)) + for i, d := range des { + cd := canonicalizeMembershipState(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewMembershipState(c *Client, des, nw *MembershipState) *MembershipState { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for MembershipState while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewMembershipStateSet(c *Client, des, nw []MembershipState) []MembershipState { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []MembershipState + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareMembershipStateNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewMembershipState(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewMembershipStateSlice(c *Client, des, nw []MembershipState) []MembershipState { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []MembershipState + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewMembershipState(c, &d, &n)) + } + + return items +} + +func canonicalizeMembershipAuthority(des, initial *MembershipAuthority, opts ...dcl.ApplyOption) *MembershipAuthority { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &MembershipAuthority{} + + if dcl.StringCanonicalize(des.Issuer, initial.Issuer) || dcl.IsZeroValue(des.Issuer) { + cDes.Issuer = initial.Issuer + } else { + cDes.Issuer = des.Issuer + } + + return cDes +} + +func canonicalizeMembershipAuthoritySlice(des, initial []MembershipAuthority, opts ...dcl.ApplyOption) []MembershipAuthority { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]MembershipAuthority, 0, len(des)) + for _, d := range des { + cd := canonicalizeMembershipAuthority(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]MembershipAuthority, 0, len(des)) + for i, d := range des { + cd := canonicalizeMembershipAuthority(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewMembershipAuthority(c *Client, des, nw *MembershipAuthority) *MembershipAuthority { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for MembershipAuthority while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Issuer, nw.Issuer) { + nw.Issuer = des.Issuer + } + if dcl.StringCanonicalize(des.WorkloadIdentityPool, nw.WorkloadIdentityPool) { + nw.WorkloadIdentityPool = des.WorkloadIdentityPool + } + if dcl.StringCanonicalize(des.IdentityProvider, nw.IdentityProvider) { + nw.IdentityProvider = des.IdentityProvider + } + + return nw +} + +func canonicalizeNewMembershipAuthoritySet(c *Client, des, nw []MembershipAuthority) []MembershipAuthority { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []MembershipAuthority + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareMembershipAuthorityNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewMembershipAuthority(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewMembershipAuthoritySlice(c *Client, des, nw []MembershipAuthority) []MembershipAuthority { + if des == nil { + return nw + } + + // Lengths are unequal. 
A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []MembershipAuthority + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewMembershipAuthority(c, &d, &n)) + } + + return items +} + +// The differ returns a list of diffs, along with a list of operations that should be taken +// to remedy them. Right now, it does not attempt to consolidate operations - if several +// fields can be fixed with a patch update, it will perform the patch several times. +// Diffs on some fields will be ignored if the `desired` state has an empty (nil) +// value. This empty value indicates that the user does not care about the state for +// the field. Empty fields on the actual object will cause diffs. +// TODO(magic-modules-eng): for efficiency in some resources, add batching. +func diffMembership(c *Client, desired, actual *Membership, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) { + if desired == nil || actual == nil { + return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual) + } + + c.Config.Logger.Infof("Diff function called with desired state: %v", desired) + c.Config.Logger.Infof("Diff function called with actual state: %v", actual) + + var fn dcl.FieldName + var newDiffs []*dcl.FieldDiff + // New style diffs. + if ds, err := dcl.Diff(desired.Endpoint, actual.Endpoint, dcl.DiffInfo{ObjectFunction: compareMembershipEndpointNewStyle, EmptyObject: EmptyMembershipEndpoint, OperationSelector: dcl.TriggersOperation("updateMembershipUpdateMembershipOperation")}, fn.AddNest("Endpoint")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Labels, actual.Labels, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateMembershipUpdateMembershipOperation")}, fn.AddNest("Labels")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Description, actual.Description, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateMembershipUpdateMembershipOperation")}, fn.AddNest("Description")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.State, actual.State, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareMembershipStateNewStyle, EmptyObject: EmptyMembershipState, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("State")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.CreateTime, actual.CreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.DeleteTime, actual.DeleteTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DeleteTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.ExternalId, actual.ExternalId, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.TriggersOperation("updateMembershipUpdateMembershipOperation")}, fn.AddNest("ExternalId")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.LastConnectionTime, actual.LastConnectionTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("LastConnectionTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.UniqueId, actual.UniqueId, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("UniqueId")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Authority, actual.Authority, dcl.DiffInfo{ObjectFunction: compareMembershipAuthorityNewStyle, EmptyObject: EmptyMembershipAuthority, OperationSelector: dcl.TriggersOperation("updateMembershipUpdateMembershipOperation")}, fn.AddNest("Authority")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.InfrastructureType, actual.InfrastructureType, dcl.DiffInfo{ServerDefault: true, Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateMembershipUpdateMembershipOperation")}, fn.AddNest("InfrastructureType")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Location, actual.Location, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Location")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if len(newDiffs) > 0 { + c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs) + } + return newDiffs, nil +} +func compareMembershipEndpointNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*MembershipEndpoint) + if !ok { + desiredNotPointer, ok := d.(MembershipEndpoint) + if !ok { + return nil, fmt.Errorf("obj %v is not a MembershipEndpoint or *MembershipEndpoint", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*MembershipEndpoint) + if !ok { + actualNotPointer, ok := a.(MembershipEndpoint) + if !ok { + return nil, fmt.Errorf("obj %v is not a MembershipEndpoint", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.GkeCluster, actual.GkeCluster, dcl.DiffInfo{ObjectFunction: compareMembershipEndpointGkeClusterNewStyle, EmptyObject: EmptyMembershipEndpointGkeCluster, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("GkeCluster")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.KubernetesMetadata, actual.KubernetesMetadata, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareMembershipEndpointKubernetesMetadataNewStyle, EmptyObject: EmptyMembershipEndpointKubernetesMetadata, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KubernetesMetadata")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.KubernetesResource, actual.KubernetesResource, dcl.DiffInfo{ObjectFunction: compareMembershipEndpointKubernetesResourceNewStyle, EmptyObject: EmptyMembershipEndpointKubernetesResource, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KubernetesResource")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareMembershipEndpointGkeClusterNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*MembershipEndpointGkeCluster) + if !ok { + desiredNotPointer, ok := d.(MembershipEndpointGkeCluster) + if !ok { + return nil, fmt.Errorf("obj %v is not a MembershipEndpointGkeCluster or *MembershipEndpointGkeCluster", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*MembershipEndpointGkeCluster) + if !ok { + actualNotPointer, ok := a.(MembershipEndpointGkeCluster) + if !ok { + return nil, fmt.Errorf("obj %v is not a MembershipEndpointGkeCluster", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.ResourceLink, actual.ResourceLink, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ResourceLink")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareMembershipEndpointKubernetesMetadataNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*MembershipEndpointKubernetesMetadata) + if !ok { + desiredNotPointer, ok := d.(MembershipEndpointKubernetesMetadata) + if !ok { + return nil, fmt.Errorf("obj %v is not a MembershipEndpointKubernetesMetadata or *MembershipEndpointKubernetesMetadata", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*MembershipEndpointKubernetesMetadata) + if !ok { + actualNotPointer, ok := a.(MembershipEndpointKubernetesMetadata) + if !ok { + return nil, fmt.Errorf("obj %v is not a MembershipEndpointKubernetesMetadata", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.KubernetesApiServerVersion, actual.KubernetesApiServerVersion, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KubernetesApiServerVersion")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.NodeProviderId, actual.NodeProviderId, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NodeProviderId")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.NodeCount, actual.NodeCount, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NodeCount")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.VcpuCount, actual.VcpuCount, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("VcpuCount")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.MemoryMb, actual.MemoryMb, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MemoryMb")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareMembershipEndpointKubernetesResourceNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*MembershipEndpointKubernetesResource) + if !ok { + desiredNotPointer, ok := d.(MembershipEndpointKubernetesResource) + if !ok { + return nil, fmt.Errorf("obj %v is not a MembershipEndpointKubernetesResource or *MembershipEndpointKubernetesResource", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*MembershipEndpointKubernetesResource) + if !ok { + actualNotPointer, ok := a.(MembershipEndpointKubernetesResource) + if !ok { + return nil, fmt.Errorf("obj %v is not a MembershipEndpointKubernetesResource", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.MembershipCrManifest, actual.MembershipCrManifest, dcl.DiffInfo{Ignore: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MembershipCrManifest")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
	}

	if ds, err := dcl.Diff(desired.MembershipResources, actual.MembershipResources, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareMembershipEndpointKubernetesResourceMembershipResourcesNewStyle, EmptyObject: EmptyMembershipEndpointKubernetesResourceMembershipResources, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MembershipResources")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.ConnectResources, actual.ConnectResources, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareMembershipEndpointKubernetesResourceConnectResourcesNewStyle, EmptyObject: EmptyMembershipEndpointKubernetesResourceConnectResources, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ConnectResources")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.ResourceOptions, actual.ResourceOptions, dcl.DiffInfo{ObjectFunction: compareMembershipEndpointKubernetesResourceResourceOptionsNewStyle, EmptyObject: EmptyMembershipEndpointKubernetesResourceResourceOptions, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ResourceOptions")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareMembershipEndpointKubernetesResourceMembershipResourcesNewStyle computes the
// field-level diffs between a desired and an actual MembershipResources object. Both
// arguments may be passed by value or by pointer; every field change requires recreate.
func compareMembershipEndpointKubernetesResourceMembershipResourcesNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*MembershipEndpointKubernetesResourceMembershipResources)
	if !ok {
		desiredNotPointer, ok := d.(MembershipEndpointKubernetesResourceMembershipResources)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a MembershipEndpointKubernetesResourceMembershipResources or *MembershipEndpointKubernetesResourceMembershipResources", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*MembershipEndpointKubernetesResourceMembershipResources)
	if !ok {
		actualNotPointer, ok := a.(MembershipEndpointKubernetesResourceMembershipResources)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a MembershipEndpointKubernetesResourceMembershipResources", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.Manifest, actual.Manifest, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Manifest")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.ClusterScoped, actual.ClusterScoped, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ClusterScoped")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareMembershipEndpointKubernetesResourceConnectResourcesNewStyle computes the
// field-level diffs between a desired and an actual ConnectResources object; every
// field change requires recreate.
func compareMembershipEndpointKubernetesResourceConnectResourcesNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*MembershipEndpointKubernetesResourceConnectResources)
	if !ok {
		desiredNotPointer, ok := d.(MembershipEndpointKubernetesResourceConnectResources)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a MembershipEndpointKubernetesResourceConnectResources or *MembershipEndpointKubernetesResourceConnectResources", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*MembershipEndpointKubernetesResourceConnectResources)
	if !ok {
		actualNotPointer, ok := a.(MembershipEndpointKubernetesResourceConnectResources)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a MembershipEndpointKubernetesResourceConnectResources", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.Manifest, actual.Manifest, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Manifest")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.ClusterScoped, actual.ClusterScoped, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ClusterScoped")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareMembershipEndpointKubernetesResourceResourceOptionsNewStyle computes the
// field-level diffs between a desired and an actual ResourceOptions object; every
// field change requires recreate.
func compareMembershipEndpointKubernetesResourceResourceOptionsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*MembershipEndpointKubernetesResourceResourceOptions)
	if !ok {
		desiredNotPointer, ok := d.(MembershipEndpointKubernetesResourceResourceOptions)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a MembershipEndpointKubernetesResourceResourceOptions or *MembershipEndpointKubernetesResourceResourceOptions", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*MembershipEndpointKubernetesResourceResourceOptions)
	if !ok {
		actualNotPointer, ok := a.(MembershipEndpointKubernetesResourceResourceOptions)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a MembershipEndpointKubernetesResourceResourceOptions", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.ConnectVersion, actual.ConnectVersion, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ConnectVersion")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	// NOTE(review): the nest name "V1beta1Crd" (lowercase b) differs from the Go field
	// V1Beta1Crd — presumably the generator matches the API's JSON field casing; confirm
	// against the gkehub API schema before changing.
	if ds, err := dcl.Diff(desired.V1Beta1Crd, actual.V1Beta1Crd, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("V1beta1Crd")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareMembershipStateNewStyle computes the field-level diffs between a desired and
// an actual MembershipState; the only field (Code) is output-only.
func compareMembershipStateNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*MembershipState)
	if !ok {
		desiredNotPointer, ok := d.(MembershipState)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a MembershipState or *MembershipState", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*MembershipState)
	if !ok {
		actualNotPointer, ok := a.(MembershipState)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a MembershipState", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.Code, actual.Code, dcl.DiffInfo{OutputOnly: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Code")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareMembershipAuthorityNewStyle computes the field-level diffs between a desired
// and an actual MembershipAuthority. Issuer changes trigger the UpdateMembership
// operation; the remaining fields are output-only.
func compareMembershipAuthorityNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*MembershipAuthority)
	if !ok {
		desiredNotPointer, ok := d.(MembershipAuthority)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a MembershipAuthority or *MembershipAuthority", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*MembershipAuthority)
	if !ok {
		actualNotPointer, ok := a.(MembershipAuthority)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a MembershipAuthority", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.Issuer, actual.Issuer, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateMembershipUpdateMembershipOperation")}, fn.AddNest("Issuer")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.WorkloadIdentityPool, actual.WorkloadIdentityPool, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("WorkloadIdentityPool")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.IdentityProvider, actual.IdentityProvider, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("IdentityProvider")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// urlNormalized returns a copy of the resource struct with values normalized
// for URL substitutions. For instance, it converts long-form self-links to
// short-form so they can be substituted in.
func (r *Membership) urlNormalized() *Membership {
	normalized := dcl.Copy(*r).(Membership)
	normalized.Name = dcl.SelfLinkToName(r.Name)
	normalized.Description = dcl.SelfLinkToName(r.Description)
	normalized.ExternalId = dcl.SelfLinkToName(r.ExternalId)
	normalized.UniqueId = dcl.SelfLinkToName(r.UniqueId)
	normalized.Project = dcl.SelfLinkToName(r.Project)
	normalized.Location = dcl.SelfLinkToName(r.Location)
	return &normalized
}

// updateURL returns the request URL for the named update operation. Only the
// "UpdateMembership" operation is supported. The doubled braces are Go-template
// escapes: this file is a .tmpl, and they render to literal {{...}} placeholders
// that dcl.URL substitutes from the fields map.
func (r *Membership) updateURL(userBasePath, updateName string) (string, error) {
	nr := r.urlNormalized()
	if updateName == "UpdateMembership" {
		fields := map[string]interface{}{
			"project":  dcl.ValueOrEmptyString(nr.Project),
			"location": dcl.ValueOrEmptyString(nr.Location),
			"name":     dcl.ValueOrEmptyString(nr.Name),
		}
		return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/memberships/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, fields), nil

	}

	return "", fmt.Errorf("unknown update name: %s", updateName)
}

// marshal encodes the Membership resource into JSON for a Create request, and
// performs transformations from the resource schema to the API schema if
// necessary.
func (r *Membership) marshal(c *Client) ([]byte, error) {
	m, err := expandMembership(c, r)
	if err != nil {
		return nil, fmt.Errorf("error marshalling Membership: %w", err)
	}

	return json.Marshal(m)
}

// unmarshalMembership decodes JSON responses into the Membership resource schema.
func unmarshalMembership(b []byte, c *Client, res *Membership) (*Membership, error) {
	var m map[string]interface{}
	if err := json.Unmarshal(b, &m); err != nil {
		return nil, err
	}
	return unmarshalMapMembership(m, c, res)
}

// unmarshalMapMembership flattens an already-decoded JSON map into a Membership,
// erroring on an empty object.
func unmarshalMapMembership(m map[string]interface{}, c *Client, res *Membership) (*Membership, error) {

	flattened := flattenMembership(c, m, res)
	if flattened == nil {
		return nil, fmt.Errorf("attempted to flatten empty json object")
	}
	return flattened, nil
}

// expandMembership expands Membership into a JSON request object. Project and
// Location expand to dcl.EmptyValue() because they are supplied via the request
// URL rather than the body; Name is derived into its long form.
func expandMembership(c *Client, f *Membership) (map[string]interface{}, error) {
	m := make(map[string]interface{})
	res := f
	_ = res
	if v, err := expandMembershipEndpoint(c, f.Endpoint, res); err != nil {
		return nil, fmt.Errorf("error expanding Endpoint into endpoint: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["endpoint"] = v
	}
	if v, err := dcl.DeriveField("projects/%s/locations/%s/memberships/%s", f.Name, dcl.SelfLinkToName(f.Project), dcl.SelfLinkToName(f.Location), dcl.SelfLinkToName(f.Name)); err != nil {
		return nil, fmt.Errorf("error expanding Name into name: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["name"] = v
	}
	if v := f.Labels; dcl.ValueShouldBeSent(v) {
		m["labels"] = v
	}
	if v := f.Description; dcl.ValueShouldBeSent(v) {
		m["description"] = v
	}
	if v := f.ExternalId; dcl.ValueShouldBeSent(v) {
		m["externalId"] = v
	}
	if v, err := expandMembershipAuthority(c, f.Authority, res); err != nil {
		return nil, fmt.Errorf("error expanding Authority into authority: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["authority"] = v
	}
	if v := f.InfrastructureType; dcl.ValueShouldBeSent(v) {
		m["infrastructureType"] = v
	}
	if v, err := dcl.EmptyValue(); err != nil {
		return nil, fmt.Errorf("error expanding Project into project: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["project"] = v
	}
	if v, err := dcl.EmptyValue(); err != nil {
		return nil, fmt.Errorf("error expanding Location into location: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["location"] = v
	}

	return m, nil
}

// flattenMembership flattens Membership from a JSON request object into the
// Membership type. Output-only fields (state, timestamps, uniqueId, ...) are
// populated here even though expandMembership never sends them.
func flattenMembership(c *Client, i interface{}, res *Membership) *Membership {
	m, ok := i.(map[string]interface{})
	if !ok {
		return nil
	}
	if len(m) == 0 {
		return nil
	}

	resultRes := &Membership{}
	resultRes.Endpoint = flattenMembershipEndpoint(c, m["endpoint"], res)
	resultRes.Name = dcl.FlattenString(m["name"])
	resultRes.Labels = dcl.FlattenKeyValuePairs(m["labels"])
	resultRes.Description = dcl.FlattenString(m["description"])
	resultRes.State = flattenMembershipState(c, m["state"], res)
	resultRes.CreateTime = dcl.FlattenString(m["createTime"])
	resultRes.UpdateTime = dcl.FlattenString(m["updateTime"])
	resultRes.DeleteTime = dcl.FlattenString(m["deleteTime"])
	resultRes.ExternalId = dcl.FlattenString(m["externalId"])
	resultRes.LastConnectionTime = dcl.FlattenString(m["lastConnectionTime"])
	resultRes.UniqueId = dcl.FlattenString(m["uniqueId"])
	resultRes.Authority = flattenMembershipAuthority(c, m["authority"], res)
	resultRes.InfrastructureType = flattenMembershipInfrastructureTypeEnum(m["infrastructureType"])
	resultRes.Project = dcl.FlattenString(m["project"])
	resultRes.Location = dcl.FlattenString(m["location"])

	return resultRes
}

// expandMembershipEndpointMap expands the contents of MembershipEndpoint into a JSON
// request object.
func expandMembershipEndpointMap(c *Client, f map[string]MembershipEndpoint, res *Membership) (map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := make(map[string]interface{})
	for k, item := range f {
		i, err := expandMembershipEndpoint(c, &item, res)
		if err != nil {
			return nil, err
		}
		if i != nil {
			items[k] = i
		}
	}

	return items, nil
}

// expandMembershipEndpointSlice expands the contents of MembershipEndpoint into a JSON
// request object.
func expandMembershipEndpointSlice(c *Client, f []MembershipEndpoint, res *Membership) ([]map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := []map[string]interface{}{}
	for _, item := range f {
		i, err := expandMembershipEndpoint(c, &item, res)
		if err != nil {
			return nil, err
		}

		items = append(items, i)
	}

	return items, nil
}

// flattenMembershipEndpointMap flattens the contents of MembershipEndpoint from a JSON
// response object.
// NOTE(review): like all generated flatten map/slice helpers below, element type
// assertions (item.(map[string]interface{})) are unchecked and would panic on a
// non-object element — the generator assumes well-formed API responses.
func flattenMembershipEndpointMap(c *Client, i interface{}, res *Membership) map[string]MembershipEndpoint {
	a, ok := i.(map[string]interface{})
	if !ok {
		return map[string]MembershipEndpoint{}
	}

	if len(a) == 0 {
		return map[string]MembershipEndpoint{}
	}

	items := make(map[string]MembershipEndpoint)
	for k, item := range a {
		items[k] = *flattenMembershipEndpoint(c, item.(map[string]interface{}), res)
	}

	return items
}

// flattenMembershipEndpointSlice flattens the contents of MembershipEndpoint from a JSON
// response object.
func flattenMembershipEndpointSlice(c *Client, i interface{}, res *Membership) []MembershipEndpoint {
	a, ok := i.([]interface{})
	if !ok {
		return []MembershipEndpoint{}
	}

	if len(a) == 0 {
		return []MembershipEndpoint{}
	}

	items := make([]MembershipEndpoint, 0, len(a))
	for _, item := range a {
		items = append(items, *flattenMembershipEndpoint(c, item.(map[string]interface{}), res))
	}

	return items
}

// expandMembershipEndpoint expands an instance of MembershipEndpoint into a JSON
// request object. KubernetesMetadata is not serialized (only flattened), so it is
// presumably output-only — confirm against the gkehub API schema.
func expandMembershipEndpoint(c *Client, f *MembershipEndpoint, res *Membership) (map[string]interface{}, error) {
	if dcl.IsEmptyValueIndirect(f) {
		return nil, nil
	}

	m := make(map[string]interface{})
	if v, err := expandMembershipEndpointGkeCluster(c, f.GkeCluster, res); err != nil {
		return nil, fmt.Errorf("error expanding GkeCluster into gkeCluster: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["gkeCluster"] = v
	}
	if v, err := expandMembershipEndpointKubernetesResource(c, f.KubernetesResource, res); err != nil {
		return nil, fmt.Errorf("error expanding KubernetesResource into kubernetesResource: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["kubernetesResource"] = v
	}

	return m, nil
}

// flattenMembershipEndpoint flattens an instance of MembershipEndpoint from a JSON
// response object, returning the Empty sentinel for an empty (but present) object.
func flattenMembershipEndpoint(c *Client, i interface{}, res *Membership) *MembershipEndpoint {
	m, ok := i.(map[string]interface{})
	if !ok {
		return nil
	}

	r := &MembershipEndpoint{}

	if dcl.IsEmptyValueIndirect(i) {
		return EmptyMembershipEndpoint
	}
	r.GkeCluster = flattenMembershipEndpointGkeCluster(c, m["gkeCluster"], res)
	r.KubernetesMetadata = flattenMembershipEndpointKubernetesMetadata(c, m["kubernetesMetadata"], res)
	r.KubernetesResource = flattenMembershipEndpointKubernetesResource(c, m["kubernetesResource"], res)

	return r
}

// expandMembershipEndpointGkeClusterMap expands the contents of MembershipEndpointGkeCluster into a JSON
// request object.
func expandMembershipEndpointGkeClusterMap(c *Client, f map[string]MembershipEndpointGkeCluster, res *Membership) (map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := make(map[string]interface{})
	for k, item := range f {
		i, err := expandMembershipEndpointGkeCluster(c, &item, res)
		if err != nil {
			return nil, err
		}
		if i != nil {
			items[k] = i
		}
	}

	return items, nil
}

// expandMembershipEndpointGkeClusterSlice expands the contents of MembershipEndpointGkeCluster into a JSON
// request object.
func expandMembershipEndpointGkeClusterSlice(c *Client, f []MembershipEndpointGkeCluster, res *Membership) ([]map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := []map[string]interface{}{}
	for _, item := range f {
		i, err := expandMembershipEndpointGkeCluster(c, &item, res)
		if err != nil {
			return nil, err
		}

		items = append(items, i)
	}

	return items, nil
}

// flattenMembershipEndpointGkeClusterMap flattens the contents of MembershipEndpointGkeCluster from a JSON
// response object.
func flattenMembershipEndpointGkeClusterMap(c *Client, i interface{}, res *Membership) map[string]MembershipEndpointGkeCluster {
	a, ok := i.(map[string]interface{})
	if !ok {
		return map[string]MembershipEndpointGkeCluster{}
	}

	if len(a) == 0 {
		return map[string]MembershipEndpointGkeCluster{}
	}

	items := make(map[string]MembershipEndpointGkeCluster)
	for k, item := range a {
		items[k] = *flattenMembershipEndpointGkeCluster(c, item.(map[string]interface{}), res)
	}

	return items
}

// flattenMembershipEndpointGkeClusterSlice flattens the contents of MembershipEndpointGkeCluster from a JSON
// response object.
func flattenMembershipEndpointGkeClusterSlice(c *Client, i interface{}, res *Membership) []MembershipEndpointGkeCluster {
	a, ok := i.([]interface{})
	if !ok {
		return []MembershipEndpointGkeCluster{}
	}

	if len(a) == 0 {
		return []MembershipEndpointGkeCluster{}
	}

	items := make([]MembershipEndpointGkeCluster, 0, len(a))
	for _, item := range a {
		items = append(items, *flattenMembershipEndpointGkeCluster(c, item.(map[string]interface{}), res))
	}

	return items
}

// expandMembershipEndpointGkeCluster expands an instance of MembershipEndpointGkeCluster into a JSON
// request object. ResourceLink is expanded via the hub reference-link helper rather
// than sent verbatim.
func expandMembershipEndpointGkeCluster(c *Client, f *MembershipEndpointGkeCluster, res *Membership) (map[string]interface{}, error) {
	if dcl.IsEmptyValueIndirect(f) {
		return nil, nil
	}

	m := make(map[string]interface{})
	if v, err := expandHubReferenceLink(c, f.ResourceLink, res); err != nil {
		return nil, fmt.Errorf("error expanding ResourceLink into resourceLink: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["resourceLink"] = v
	}

	return m, nil
}

// flattenMembershipEndpointGkeCluster flattens an instance of MembershipEndpointGkeCluster from a JSON
// response object.
func flattenMembershipEndpointGkeCluster(c *Client, i interface{}, res *Membership) *MembershipEndpointGkeCluster {
	m, ok := i.(map[string]interface{})
	if !ok {
		return nil
	}

	r := &MembershipEndpointGkeCluster{}

	if dcl.IsEmptyValueIndirect(i) {
		return EmptyMembershipEndpointGkeCluster
	}
	r.ResourceLink = flattenHubReferenceLink(c, m["resourceLink"], res)

	return r
}

// expandMembershipEndpointKubernetesMetadataMap expands the contents of MembershipEndpointKubernetesMetadata into a JSON
// request object.
func expandMembershipEndpointKubernetesMetadataMap(c *Client, f map[string]MembershipEndpointKubernetesMetadata, res *Membership) (map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := make(map[string]interface{})
	for k, item := range f {
		i, err := expandMembershipEndpointKubernetesMetadata(c, &item, res)
		if err != nil {
			return nil, err
		}
		if i != nil {
			items[k] = i
		}
	}

	return items, nil
}

// expandMembershipEndpointKubernetesMetadataSlice expands the contents of MembershipEndpointKubernetesMetadata into a JSON
// request object.
func expandMembershipEndpointKubernetesMetadataSlice(c *Client, f []MembershipEndpointKubernetesMetadata, res *Membership) ([]map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := []map[string]interface{}{}
	for _, item := range f {
		i, err := expandMembershipEndpointKubernetesMetadata(c, &item, res)
		if err != nil {
			return nil, err
		}

		items = append(items, i)
	}

	return items, nil
}

// flattenMembershipEndpointKubernetesMetadataMap flattens the contents of MembershipEndpointKubernetesMetadata from a JSON
// response object.
func flattenMembershipEndpointKubernetesMetadataMap(c *Client, i interface{}, res *Membership) map[string]MembershipEndpointKubernetesMetadata {
	a, ok := i.(map[string]interface{})
	if !ok {
		return map[string]MembershipEndpointKubernetesMetadata{}
	}

	if len(a) == 0 {
		return map[string]MembershipEndpointKubernetesMetadata{}
	}

	items := make(map[string]MembershipEndpointKubernetesMetadata)
	for k, item := range a {
		items[k] = *flattenMembershipEndpointKubernetesMetadata(c, item.(map[string]interface{}), res)
	}

	return items
}

// flattenMembershipEndpointKubernetesMetadataSlice flattens the contents of MembershipEndpointKubernetesMetadata from a JSON
// response object.
func flattenMembershipEndpointKubernetesMetadataSlice(c *Client, i interface{}, res *Membership) []MembershipEndpointKubernetesMetadata {
	a, ok := i.([]interface{})
	if !ok {
		return []MembershipEndpointKubernetesMetadata{}
	}

	if len(a) == 0 {
		return []MembershipEndpointKubernetesMetadata{}
	}

	items := make([]MembershipEndpointKubernetesMetadata, 0, len(a))
	for _, item := range a {
		items = append(items, *flattenMembershipEndpointKubernetesMetadata(c, item.(map[string]interface{}), res))
	}

	return items
}

// expandMembershipEndpointKubernetesMetadata expands an instance of MembershipEndpointKubernetesMetadata into a JSON
// request object. No fields are serialized here while the flatten side reads six
// fields — presumably every KubernetesMetadata field is output-only; confirm
// against the gkehub API schema.
func expandMembershipEndpointKubernetesMetadata(c *Client, f *MembershipEndpointKubernetesMetadata, res *Membership) (map[string]interface{}, error) {
	if dcl.IsEmptyValueIndirect(f) {
		return nil, nil
	}

	m := make(map[string]interface{})

	return m, nil
}

// flattenMembershipEndpointKubernetesMetadata flattens an instance of MembershipEndpointKubernetesMetadata from a JSON
// response object.
func flattenMembershipEndpointKubernetesMetadata(c *Client, i interface{}, res *Membership) *MembershipEndpointKubernetesMetadata {
	m, ok := i.(map[string]interface{})
	if !ok {
		return nil
	}

	r := &MembershipEndpointKubernetesMetadata{}

	if dcl.IsEmptyValueIndirect(i) {
		return EmptyMembershipEndpointKubernetesMetadata
	}
	r.KubernetesApiServerVersion = dcl.FlattenString(m["kubernetesApiServerVersion"])
	r.NodeProviderId = dcl.FlattenString(m["nodeProviderId"])
	r.NodeCount = dcl.FlattenInteger(m["nodeCount"])
	r.VcpuCount = dcl.FlattenInteger(m["vcpuCount"])
	r.MemoryMb = dcl.FlattenInteger(m["memoryMb"])
	r.UpdateTime = dcl.FlattenString(m["updateTime"])

	return r
}

// expandMembershipEndpointKubernetesResourceMap expands the contents of MembershipEndpointKubernetesResource into a JSON
// request object.
func expandMembershipEndpointKubernetesResourceMap(c *Client, f map[string]MembershipEndpointKubernetesResource, res *Membership) (map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := make(map[string]interface{})
	for k, item := range f {
		i, err := expandMembershipEndpointKubernetesResource(c, &item, res)
		if err != nil {
			return nil, err
		}
		if i != nil {
			items[k] = i
		}
	}

	return items, nil
}

// expandMembershipEndpointKubernetesResourceSlice expands the contents of MembershipEndpointKubernetesResource into a JSON
// request object.
func expandMembershipEndpointKubernetesResourceSlice(c *Client, f []MembershipEndpointKubernetesResource, res *Membership) ([]map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := []map[string]interface{}{}
	for _, item := range f {
		i, err := expandMembershipEndpointKubernetesResource(c, &item, res)
		if err != nil {
			return nil, err
		}

		items = append(items, i)
	}

	return items, nil
}

// flattenMembershipEndpointKubernetesResourceMap flattens the contents of MembershipEndpointKubernetesResource from a JSON
// response object.
func flattenMembershipEndpointKubernetesResourceMap(c *Client, i interface{}, res *Membership) map[string]MembershipEndpointKubernetesResource {
	a, ok := i.(map[string]interface{})
	if !ok {
		return map[string]MembershipEndpointKubernetesResource{}
	}

	if len(a) == 0 {
		return map[string]MembershipEndpointKubernetesResource{}
	}

	items := make(map[string]MembershipEndpointKubernetesResource)
	for k, item := range a {
		items[k] = *flattenMembershipEndpointKubernetesResource(c, item.(map[string]interface{}), res)
	}

	return items
}

// flattenMembershipEndpointKubernetesResourceSlice flattens the contents of MembershipEndpointKubernetesResource from a JSON
// response object.
func flattenMembershipEndpointKubernetesResourceSlice(c *Client, i interface{}, res *Membership) []MembershipEndpointKubernetesResource {
	a, ok := i.([]interface{})
	if !ok {
		return []MembershipEndpointKubernetesResource{}
	}

	if len(a) == 0 {
		return []MembershipEndpointKubernetesResource{}
	}

	items := make([]MembershipEndpointKubernetesResource, 0, len(a))
	for _, item := range a {
		items = append(items, *flattenMembershipEndpointKubernetesResource(c, item.(map[string]interface{}), res))
	}

	return items
}

// expandMembershipEndpointKubernetesResource expands an instance of MembershipEndpointKubernetesResource into a JSON
// request object.
func expandMembershipEndpointKubernetesResource(c *Client, f *MembershipEndpointKubernetesResource, res *Membership) (map[string]interface{}, error) {
	if dcl.IsEmptyValueIndirect(f) {
		return nil, nil
	}

	m := make(map[string]interface{})
	if v := f.MembershipCrManifest; !dcl.IsEmptyValueIndirect(v) {
		m["membershipCrManifest"] = v
	}
	if v, err := expandMembershipEndpointKubernetesResourceResourceOptions(c, f.ResourceOptions, res); err != nil {
		return nil, fmt.Errorf("error expanding ResourceOptions into resourceOptions: %w", err)
	} else if !dcl.IsEmptyValueIndirect(v) {
		m["resourceOptions"] = v
	}

	return m, nil
}

// flattenMembershipEndpointKubernetesResource flattens an instance of MembershipEndpointKubernetesResource from a JSON
// response object. MembershipCrManifest is treated as a secret value; the two
// resources slices are output-only (only flattened, never expanded).
func flattenMembershipEndpointKubernetesResource(c *Client, i interface{}, res *Membership) *MembershipEndpointKubernetesResource {
	m, ok := i.(map[string]interface{})
	if !ok {
		return nil
	}

	r := &MembershipEndpointKubernetesResource{}

	if dcl.IsEmptyValueIndirect(i) {
		return EmptyMembershipEndpointKubernetesResource
	}
	r.MembershipCrManifest = dcl.FlattenSecretValue(m["membershipCrManifest"])
	r.MembershipResources = flattenMembershipEndpointKubernetesResourceMembershipResourcesSlice(c, m["membershipResources"], res)
	r.ConnectResources = flattenMembershipEndpointKubernetesResourceConnectResourcesSlice(c, m["connectResources"], res)
	r.ResourceOptions = flattenMembershipEndpointKubernetesResourceResourceOptions(c, m["resourceOptions"], res)

	return r
}

// expandMembershipEndpointKubernetesResourceMembershipResourcesMap expands the contents of MembershipEndpointKubernetesResourceMembershipResources into a JSON
// request object.
func expandMembershipEndpointKubernetesResourceMembershipResourcesMap(c *Client, f map[string]MembershipEndpointKubernetesResourceMembershipResources, res *Membership) (map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := make(map[string]interface{})
	for k, item := range f {
		i, err := expandMembershipEndpointKubernetesResourceMembershipResources(c, &item, res)
		if err != nil {
			return nil, err
		}
		if i != nil {
			items[k] = i
		}
	}

	return items, nil
}

// expandMembershipEndpointKubernetesResourceMembershipResourcesSlice expands the contents of MembershipEndpointKubernetesResourceMembershipResources into a JSON
// request object.
func expandMembershipEndpointKubernetesResourceMembershipResourcesSlice(c *Client, f []MembershipEndpointKubernetesResourceMembershipResources, res *Membership) ([]map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := []map[string]interface{}{}
	for _, item := range f {
		i, err := expandMembershipEndpointKubernetesResourceMembershipResources(c, &item, res)
		if err != nil {
			return nil, err
		}

		items = append(items, i)
	}

	return items, nil
}

// flattenMembershipEndpointKubernetesResourceMembershipResourcesMap flattens the contents of MembershipEndpointKubernetesResourceMembershipResources from a JSON
// response object.
func flattenMembershipEndpointKubernetesResourceMembershipResourcesMap(c *Client, i interface{}, res *Membership) map[string]MembershipEndpointKubernetesResourceMembershipResources {
	a, ok := i.(map[string]interface{})
	if !ok {
		return map[string]MembershipEndpointKubernetesResourceMembershipResources{}
	}

	if len(a) == 0 {
		return map[string]MembershipEndpointKubernetesResourceMembershipResources{}
	}

	items := make(map[string]MembershipEndpointKubernetesResourceMembershipResources)
	for k, item := range a {
		items[k] = *flattenMembershipEndpointKubernetesResourceMembershipResources(c, item.(map[string]interface{}), res)
	}

	return items
}

// flattenMembershipEndpointKubernetesResourceMembershipResourcesSlice flattens the contents of MembershipEndpointKubernetesResourceMembershipResources from a JSON
// response object.
func flattenMembershipEndpointKubernetesResourceMembershipResourcesSlice(c *Client, i interface{}, res *Membership) []MembershipEndpointKubernetesResourceMembershipResources {
	a, ok := i.([]interface{})
	if !ok {
		return []MembershipEndpointKubernetesResourceMembershipResources{}
	}

	if len(a) == 0 {
		return []MembershipEndpointKubernetesResourceMembershipResources{}
	}

	items := make([]MembershipEndpointKubernetesResourceMembershipResources, 0, len(a))
	for _, item := range a {
		items = append(items, *flattenMembershipEndpointKubernetesResourceMembershipResources(c, item.(map[string]interface{}), res))
	}

	return items
}

// expandMembershipEndpointKubernetesResourceMembershipResources expands an instance of MembershipEndpointKubernetesResourceMembershipResources into a JSON
// request object.
func expandMembershipEndpointKubernetesResourceMembershipResources(c *Client, f *MembershipEndpointKubernetesResourceMembershipResources, res *Membership) (map[string]interface{}, error) {
	if dcl.IsEmptyValueIndirect(f) {
		return nil, nil
	}

	m := make(map[string]interface{})
	if v := f.Manifest; !dcl.IsEmptyValueIndirect(v) {
		m["manifest"] = v
	}
	if v := f.ClusterScoped; !dcl.IsEmptyValueIndirect(v) {
		m["clusterScoped"] = v
	}

	return m, nil
}

// flattenMembershipEndpointKubernetesResourceMembershipResources flattens an instance of MembershipEndpointKubernetesResourceMembershipResources from a JSON
// response object.
func flattenMembershipEndpointKubernetesResourceMembershipResources(c *Client, i interface{}, res *Membership) *MembershipEndpointKubernetesResourceMembershipResources {
	m, ok := i.(map[string]interface{})
	if !ok {
		return nil
	}

	r := &MembershipEndpointKubernetesResourceMembershipResources{}

	if dcl.IsEmptyValueIndirect(i) {
		return EmptyMembershipEndpointKubernetesResourceMembershipResources
	}
	r.Manifest = dcl.FlattenString(m["manifest"])
	r.ClusterScoped = dcl.FlattenBool(m["clusterScoped"])

	return r
}

// expandMembershipEndpointKubernetesResourceConnectResourcesMap expands the contents of MembershipEndpointKubernetesResourceConnectResources into a JSON
// request object.
func expandMembershipEndpointKubernetesResourceConnectResourcesMap(c *Client, f map[string]MembershipEndpointKubernetesResourceConnectResources, res *Membership) (map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := make(map[string]interface{})
	for k, item := range f {
		i, err := expandMembershipEndpointKubernetesResourceConnectResources(c, &item, res)
		if err != nil {
			return nil, err
		}
		if i != nil {
			items[k] = i
		}
	}

	return items, nil
}

// expandMembershipEndpointKubernetesResourceConnectResourcesSlice expands the contents of MembershipEndpointKubernetesResourceConnectResources into a JSON
// request object.
func expandMembershipEndpointKubernetesResourceConnectResourcesSlice(c *Client, f []MembershipEndpointKubernetesResourceConnectResources, res *Membership) ([]map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := []map[string]interface{}{}
	for _, item := range f {
		i, err := expandMembershipEndpointKubernetesResourceConnectResources(c, &item, res)
		if err != nil {
			return nil, err
		}

		items = append(items, i)
	}

	return items, nil
}

// flattenMembershipEndpointKubernetesResourceConnectResourcesMap flattens the contents of MembershipEndpointKubernetesResourceConnectResources from a JSON
// response object.
func flattenMembershipEndpointKubernetesResourceConnectResourcesMap(c *Client, i interface{}, res *Membership) map[string]MembershipEndpointKubernetesResourceConnectResources {
	a, ok := i.(map[string]interface{})
	if !ok {
		return map[string]MembershipEndpointKubernetesResourceConnectResources{}
	}

	if len(a) == 0 {
		return map[string]MembershipEndpointKubernetesResourceConnectResources{}
	}

	items := make(map[string]MembershipEndpointKubernetesResourceConnectResources)
	for k, item := range a {
		items[k] = *flattenMembershipEndpointKubernetesResourceConnectResources(c, item.(map[string]interface{}), res)
	}

	return items
}

// flattenMembershipEndpointKubernetesResourceConnectResourcesSlice flattens the contents of MembershipEndpointKubernetesResourceConnectResources from a JSON
// response object.
func flattenMembershipEndpointKubernetesResourceConnectResourcesSlice(c *Client, i interface{}, res *Membership) []MembershipEndpointKubernetesResourceConnectResources {
	a, ok := i.([]interface{})
	if !ok {
		return []MembershipEndpointKubernetesResourceConnectResources{}
	}

	if len(a) == 0 {
		return []MembershipEndpointKubernetesResourceConnectResources{}
	}

	items := make([]MembershipEndpointKubernetesResourceConnectResources, 0, len(a))
	for _, item := range a {
		items = append(items, *flattenMembershipEndpointKubernetesResourceConnectResources(c, item.(map[string]interface{}), res))
	}

	return items
}

// expandMembershipEndpointKubernetesResourceConnectResources expands an instance of MembershipEndpointKubernetesResourceConnectResources into a JSON
// request object.
func expandMembershipEndpointKubernetesResourceConnectResources(c *Client, f *MembershipEndpointKubernetesResourceConnectResources, res *Membership) (map[string]interface{}, error) {
	if dcl.IsEmptyValueIndirect(f) {
		return nil, nil
	}

	m := make(map[string]interface{})
	if v := f.Manifest; !dcl.IsEmptyValueIndirect(v) {
		m["manifest"] = v
	}
	if v := f.ClusterScoped; !dcl.IsEmptyValueIndirect(v) {
		m["clusterScoped"] = v
	}

	return m, nil
}

// flattenMembershipEndpointKubernetesResourceConnectResources flattens an instance of MembershipEndpointKubernetesResourceConnectResources from a JSON
// response object.
func flattenMembershipEndpointKubernetesResourceConnectResources(c *Client, i interface{}, res *Membership) *MembershipEndpointKubernetesResourceConnectResources {
	m, ok := i.(map[string]interface{})
	if !ok {
		return nil
	}

	r := &MembershipEndpointKubernetesResourceConnectResources{}

	if dcl.IsEmptyValueIndirect(i) {
		return EmptyMembershipEndpointKubernetesResourceConnectResources
	}
	r.Manifest = dcl.FlattenString(m["manifest"])
	r.ClusterScoped = dcl.FlattenBool(m["clusterScoped"])

	return r
}

// expandMembershipEndpointKubernetesResourceResourceOptionsMap expands the contents of MembershipEndpointKubernetesResourceResourceOptions into a JSON
// request object.
func expandMembershipEndpointKubernetesResourceResourceOptionsMap(c *Client, f map[string]MembershipEndpointKubernetesResourceResourceOptions, res *Membership) (map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := make(map[string]interface{})
	for k, item := range f {
		i, err := expandMembershipEndpointKubernetesResourceResourceOptions(c, &item, res)
		if err != nil {
			return nil, err
		}
		if i != nil {
			items[k] = i
		}
	}

	return items, nil
}

// expandMembershipEndpointKubernetesResourceResourceOptionsSlice expands the contents of MembershipEndpointKubernetesResourceResourceOptions into a JSON
// request object.
func expandMembershipEndpointKubernetesResourceResourceOptionsSlice(c *Client, f []MembershipEndpointKubernetesResourceResourceOptions, res *Membership) ([]map[string]interface{}, error) {
	if f == nil {
		return nil, nil
	}

	items := []map[string]interface{}{}
	for _, item := range f {
		i, err := expandMembershipEndpointKubernetesResourceResourceOptions(c, &item, res)
		if err != nil {
			return nil, err
		}

		items = append(items, i)
	}

	return items, nil
}

// flattenMembershipEndpointKubernetesResourceResourceOptionsMap flattens the contents of MembershipEndpointKubernetesResourceResourceOptions from a JSON
// response object.
+func flattenMembershipEndpointKubernetesResourceResourceOptionsMap(c *Client, i interface{}, res *Membership) map[string]MembershipEndpointKubernetesResourceResourceOptions { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]MembershipEndpointKubernetesResourceResourceOptions{} + } + + if len(a) == 0 { + return map[string]MembershipEndpointKubernetesResourceResourceOptions{} + } + + items := make(map[string]MembershipEndpointKubernetesResourceResourceOptions) + for k, item := range a { + items[k] = *flattenMembershipEndpointKubernetesResourceResourceOptions(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenMembershipEndpointKubernetesResourceResourceOptionsSlice flattens the contents of MembershipEndpointKubernetesResourceResourceOptions from a JSON +// response object. +func flattenMembershipEndpointKubernetesResourceResourceOptionsSlice(c *Client, i interface{}, res *Membership) []MembershipEndpointKubernetesResourceResourceOptions { + a, ok := i.([]interface{}) + if !ok { + return []MembershipEndpointKubernetesResourceResourceOptions{} + } + + if len(a) == 0 { + return []MembershipEndpointKubernetesResourceResourceOptions{} + } + + items := make([]MembershipEndpointKubernetesResourceResourceOptions, 0, len(a)) + for _, item := range a { + items = append(items, *flattenMembershipEndpointKubernetesResourceResourceOptions(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandMembershipEndpointKubernetesResourceResourceOptions expands an instance of MembershipEndpointKubernetesResourceResourceOptions into a JSON +// request object. 
+func expandMembershipEndpointKubernetesResourceResourceOptions(c *Client, f *MembershipEndpointKubernetesResourceResourceOptions, res *Membership) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.ConnectVersion; !dcl.IsEmptyValueIndirect(v) { + m["connectVersion"] = v + } + if v := f.V1Beta1Crd; !dcl.IsEmptyValueIndirect(v) { + m["v1beta1Crd"] = v + } + + return m, nil +} + +// flattenMembershipEndpointKubernetesResourceResourceOptions flattens an instance of MembershipEndpointKubernetesResourceResourceOptions from a JSON +// response object. +func flattenMembershipEndpointKubernetesResourceResourceOptions(c *Client, i interface{}, res *Membership) *MembershipEndpointKubernetesResourceResourceOptions { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &MembershipEndpointKubernetesResourceResourceOptions{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyMembershipEndpointKubernetesResourceResourceOptions + } + r.ConnectVersion = dcl.FlattenString(m["connectVersion"]) + r.V1Beta1Crd = dcl.FlattenBool(m["v1beta1Crd"]) + + return r +} + +// expandMembershipStateMap expands the contents of MembershipState into a JSON +// request object. +func expandMembershipStateMap(c *Client, f map[string]MembershipState, res *Membership) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandMembershipState(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandMembershipStateSlice expands the contents of MembershipState into a JSON +// request object. 
+func expandMembershipStateSlice(c *Client, f []MembershipState, res *Membership) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandMembershipState(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenMembershipStateMap flattens the contents of MembershipState from a JSON +// response object. +func flattenMembershipStateMap(c *Client, i interface{}, res *Membership) map[string]MembershipState { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]MembershipState{} + } + + if len(a) == 0 { + return map[string]MembershipState{} + } + + items := make(map[string]MembershipState) + for k, item := range a { + items[k] = *flattenMembershipState(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenMembershipStateSlice flattens the contents of MembershipState from a JSON +// response object. +func flattenMembershipStateSlice(c *Client, i interface{}, res *Membership) []MembershipState { + a, ok := i.([]interface{}) + if !ok { + return []MembershipState{} + } + + if len(a) == 0 { + return []MembershipState{} + } + + items := make([]MembershipState, 0, len(a)) + for _, item := range a { + items = append(items, *flattenMembershipState(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandMembershipState expands an instance of MembershipState into a JSON +// request object. +func expandMembershipState(c *Client, f *MembershipState, res *Membership) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + + return m, nil +} + +// flattenMembershipState flattens an instance of MembershipState from a JSON +// response object. 
+func flattenMembershipState(c *Client, i interface{}, res *Membership) *MembershipState { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &MembershipState{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyMembershipState + } + r.Code = flattenMembershipStateCodeEnum(m["code"]) + + return r +} + +// expandMembershipAuthorityMap expands the contents of MembershipAuthority into a JSON +// request object. +func expandMembershipAuthorityMap(c *Client, f map[string]MembershipAuthority, res *Membership) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandMembershipAuthority(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandMembershipAuthoritySlice expands the contents of MembershipAuthority into a JSON +// request object. +func expandMembershipAuthoritySlice(c *Client, f []MembershipAuthority, res *Membership) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandMembershipAuthority(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenMembershipAuthorityMap flattens the contents of MembershipAuthority from a JSON +// response object. 
+func flattenMembershipAuthorityMap(c *Client, i interface{}, res *Membership) map[string]MembershipAuthority { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]MembershipAuthority{} + } + + if len(a) == 0 { + return map[string]MembershipAuthority{} + } + + items := make(map[string]MembershipAuthority) + for k, item := range a { + items[k] = *flattenMembershipAuthority(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenMembershipAuthoritySlice flattens the contents of MembershipAuthority from a JSON +// response object. +func flattenMembershipAuthoritySlice(c *Client, i interface{}, res *Membership) []MembershipAuthority { + a, ok := i.([]interface{}) + if !ok { + return []MembershipAuthority{} + } + + if len(a) == 0 { + return []MembershipAuthority{} + } + + items := make([]MembershipAuthority, 0, len(a)) + for _, item := range a { + items = append(items, *flattenMembershipAuthority(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandMembershipAuthority expands an instance of MembershipAuthority into a JSON +// request object. +func expandMembershipAuthority(c *Client, f *MembershipAuthority, res *Membership) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Issuer; !dcl.IsEmptyValueIndirect(v) { + m["issuer"] = v + } + + return m, nil +} + +// flattenMembershipAuthority flattens an instance of MembershipAuthority from a JSON +// response object. 
+func flattenMembershipAuthority(c *Client, i interface{}, res *Membership) *MembershipAuthority { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &MembershipAuthority{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyMembershipAuthority + } + r.Issuer = dcl.FlattenString(m["issuer"]) + r.WorkloadIdentityPool = dcl.FlattenString(m["workloadIdentityPool"]) + r.IdentityProvider = dcl.FlattenString(m["identityProvider"]) + + return r +} + +// flattenMembershipStateCodeEnumMap flattens the contents of MembershipStateCodeEnum from a JSON +// response object. +func flattenMembershipStateCodeEnumMap(c *Client, i interface{}, res *Membership) map[string]MembershipStateCodeEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]MembershipStateCodeEnum{} + } + + if len(a) == 0 { + return map[string]MembershipStateCodeEnum{} + } + + items := make(map[string]MembershipStateCodeEnum) + for k, item := range a { + items[k] = *flattenMembershipStateCodeEnum(item.(interface{})) + } + + return items +} + +// flattenMembershipStateCodeEnumSlice flattens the contents of MembershipStateCodeEnum from a JSON +// response object. +func flattenMembershipStateCodeEnumSlice(c *Client, i interface{}, res *Membership) []MembershipStateCodeEnum { + a, ok := i.([]interface{}) + if !ok { + return []MembershipStateCodeEnum{} + } + + if len(a) == 0 { + return []MembershipStateCodeEnum{} + } + + items := make([]MembershipStateCodeEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenMembershipStateCodeEnum(item.(interface{}))) + } + + return items +} + +// flattenMembershipStateCodeEnum asserts that an interface is a string, and returns a +// pointer to a *MembershipStateCodeEnum with the same value as that string. 
+func flattenMembershipStateCodeEnum(i interface{}) *MembershipStateCodeEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return MembershipStateCodeEnumRef(s) +} + +// flattenMembershipInfrastructureTypeEnumMap flattens the contents of MembershipInfrastructureTypeEnum from a JSON +// response object. +func flattenMembershipInfrastructureTypeEnumMap(c *Client, i interface{}, res *Membership) map[string]MembershipInfrastructureTypeEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]MembershipInfrastructureTypeEnum{} + } + + if len(a) == 0 { + return map[string]MembershipInfrastructureTypeEnum{} + } + + items := make(map[string]MembershipInfrastructureTypeEnum) + for k, item := range a { + items[k] = *flattenMembershipInfrastructureTypeEnum(item.(interface{})) + } + + return items +} + +// flattenMembershipInfrastructureTypeEnumSlice flattens the contents of MembershipInfrastructureTypeEnum from a JSON +// response object. +func flattenMembershipInfrastructureTypeEnumSlice(c *Client, i interface{}, res *Membership) []MembershipInfrastructureTypeEnum { + a, ok := i.([]interface{}) + if !ok { + return []MembershipInfrastructureTypeEnum{} + } + + if len(a) == 0 { + return []MembershipInfrastructureTypeEnum{} + } + + items := make([]MembershipInfrastructureTypeEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenMembershipInfrastructureTypeEnum(item.(interface{}))) + } + + return items +} + +// flattenMembershipInfrastructureTypeEnum asserts that an interface is a string, and returns a +// pointer to a *MembershipInfrastructureTypeEnum with the same value as that string. 
+func flattenMembershipInfrastructureTypeEnum(i interface{}) *MembershipInfrastructureTypeEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return MembershipInfrastructureTypeEnumRef(s) +} + +// This function returns a matcher that checks whether a serialized resource matches this resource +// in its parameters (as defined by the fields in a Get, which definitionally define resource +// identity). This is useful in extracting the element from a List call. +func (r *Membership) matcher(c *Client) func([]byte) bool { + return func(b []byte) bool { + cr, err := unmarshalMembership(b, c, r) + if err != nil { + c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.") + return false + } + nr := r.urlNormalized() + ncr := cr.urlNormalized() + c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr) + + if nr.Project == nil && ncr.Project == nil { + c.Config.Logger.Info("Both Project fields null - considering equal.") + } else if nr.Project == nil || ncr.Project == nil { + c.Config.Logger.Info("Only one Project field is null - considering unequal.") + return false + } else if *nr.Project != *ncr.Project { + return false + } + if nr.Location == nil && ncr.Location == nil { + c.Config.Logger.Info("Both Location fields null - considering equal.") + } else if nr.Location == nil || ncr.Location == nil { + c.Config.Logger.Info("Only one Location field is null - considering unequal.") + return false + } else if *nr.Location != *ncr.Location { + return false + } + if nr.Name == nil && ncr.Name == nil { + c.Config.Logger.Info("Both Name fields null - considering equal.") + } else if nr.Name == nil || ncr.Name == nil { + c.Config.Logger.Info("Only one Name field is null - considering unequal.") + return false + } else if *nr.Name != *ncr.Name { + return false + } + return true + } +} + +type membershipDiff struct { + // The diff should include one or the other of RequiresRecreate or UpdateOp. 
+ RequiresRecreate bool + UpdateOp membershipApiOperation + FieldName string // used for error logging +} + +func convertFieldDiffsToMembershipDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]membershipDiff, error) { + opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff) + // Map each operation name to the field diffs associated with it. + for _, fd := range fds { + for _, ro := range fd.ResultingOperation { + if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok { + fieldDiffs = append(fieldDiffs, fd) + opNamesToFieldDiffs[ro] = fieldDiffs + } else { + config.Logger.Infof("%s required due to diff: %v", ro, fd) + opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd} + } + } + } + var diffs []membershipDiff + // For each operation name, create a membershipDiff which contains the operation. + for opName, fieldDiffs := range opNamesToFieldDiffs { + // Use the first field diff's field name for logging required recreate error. + diff := membershipDiff{FieldName: fieldDiffs[0].FieldName} + if opName == "Recreate" { + diff.RequiresRecreate = true + } else { + apiOp, err := convertOpNameToMembershipApiOperation(opName, fieldDiffs, opts...) + if err != nil { + return diffs, err + } + diff.UpdateOp = apiOp + } + diffs = append(diffs, diff) + } + return diffs, nil +} + +func convertOpNameToMembershipApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (membershipApiOperation, error) { + switch opName { + + case "updateMembershipUpdateMembershipOperation": + return &updateMembershipUpdateMembershipOperation{FieldDiffs: fieldDiffs}, nil + + default: + return nil, fmt.Errorf("no such operation with name: %v", opName) + } +} + +func extractMembershipFields(r *Membership) error { + vEndpoint := r.Endpoint + if vEndpoint == nil { + // note: explicitly not the empty object. 
+ vEndpoint = &MembershipEndpoint{} + } + if err := extractMembershipEndpointFields(r, vEndpoint); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vEndpoint) { + r.Endpoint = vEndpoint + } + vState := r.State + if vState == nil { + // note: explicitly not the empty object. + vState = &MembershipState{} + } + if err := extractMembershipStateFields(r, vState); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vState) { + r.State = vState + } + vAuthority := r.Authority + if vAuthority == nil { + // note: explicitly not the empty object. + vAuthority = &MembershipAuthority{} + } + if err := extractMembershipAuthorityFields(r, vAuthority); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAuthority) { + r.Authority = vAuthority + } + return nil +} +func extractMembershipEndpointFields(r *Membership, o *MembershipEndpoint) error { + vGkeCluster := o.GkeCluster + if vGkeCluster == nil { + // note: explicitly not the empty object. + vGkeCluster = &MembershipEndpointGkeCluster{} + } + if err := extractMembershipEndpointGkeClusterFields(r, vGkeCluster); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vGkeCluster) { + o.GkeCluster = vGkeCluster + } + vKubernetesMetadata := o.KubernetesMetadata + if vKubernetesMetadata == nil { + // note: explicitly not the empty object. + vKubernetesMetadata = &MembershipEndpointKubernetesMetadata{} + } + if err := extractMembershipEndpointKubernetesMetadataFields(r, vKubernetesMetadata); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vKubernetesMetadata) { + o.KubernetesMetadata = vKubernetesMetadata + } + vKubernetesResource := o.KubernetesResource + if vKubernetesResource == nil { + // note: explicitly not the empty object. 
+ vKubernetesResource = &MembershipEndpointKubernetesResource{} + } + if err := extractMembershipEndpointKubernetesResourceFields(r, vKubernetesResource); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vKubernetesResource) { + o.KubernetesResource = vKubernetesResource + } + return nil +} +func extractMembershipEndpointGkeClusterFields(r *Membership, o *MembershipEndpointGkeCluster) error { + return nil +} +func extractMembershipEndpointKubernetesMetadataFields(r *Membership, o *MembershipEndpointKubernetesMetadata) error { + return nil +} +func extractMembershipEndpointKubernetesResourceFields(r *Membership, o *MembershipEndpointKubernetesResource) error { + vResourceOptions := o.ResourceOptions + if vResourceOptions == nil { + // note: explicitly not the empty object. + vResourceOptions = &MembershipEndpointKubernetesResourceResourceOptions{} + } + if err := extractMembershipEndpointKubernetesResourceResourceOptionsFields(r, vResourceOptions); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vResourceOptions) { + o.ResourceOptions = vResourceOptions + } + return nil +} +func extractMembershipEndpointKubernetesResourceMembershipResourcesFields(r *Membership, o *MembershipEndpointKubernetesResourceMembershipResources) error { + return nil +} +func extractMembershipEndpointKubernetesResourceConnectResourcesFields(r *Membership, o *MembershipEndpointKubernetesResourceConnectResources) error { + return nil +} +func extractMembershipEndpointKubernetesResourceResourceOptionsFields(r *Membership, o *MembershipEndpointKubernetesResourceResourceOptions) error { + return nil +} +func extractMembershipStateFields(r *Membership, o *MembershipState) error { + return nil +} +func extractMembershipAuthorityFields(r *Membership, o *MembershipAuthority) error { + return nil +} + +func postReadExtractMembershipFields(r *Membership) error { + vEndpoint := r.Endpoint + if vEndpoint == nil { + // note: explicitly not the empty object. 
+ vEndpoint = &MembershipEndpoint{} + } + if err := postReadExtractMembershipEndpointFields(r, vEndpoint); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vEndpoint) { + r.Endpoint = vEndpoint + } + vState := r.State + if vState == nil { + // note: explicitly not the empty object. + vState = &MembershipState{} + } + if err := postReadExtractMembershipStateFields(r, vState); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vState) { + r.State = vState + } + vAuthority := r.Authority + if vAuthority == nil { + // note: explicitly not the empty object. + vAuthority = &MembershipAuthority{} + } + if err := postReadExtractMembershipAuthorityFields(r, vAuthority); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAuthority) { + r.Authority = vAuthority + } + return nil +} +func postReadExtractMembershipEndpointFields(r *Membership, o *MembershipEndpoint) error { + vGkeCluster := o.GkeCluster + if vGkeCluster == nil { + // note: explicitly not the empty object. + vGkeCluster = &MembershipEndpointGkeCluster{} + } + if err := extractMembershipEndpointGkeClusterFields(r, vGkeCluster); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vGkeCluster) { + o.GkeCluster = vGkeCluster + } + vKubernetesMetadata := o.KubernetesMetadata + if vKubernetesMetadata == nil { + // note: explicitly not the empty object. + vKubernetesMetadata = &MembershipEndpointKubernetesMetadata{} + } + if err := extractMembershipEndpointKubernetesMetadataFields(r, vKubernetesMetadata); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vKubernetesMetadata) { + o.KubernetesMetadata = vKubernetesMetadata + } + vKubernetesResource := o.KubernetesResource + if vKubernetesResource == nil { + // note: explicitly not the empty object. 
+ vKubernetesResource = &MembershipEndpointKubernetesResource{} + } + if err := extractMembershipEndpointKubernetesResourceFields(r, vKubernetesResource); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vKubernetesResource) { + o.KubernetesResource = vKubernetesResource + } + return nil +} +func postReadExtractMembershipEndpointGkeClusterFields(r *Membership, o *MembershipEndpointGkeCluster) error { + return nil +} +func postReadExtractMembershipEndpointKubernetesMetadataFields(r *Membership, o *MembershipEndpointKubernetesMetadata) error { + return nil +} +func postReadExtractMembershipEndpointKubernetesResourceFields(r *Membership, o *MembershipEndpointKubernetesResource) error { + vResourceOptions := o.ResourceOptions + if vResourceOptions == nil { + // note: explicitly not the empty object. + vResourceOptions = &MembershipEndpointKubernetesResourceResourceOptions{} + } + if err := extractMembershipEndpointKubernetesResourceResourceOptionsFields(r, vResourceOptions); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vResourceOptions) { + o.ResourceOptions = vResourceOptions + } + return nil +} +func postReadExtractMembershipEndpointKubernetesResourceMembershipResourcesFields(r *Membership, o *MembershipEndpointKubernetesResourceMembershipResources) error { + return nil +} +func postReadExtractMembershipEndpointKubernetesResourceConnectResourcesFields(r *Membership, o *MembershipEndpointKubernetesResourceConnectResources) error { + return nil +} +func postReadExtractMembershipEndpointKubernetesResourceResourceOptionsFields(r *Membership, o *MembershipEndpointKubernetesResourceResourceOptions) error { + return nil +} +func postReadExtractMembershipStateFields(r *Membership, o *MembershipState) error { + return nil +} +func postReadExtractMembershipAuthorityFields(r *Membership, o *MembershipAuthority) error { + return nil +} diff --git a/mmv1/third_party/terraform/services/gkehub/membership_schema.go.tmpl 
b/mmv1/third_party/terraform/services/gkehub/membership_schema.go.tmpl new file mode 100644 index 000000000000..ebdafd24155b --- /dev/null +++ b/mmv1/third_party/terraform/services/gkehub/membership_schema.go.tmpl @@ -0,0 +1,410 @@ +package gkehub + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +func DCLMembershipSchema() *dcl.Schema { + return &dcl.Schema{ + Info: &dcl.Info{ + Title: "GkeHub/Membership", + Description: "The GkeHub Membership resource", + StructName: "Membership", + }, + Paths: &dcl.Paths{ + Get: &dcl.Path{ + Description: "The function used to get information about a Membership", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "membership", + Required: true, + Description: "A full instance of a Membership", + }, + }, + }, + Apply: &dcl.Path{ + Description: "The function used to apply information about a Membership", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "membership", + Required: true, + Description: "A full instance of a Membership", + }, + }, + }, + Delete: &dcl.Path{ + Description: "The function used to delete a Membership", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "membership", + Required: true, + Description: "A full instance of a Membership", + }, + }, + }, + DeleteAll: &dcl.Path{ + Description: "The function used to delete all Membership", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "project", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + dcl.PathParameters{ + Name: "location", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + }, + }, + List: &dcl.Path{ + Description: "The function used to list information about many Membership", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "project", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + dcl.PathParameters{ + Name: "location", + 
Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + }, + }, + }, + Components: &dcl.Components{ + Schemas: map[string]*dcl.Component{ + "Membership": &dcl.Component{ + Title: "Membership", + ID: "projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/memberships/{{ "{{" }}name{{ "}}" }}", + UsesStateHint: true, + ParentContainer: "project", + LabelsField: "labels", + HasCreate: true, + SchemaProperty: dcl.Property{ + Type: "object", + Required: []string{ + "name", + "project", + "location", + }, + Properties: map[string]*dcl.Property{ + "authority": &dcl.Property{ + Type: "object", + GoName: "Authority", + GoType: "MembershipAuthority", + Description: "Optional. How to identify workloads from this Membership. See the documentation on Workload Identity for more details: https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity", + Properties: map[string]*dcl.Property{ + "identityProvider": &dcl.Property{ + Type: "string", + GoName: "IdentityProvider", + ReadOnly: true, + Description: "Output only. An identity provider that reflects the `issuer` in the workload identity pool.", + }, + "issuer": &dcl.Property{ + Type: "string", + GoName: "Issuer", + Description: "Optional. A JSON Web Token (JWT) issuer URI. `issuer` must start with `https://` and be a valid URL with length <2000 characters. If set, then Google will allow valid OIDC tokens from this issuer to authenticate within the workload_identity_pool. OIDC discovery will be performed on this URI to validate tokens from the issuer. Clearing `issuer` disables Workload Identity. `issuer` cannot be directly modified; it must be cleared (and Workload Identity disabled) before using a new issuer (and re-enabling Workload Identity).", + }, + "workloadIdentityPool": &dcl.Property{ + Type: "string", + GoName: "WorkloadIdentityPool", + ReadOnly: true, + Description: "Output only. The name of the workload identity pool in which `issuer` will be recognized. 
There is a single Workload Identity Pool per Hub that is shared between all Memberships that belong to that Hub. For a Hub hosted in: {PROJECT_ID}, the workload pool format is `{PROJECT_ID}.hub.id.goog`, although this is subject to change in newer versions of this API.", + }, + }, + }, + "createTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "CreateTime", + ReadOnly: true, + Description: "Output only. When the Membership was created.", + Immutable: true, + }, + "deleteTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "DeleteTime", + ReadOnly: true, + Description: "Output only. When the Membership was deleted.", + Immutable: true, + }, + "description": &dcl.Property{ + Type: "string", + GoName: "Description", + Description: "Description of this membership, limited to 63 characters. Must match the regex: `*` This field is present for legacy purposes.", + }, + "endpoint": &dcl.Property{ + Type: "object", + GoName: "Endpoint", + GoType: "MembershipEndpoint", + Description: "Optional. Endpoint information to reach this member.", + Properties: map[string]*dcl.Property{ + "gkeCluster": &dcl.Property{ + Type: "object", + GoName: "GkeCluster", + GoType: "MembershipEndpointGkeCluster", + Description: "Optional. GKE-specific information. Only present if this Membership is a GKE cluster.", + Properties: map[string]*dcl.Property{ + "resourceLink": &dcl.Property{ + Type: "string", + GoName: "ResourceLink", + Description: "Immutable. Self-link of the GCP resource for the GKE cluster. 
For example: //container.googleapis.com/projects/my-project/locations/us-west1-a/clusters/my-cluster Zonal clusters are also supported.", + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Container/Cluster", + Field: "selfLink", + }, + }, + }, + }, + }, + "kubernetesMetadata": &dcl.Property{ + Type: "object", + GoName: "KubernetesMetadata", + GoType: "MembershipEndpointKubernetesMetadata", + ReadOnly: true, + Description: "Output only. Useful Kubernetes-specific metadata.", + Properties: map[string]*dcl.Property{ + "kubernetesApiServerVersion": &dcl.Property{ + Type: "string", + GoName: "KubernetesApiServerVersion", + ReadOnly: true, + Description: "Output only. Kubernetes API server version string as reported by `/version`.", + }, + "memoryMb": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "MemoryMb", + ReadOnly: true, + Description: "Output only. The total memory capacity as reported by the sum of all Kubernetes nodes resources, defined in MB.", + }, + "nodeCount": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "NodeCount", + ReadOnly: true, + Description: "Output only. Node count as reported by Kubernetes nodes resources.", + }, + "nodeProviderId": &dcl.Property{ + Type: "string", + GoName: "NodeProviderId", + ReadOnly: true, + Description: "Output only. Node providerID as reported by the first node in the list of nodes on the Kubernetes endpoint. On Kubernetes platforms that support zero-node clusters (like GKE-on-GCP), the node_count will be zero and the node_provider_id will be empty.", + }, + "updateTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "UpdateTime", + ReadOnly: true, + Description: "Output only. The time at which these details were last updated. 
This update_time is different from the Membership-level update_time since EndpointDetails are updated internally for API consumers.", + }, + "vcpuCount": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "VcpuCount", + ReadOnly: true, + Description: "Output only. vCPU count as reported by Kubernetes nodes resources.", + }, + }, + }, + "kubernetesResource": &dcl.Property{ + Type: "object", + GoName: "KubernetesResource", + GoType: "MembershipEndpointKubernetesResource", + Description: "Optional. The in-cluster Kubernetes Resources that should be applied for a correctly registered cluster, in the steady state. These resources: * Ensure that the cluster is exclusively registered to one and only one Hub Membership. * Propagate Workload Pool Information available in the Membership Authority field. * Ensure proper initial configuration of default Hub Features.", + Properties: map[string]*dcl.Property{ + "connectResources": &dcl.Property{ + Type: "array", + GoName: "ConnectResources", + ReadOnly: true, + Description: "Output only. The Kubernetes resources for installing the GKE Connect agent This field is only populated in the Membership returned from a successful long-running operation from CreateMembership or UpdateMembership. It is not populated during normal GetMembership or ListMemberships requests. To get the resource manifest after the initial registration, the caller should make a UpdateMembership call with an empty field mask.", + ListType: "list", + Items: &dcl.Property{ + Type: "object", + GoType: "MembershipEndpointKubernetesResourceConnectResources", + Properties: map[string]*dcl.Property{ + "clusterScoped": &dcl.Property{ + Type: "boolean", + GoName: "ClusterScoped", + Description: "Whether the resource provided in the manifest is `cluster_scoped`. If unset, the manifest is assumed to be namespace scoped. 
This field is used for REST mapping when applying the resource in a cluster.", + }, + "manifest": &dcl.Property{ + Type: "string", + GoName: "Manifest", + Description: "YAML manifest of the resource.", + }, + }, + }, + }, + "membershipCrManifest": &dcl.Property{ + Type: "string", + GoName: "MembershipCrManifest", + Description: "Input only. The YAML representation of the Membership CR. This field is ignored for GKE clusters where Hub can read the CR directly. Callers should provide the CR that is currently present in the cluster during CreateMembership or UpdateMembership, or leave this field empty if none exists. The CR manifest is used to validate the cluster has not been registered with another Membership.", + Unreadable: true, + }, + "membershipResources": &dcl.Property{ + Type: "array", + GoName: "MembershipResources", + ReadOnly: true, + Description: "Output only. Additional Kubernetes resources that need to be applied to the cluster after Membership creation, and after every update. This field is only populated in the Membership returned from a successful long-running operation from CreateMembership or UpdateMembership. It is not populated during normal GetMembership or ListMemberships requests. To get the resource manifest after the initial registration, the caller should make a UpdateMembership call with an empty field mask.", + ListType: "list", + Items: &dcl.Property{ + Type: "object", + GoType: "MembershipEndpointKubernetesResourceMembershipResources", + Properties: map[string]*dcl.Property{ + "clusterScoped": &dcl.Property{ + Type: "boolean", + GoName: "ClusterScoped", + Description: "Whether the resource provided in the manifest is `cluster_scoped`. If unset, the manifest is assumed to be namespace scoped. 
This field is used for REST mapping when applying the resource in a cluster.", + }, + "manifest": &dcl.Property{ + Type: "string", + GoName: "Manifest", + Description: "YAML manifest of the resource.", + }, + }, + }, + }, + "resourceOptions": &dcl.Property{ + Type: "object", + GoName: "ResourceOptions", + GoType: "MembershipEndpointKubernetesResourceResourceOptions", + Description: "Optional. Options for Kubernetes resource generation.", + Properties: map[string]*dcl.Property{ + "connectVersion": &dcl.Property{ + Type: "string", + GoName: "ConnectVersion", + Description: "Optional. The Connect agent version to use for connect_resources. Defaults to the latest GKE Connect version. The version must be a currently supported version, obsolete versions will be rejected.", + }, + "v1beta1Crd": &dcl.Property{ + Type: "boolean", + GoName: "V1Beta1Crd", + Description: "Optional. Use `apiextensions/v1beta1` instead of `apiextensions/v1` for CustomResourceDefinition resources. This option should be set for clusters with Kubernetes apiserver versions <1.16.", + }, + }, + }, + }, + }, + }, + }, + "externalId": &dcl.Property{ + Type: "string", + GoName: "ExternalId", + Description: "Optional. An externally-generated and managed ID for this Membership. This ID may be modified after creation, but this is not recommended. The ID must match the regex: `*` If this Membership represents a Kubernetes cluster, this value should be set to the UID of the `kube-system` namespace object.", + ServerDefault: true, + }, + "infrastructureType": &dcl.Property{ + Type: "string", + GoName: "InfrastructureType", + GoType: "MembershipInfrastructureTypeEnum", + Description: "Optional. The infrastructure type this Membership is running on. 
Possible values: INFRASTRUCTURE_TYPE_UNSPECIFIED, ON_PREM, MULTI_CLOUD", + ServerDefault: true, + Enum: []string{ + "INFRASTRUCTURE_TYPE_UNSPECIFIED", + "ON_PREM", + "MULTI_CLOUD", + }, + }, + "labels": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "Labels", + Description: "Optional. GCP labels for this membership.", + }, + "lastConnectionTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "LastConnectionTime", + ReadOnly: true, + Description: "Output only. For clusters using Connect, the timestamp of the most recent connection established with Google Cloud. This time is updated every several minutes, not continuously. For clusters that do not use GKE Connect, or that have never connected successfully, this field will be unset.", + Immutable: true, + }, + "location": &dcl.Property{ + Type: "string", + GoName: "Location", + Description: "The location for the resource", + Immutable: true, + Parameter: true, + }, + "name": &dcl.Property{ + Type: "string", + GoName: "Name", + Description: "Output only. The full, unique name of this Membership resource in the format `projects/*/locations/*/memberships/{membership_id}`, set during creation. `membership_id` must be a valid RFC 1123 compliant DNS label: 1. At most 63 characters in length 2. It must consist of lower case alphanumeric characters or `-` 3. 
It must start and end with an alphanumeric character Which can be expressed as the regex: `)?`, with a maximum length of 63 characters.", + Immutable: true, + HasLongForm: true, + }, + "project": &dcl.Property{ + Type: "string", + GoName: "Project", + Description: "The project for the resource", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Cloudresourcemanager/Project", + Field: "name", + Parent: true, + }, + }, + Parameter: true, + }, + "state": &dcl.Property{ + Type: "object", + GoName: "State", + GoType: "MembershipState", + ReadOnly: true, + Description: "Output only. State of the Membership resource.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "code": &dcl.Property{ + Type: "string", + GoName: "Code", + GoType: "MembershipStateCodeEnum", + ReadOnly: true, + Description: "Output only. The current state of the Membership resource. Possible values: CODE_UNSPECIFIED, CREATING, READY, DELETING, UPDATING, SERVICE_UPDATING", + Immutable: true, + Enum: []string{ + "CODE_UNSPECIFIED", + "CREATING", + "READY", + "DELETING", + "UPDATING", + "SERVICE_UPDATING", + }, + }, + }, + }, + "uniqueId": &dcl.Property{ + Type: "string", + GoName: "UniqueId", + ReadOnly: true, + Description: "Output only. Google-generated UUID for this resource. This is unique across all Membership resources. If a Membership resource is deleted and another resource with the same name is created, it gets a different unique_id.", + Immutable: true, + }, + "updateTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "UpdateTime", + ReadOnly: true, + Description: "Output only. 
When the Membership was last updated.", + Immutable: true, + }, + }, + }, + }, + }, + }, + } +} diff --git a/mmv1/third_party/terraform/services/gkehub/poco_utils.go.tmpl b/mmv1/third_party/terraform/services/gkehub/poco_utils.go.tmpl new file mode 100644 index 000000000000..e8b89b89cb3a --- /dev/null +++ b/mmv1/third_party/terraform/services/gkehub/poco_utils.go.tmpl @@ -0,0 +1,27 @@ +package gkehub + +func alsoExpandEmptyBundlesInMap(c *Client, f map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles, res *FeatureMembership) (map[string]any, error) { + if len(f) == 0 { + return nil, nil + } + + items := make(map[string]any) + for k, v := range f { + i, err := alsoExpandEmptyBundles(c, &v, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + return items, nil +} + +func alsoExpandEmptyBundles(c *Client, f *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles, res *FeatureMembership) (map[string]any, error) { + m := make(map[string]any) + if v := f.ExemptedNamespaces; v != nil { + m["exemptedNamespaces"] = v + } + return m, nil +} diff --git a/mmv1/third_party/terraform/services/gkehub/provider_dcl_client_creation.go b/mmv1/third_party/terraform/services/gkehub/provider_dcl_client_creation.go new file mode 100644 index 000000000000..5decc37e9671 --- /dev/null +++ b/mmv1/third_party/terraform/services/gkehub/provider_dcl_client_creation.go @@ -0,0 +1,30 @@ +package gkehub + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "time" +) + +func NewDCLGkeHubClient(config *transport_tpg.Config, userAgent, billingProject string, timeout time.Duration) *Client { + configOptions := []dcl.ConfigOption{ + dcl.WithHTTPClient(config.Client), + dcl.WithUserAgent(userAgent), + dcl.WithLogger(dcl.DCLLogger{}), + dcl.WithBasePath(config.GKEHubBasePath), + } + + if timeout 
!= 0 { + configOptions = append(configOptions, dcl.WithTimeout(timeout)) + } + + if config.UserProjectOverride { + configOptions = append(configOptions, dcl.WithUserProjectOverride()) + if billingProject != "" { + configOptions = append(configOptions, dcl.WithBillingProject(billingProject)) + } + } + + dclConfig := dcl.NewConfig(configOptions...) + return NewClient(dclConfig) +} diff --git a/mmv1/third_party/terraform/services/gkehub/resource_gke_hub_feature_membership.go.tmpl b/mmv1/third_party/terraform/services/gkehub/resource_gke_hub_feature_membership.go.tmpl new file mode 100644 index 000000000000..150a0fdd4f17 --- /dev/null +++ b/mmv1/third_party/terraform/services/gkehub/resource_gke_hub_feature_membership.go.tmpl @@ -0,0 +1,1894 @@ +package gkehub + +import ( + "context" + "fmt" + "log" + "time" + +{{- if ne $.TargetVersionName "ga" }} + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" +{{- end }} + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" +{{- if eq $.TargetVersionName "ga" }} + gkehub "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/gkehub" +{{- else }} + gkehub "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/gkehub/beta" +{{- end }} + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceGkeHubFeatureMembership() *schema.Resource { + return &schema.Resource{ + Create: resourceGkeHubFeatureMembershipCreate, + Read: resourceGkeHubFeatureMembershipRead, + Update: resourceGkeHubFeatureMembershipUpdate, + Delete: resourceGkeHubFeatureMembershipDelete, + + Importer: &schema.ResourceImporter{ + State: resourceGkeHubFeatureMembershipImport, + }, + + Timeouts: &schema.ResourceTimeout{ + 
Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, +{{- if ne $.TargetVersionName "ga" }} + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + ), +{{- end }} + + Schema: map[string]*schema.Schema{ + "feature": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The name of the feature", + }, + + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The location of the feature", + }, + + "membership": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The name of the membership", + }, + + "configmanagement": { + Type: schema.TypeList, + Optional: true, + Description: "Config Management-specific spec.", + MaxItems: 1, + Elem: GkeHubFeatureMembershipConfigmanagementSchema(), + }, + + "membership_location": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The location of the membership", + }, + + "mesh": { + Type: schema.TypeList, + Optional: true, + Description: "Manage Mesh Features", + MaxItems: 1, + Elem: GkeHubFeatureMembershipMeshSchema(), + }, + + "policycontroller": { + Type: schema.TypeList, + Optional: true, + Description: "Policy Controller-specific spec.", + MaxItems: 1, + Elem: GkeHubFeatureMembershipPolicycontrollerSchema(), + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The project of the feature", + }, + }, + } +} + +func GkeHubFeatureMembershipConfigmanagementSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "config_sync": { + Type: schema.TypeList, + Optional: true, + Description: "Config Sync 
configuration for the cluster.", + MaxItems: 1, + Elem: GkeHubFeatureMembershipConfigmanagementConfigSyncSchema(), + }, + + "hierarchy_controller": { + Type: schema.TypeList, + Optional: true, + Description: "Hierarchy Controller configuration for the cluster.", + MaxItems: 1, + Elem: GkeHubFeatureMembershipConfigmanagementHierarchyControllerSchema(), + }, + + "management": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: "Set this field to MANAGEMENT_AUTOMATIC to enable Config Sync auto-upgrades, and set this field to MANAGEMENT_MANUAL or MANAGEMENT_UNSPECIFIED to disable Config Sync auto-upgrades.", + }, + + "policy_controller": { + Type: schema.TypeList, + Optional: true, + Description: "**DEPRECATED** Configuring Policy Controller through the configmanagement feature is no longer recommended. Use the policycontroller feature instead.", + MaxItems: 1, + Elem: GkeHubFeatureMembershipConfigmanagementPolicyControllerSchema(), + }, + + "version": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: "Optional. Version of ACM to install. Defaults to the latest version.", + }, + }, + } +} + +func GkeHubFeatureMembershipConfigmanagementConfigSyncSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "deployment_overrides": { + Type: schema.TypeList, + Optional: true, + Description: "The override configurations for the Config Sync Deployments.", + Elem: GkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesSchema(), + }, + + "enabled": { + Type: schema.TypeBool, + Optional: true, + Description: "Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. 
If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field.", + }, + + "git": { + Type: schema.TypeList, + Optional: true, + Description: "", + MaxItems: 1, + Elem: GkeHubFeatureMembershipConfigmanagementConfigSyncGitSchema(), + }, + + "metrics_gcp_service_account_email": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring.", + }, + + "oci": { + Type: schema.TypeList, + Optional: true, + Description: "", + MaxItems: 1, + Elem: GkeHubFeatureMembershipConfigmanagementConfigSyncOciSchema(), + }, + + "prevent_drift": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + Description: "Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts.", + }, + + "source_format": { + Type: schema.TypeString, + Optional: true, + Description: "Specifies whether the Config Sync Repo is in \"hierarchical\" or \"unstructured\" mode.", + }, + + "stop_syncing": { + Type: schema.TypeBool, + Optional: true, + Description: "Set to true to stop syncing configs for a single cluster. 
Default: false.", + }, + }, + } +} + +func GkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "containers": { + Type: schema.TypeList, + Optional: true, + Description: "The override configurations for the containers in the Deployment.", + Elem: GkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersSchema(), + }, + + "deployment_name": { + Type: schema.TypeString, + Optional: true, + Description: "The name of the Deployment.", + }, + + "deployment_namespace": { + Type: schema.TypeString, + Optional: true, + Description: "The namespace of the Deployment.", + }, + }, + } +} + +func GkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "container_name": { + Type: schema.TypeString, + Optional: true, + Description: "The name of the container.", + }, + + "cpu_limit": { + Type: schema.TypeString, + Optional: true, + Description: "The CPU limit of the container.", + }, + + "cpu_request": { + Type: schema.TypeString, + Optional: true, + Description: "The CPU request of the container.", + }, + + "memory_limit": { + Type: schema.TypeString, + Optional: true, + Description: "The memory limit of the container.", + }, + + "memory_request": { + Type: schema.TypeString, + Optional: true, + Description: "The memory request of the container.", + }, + }, + } +} + +func GkeHubFeatureMembershipConfigmanagementConfigSyncGitSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "gcp_service_account_email": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The GCP Service Account Email used for auth when secretType is gcpServiceAccount.", + }, + + "https_proxy": { + Type: schema.TypeString, + Optional: true, + Description: "URL 
for the HTTPS proxy to be used when communicating with the Git repo.", + }, + + "policy_dir": { + Type: schema.TypeString, + Optional: true, + Description: "The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository.", + }, + + "secret_type": { + Type: schema.TypeString, + Optional: true, + Description: "Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount or none. The validation of this is case-sensitive.", + }, + + "sync_branch": { + Type: schema.TypeString, + Optional: true, + Description: "The branch of the repository to sync from. Default: master.", + }, + + "sync_repo": { + Type: schema.TypeString, + Optional: true, + Description: "The URL of the Git repository to use as the source of truth.", + }, + + "sync_rev": { + Type: schema.TypeString, + Optional: true, + Description: "Git revision (tag or hash) to check out. Default HEAD.", + }, + + "sync_wait_secs": { + Type: schema.TypeString, + Optional: true, + Description: "Period in seconds between consecutive syncs. Default: 15.", + }, + }, + } +} + +func GkeHubFeatureMembershipConfigmanagementConfigSyncOciSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "gcp_service_account_email": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The GCP Service Account Email used for auth when secret_type is gcpserviceaccount. ", + }, + + "policy_dir": { + Type: schema.TypeString, + Optional: true, + Description: "The absolute path of the directory that contains the local resources. Default: the root directory of the image.", + }, + + "secret_type": { + Type: schema.TypeString, + Optional: true, + Description: "Type of secret configured for access to the OCI Image. Must be one of gcenode, gcpserviceaccount or none. 
The validation of this is case-sensitive.", + }, + + "sync_repo": { + Type: schema.TypeString, + Optional: true, + Description: "The OCI image repository URL for the package to sync from. e.g. LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME.", + }, + + "sync_wait_secs": { + Type: schema.TypeString, + Optional: true, + Description: "Period in seconds(int64 format) between consecutive syncs. Default: 15.", + }, + }, + } +} + +func GkeHubFeatureMembershipConfigmanagementHierarchyControllerSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable_hierarchical_resource_quota": { + Type: schema.TypeBool, + Optional: true, + Description: "Whether hierarchical resource quota is enabled in this cluster.", + }, + + "enable_pod_tree_labels": { + Type: schema.TypeBool, + Optional: true, + Description: "Whether pod tree labels are enabled in this cluster.", + }, + + "enabled": { + Type: schema.TypeBool, + Optional: true, + Description: "**DEPRECATED** Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead.", + }, + }, + } +} + +func GkeHubFeatureMembershipConfigmanagementPolicyControllerSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "audit_interval_seconds": { + Type: schema.TypeString, + Optional: true, + Description: "Sets the interval for Policy Controller Audit Scans (in seconds). When set to 0, this disables audit functionality altogether.", + }, + + "enabled": { + Type: schema.TypeBool, + Optional: true, + Description: "Enables the installation of Policy Controller. If false, the rest of PolicyController fields take no effect.", + }, + + "exemptable_namespaces": { + Type: schema.TypeList, + Optional: true, + Description: "The set of namespaces that are excluded from Policy Controller checks. 
Namespaces do not need to currently exist on the cluster.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "log_denies_enabled": { + Type: schema.TypeBool, + Optional: true, + Description: "Logs all denies and dry run failures.", + }, + + "monitoring": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: "Specifies the backends Policy Controller should export metrics to. For example, to specify metrics should be exported to Cloud Monitoring and Prometheus, specify backends: [\"cloudmonitoring\", \"prometheus\"]. Default: [\"cloudmonitoring\", \"prometheus\"]", + MaxItems: 1, + Elem: GkeHubFeatureMembershipConfigmanagementPolicyControllerMonitoringSchema(), + }, + + "mutation_enabled": { + Type: schema.TypeBool, + Optional: true, + Description: "Enable or disable mutation in policy controller. If true, mutation CRDs, webhook and controller deployment will be deployed to the cluster.", + }, + + "referential_rules_enabled": { + Type: schema.TypeBool, + Optional: true, + Description: "Enables the ability to use Constraint Templates that reference to objects other than the object currently being evaluated.", + }, + + "template_library_installed": { + Type: schema.TypeBool, + Optional: true, + Description: "Installs the default template library along with Policy Controller.", + }, + }, + } +} + +func GkeHubFeatureMembershipConfigmanagementPolicyControllerMonitoringSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "backends": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: " Specifies the list of backends Policy Controller will export to. 
Specifying an empty value `[]` disables metrics export.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func GkeHubFeatureMembershipMeshSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "control_plane": { + Type: schema.TypeString, + Optional: true, + Description: "**DEPRECATED** Whether to automatically manage Service Mesh control planes. Possible values: CONTROL_PLANE_MANAGEMENT_UNSPECIFIED, AUTOMATIC, MANUAL", + Deprecated: "Deprecated in favor of the `management` field", + }, + + "management": { + Type: schema.TypeString, + Optional: true, + Description: "Whether to automatically manage Service Mesh. Possible values: MANAGEMENT_UNSPECIFIED, MANAGEMENT_AUTOMATIC, MANAGEMENT_MANUAL", + }, + }, + } +} + +func GkeHubFeatureMembershipPolicycontrollerSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "policy_controller_hub_config": { + Type: schema.TypeList, + Required: true, + Description: "Policy Controller configuration for the cluster.", + MaxItems: 1, + Elem: GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigSchema(), + }, + + "version": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: "Optional. Version of Policy Controller to install. Defaults to the latest version.", + }, + }, + } +} + +func GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "audit_interval_seconds": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: "Sets the interval for Policy Controller Audit Scans (in seconds). When set to 0, this disables audit functionality altogether.", + }, + + "constraint_violation_limit": { + Type: schema.TypeInt, + Optional: true, + Description: "The maximum number of audit violations to be stored in a constraint. 
If not set, the internal default of 20 will be used.", + }, + + "deployment_configs": { + Type: schema.TypeSet, + Computed: true, + Optional: true, + Description: "Map of deployment configs to deployments (\"admission\", \"audit\", \"mutation\").", + Elem: GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsSchema(), + Set: schema.HashResource(GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsSchema()), + }, + + "exemptable_namespaces": { + Type: schema.TypeList, + Optional: true, + Description: "The set of namespaces that are excluded from Policy Controller checks. Namespaces do not need to currently exist on the cluster.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "install_spec": { + Type: schema.TypeString, + Optional: true, + Description: "Configures the mode of the Policy Controller installation. Possible values: INSTALL_SPEC_UNSPECIFIED, INSTALL_SPEC_NOT_INSTALLED, INSTALL_SPEC_ENABLED, INSTALL_SPEC_SUSPENDED, INSTALL_SPEC_DETACHED", + }, + + "log_denies_enabled": { + Type: schema.TypeBool, + Optional: true, + Description: "Logs all denies and dry run failures.", + }, + + "monitoring": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: "Specifies the backends Policy Controller should export metrics to. For example, to specify metrics should be exported to Cloud Monitoring and Prometheus, specify backends: [\"cloudmonitoring\", \"prometheus\"]. 
Default: [\"cloudmonitoring\", \"prometheus\"]", + MaxItems: 1, + Elem: GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringSchema(), + }, + + "mutation_enabled": { + Type: schema.TypeBool, + Optional: true, + Description: "Enables the ability to mutate resources using Policy Controller.", + }, + + "policy_content": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: "Specifies the desired policy content on the cluster.", + MaxItems: 1, + Elem: GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentSchema(), + }, + + "referential_rules_enabled": { + Type: schema.TypeBool, + Optional: true, + Description: "Enables the ability to use Constraint Templates that reference to objects other than the object currently being evaluated.", + }, + }, + } +} + +func GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "component_name": { + Type: schema.TypeString, + Required: true, + Description: "The name for the key in the map for which this object is mapped to in the API", + }, + + "container_resources": { + Type: schema.TypeList, + Optional: true, + Description: "Container resource requirements.", + MaxItems: 1, + Elem: GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesSchema(), + }, + + "pod_affinity": { + Type: schema.TypeString, + Optional: true, + Description: "Pod affinity configuration. 
Possible values: AFFINITY_UNSPECIFIED, NO_AFFINITY, ANTI_AFFINITY", + }, + + "pod_tolerations": { + Type: schema.TypeList, + Optional: true, + Description: "Pod tolerations of node taints.", + Elem: GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsSchema(), + }, + + "replica_count": { + Type: schema.TypeInt, + Optional: true, + Description: "Pod replica count.", + }, + }, + } +} + +func GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "limits": { + Type: schema.TypeList, + Optional: true, + Description: "Limits describes the maximum amount of compute resources allowed for use by the running container.", + MaxItems: 1, + Elem: GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimitsSchema(), + }, + + "requests": { + Type: schema.TypeList, + Optional: true, + Description: "Requests describes the amount of compute resources reserved for the container by the kube-scheduler.", + MaxItems: 1, + Elem: GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequestsSchema(), + }, + }, + } +} + +func GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimitsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cpu": { + Type: schema.TypeString, + Optional: true, + Description: "CPU requirement expressed in Kubernetes resource units.", + }, + + "memory": { + Type: schema.TypeString, + Optional: true, + Description: "Memory requirement expressed in Kubernetes resource units.", + }, + }, + } +} + +func GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequestsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cpu": { + 
Type: schema.TypeString, + Optional: true, + Description: "CPU requirement expressed in Kubernetes resource units.", + }, + + "memory": { + Type: schema.TypeString, + Optional: true, + Description: "Memory requirement expressed in Kubernetes resource units.", + }, + }, + } +} + +func GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "effect": { + Type: schema.TypeString, + Optional: true, + Description: "Matches a taint effect.", + }, + + "key": { + Type: schema.TypeString, + Optional: true, + Description: "Matches a taint key (not necessarily unique).", + }, + + "operator": { + Type: schema.TypeString, + Optional: true, + Description: "Matches a taint operator.", + }, + + "value": { + Type: schema.TypeString, + Optional: true, + Description: "Matches a taint value.", + }, + }, + } +} + +func GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "backends": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: " Specifies the list of backends Policy Controller will export to. Specifying an empty value `[]` disables metrics export.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bundles": { + Type: schema.TypeSet, + Optional: true, + Description: "map of bundle name to BundleInstallSpec. 
The bundle name maps to the `bundleName` key in the `policycontroller.gke.io/constraintData` annotation on a constraint.", + Elem: GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesSchema(), + Set: schema.HashResource(GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesSchema()), + }, + + "template_library": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: "Configures the installation of the Template Library.", + MaxItems: 1, + Elem: GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrarySchema(), + }, + }, + } +} + +func GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bundle_name": { + Type: schema.TypeString, + Required: true, + Description: "The name for the key in the map for which this object is mapped to in the API", + }, + + "exempted_namespaces": { + Type: schema.TypeList, + Optional: true, + Description: "The set of namespaces to be exempted from the bundle.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func GkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrarySchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "installation": { + Type: schema.TypeString, + Optional: true, + Description: "Configures the manner in which the template library is installed on the cluster. 
Possible values: INSTALLATION_UNSPECIFIED, NOT_INSTALLED, ALL", + }, + }, + } +} + +func resourceGkeHubFeatureMembershipCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &gkehub.FeatureMembership{ + Feature: dcl.String(d.Get("feature").(string)), + Location: dcl.String(d.Get("location").(string)), + Membership: dcl.String(d.Get("membership").(string)), + Configmanagement: expandGkeHubFeatureMembershipConfigmanagement(d.Get("configmanagement")), + MembershipLocation: dcl.String(d.Get("membership_location").(string)), + Mesh: expandGkeHubFeatureMembershipMesh(d.Get("mesh")), + Policycontroller: expandGkeHubFeatureMembershipPolicycontroller(d.Get("policycontroller")), + Project: dcl.String(project), + } + lockName, err := tpgresource.ReplaceVarsForId(d, config, "{{ "{{" }}project{{ "}}" }}/{{ "{{" }}location{{ "}}" }}/{{ "{{" }}feature{{ "}}" }}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/features/{{ "{{" }}feature{{ "}}" }}/membershipId/{{ "{{" }}membership{{ "}}" }}") + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := transport_tpg.NewDCLGkeHubClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + 
d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyFeatureMembership(context.Background(), obj, directive...) + + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating FeatureMembership: %s", err) + } + + log.Printf("[DEBUG] Finished creating FeatureMembership %q: %#v", d.Id(), res) + + return resourceGkeHubFeatureMembershipRead(d, meta) +} + +func resourceGkeHubFeatureMembershipRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &gkehub.FeatureMembership{ + Feature: dcl.String(d.Get("feature").(string)), + Location: dcl.String(d.Get("location").(string)), + Membership: dcl.String(d.Get("membership").(string)), + Configmanagement: expandGkeHubFeatureMembershipConfigmanagement(d.Get("configmanagement")), + MembershipLocation: dcl.String(d.Get("membership_location").(string)), + Mesh: expandGkeHubFeatureMembershipMesh(d.Get("mesh")), + Policycontroller: expandGkeHubFeatureMembershipPolicycontroller(d.Get("policycontroller")), + Project: dcl.String(project), + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := transport_tpg.NewDCLGkeHubClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, 
err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetFeatureMembership(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("GkeHubFeatureMembership %q", d.Id()) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("feature", res.Feature); err != nil { + return fmt.Errorf("error setting feature in state: %s", err) + } + if err = d.Set("location", res.Location); err != nil { + return fmt.Errorf("error setting location in state: %s", err) + } + if err = d.Set("membership", res.Membership); err != nil { + return fmt.Errorf("error setting membership in state: %s", err) + } + if err = d.Set("configmanagement", flattenGkeHubFeatureMembershipConfigmanagement(res.Configmanagement)); err != nil { + return fmt.Errorf("error setting configmanagement in state: %s", err) + } + if err = d.Set("membership_location", res.MembershipLocation); err != nil { + return fmt.Errorf("error setting membership_location in state: %s", err) + } + if err = d.Set("mesh", flattenGkeHubFeatureMembershipMesh(res.Mesh)); err != nil { + return fmt.Errorf("error setting mesh in state: %s", err) + } + if err = d.Set("policycontroller", flattenGkeHubFeatureMembershipPolicycontroller(res.Policycontroller)); err != nil { + return fmt.Errorf("error setting policycontroller in state: %s", err) + } + if err = d.Set("project", res.Project); err != nil { + return fmt.Errorf("error setting project in state: %s", err) + } + + return nil +} +func resourceGkeHubFeatureMembershipUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &gkehub.FeatureMembership{ + Feature: dcl.String(d.Get("feature").(string)), + Location: dcl.String(d.Get("location").(string)), + Membership: dcl.String(d.Get("membership").(string)), + Configmanagement: 
expandGkeHubFeatureMembershipConfigmanagement(d.Get("configmanagement")), + MembershipLocation: dcl.String(d.Get("membership_location").(string)), + Mesh: expandGkeHubFeatureMembershipMesh(d.Get("mesh")), + Policycontroller: expandGkeHubFeatureMembershipPolicycontroller(d.Get("policycontroller")), + Project: dcl.String(project), + } + lockName, err := tpgresource.ReplaceVarsForId(d, config, "{{ "{{" }}project{{ "}}" }}/{{ "{{" }}location{{ "}}" }}/{{ "{{" }}feature{{ "}}" }}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + directive := tpgdclresource.UpdateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := transport_tpg.NewDCLGkeHubClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyFeatureMembership(context.Background(), obj, directive...) 
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually update + d.SetId("") + return fmt.Errorf("Error updating FeatureMembership: %s", err) + } + + log.Printf("[DEBUG] Finished updating FeatureMembership %q: %#v", d.Id(), res) + + return resourceGkeHubFeatureMembershipRead(d, meta) + } + + func resourceGkeHubFeatureMembershipDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &gkehub.FeatureMembership{ + Feature: dcl.String(d.Get("feature").(string)), + Location: dcl.String(d.Get("location").(string)), + Membership: dcl.String(d.Get("membership").(string)), + Configmanagement: expandGkeHubFeatureMembershipConfigmanagement(d.Get("configmanagement")), + MembershipLocation: dcl.String(d.Get("membership_location").(string)), + Mesh: expandGkeHubFeatureMembershipMesh(d.Get("mesh")), + Policycontroller: expandGkeHubFeatureMembershipPolicycontroller(d.Get("policycontroller")), + Project: dcl.String(project), + } + lockName, err := tpgresource.ReplaceVarsForId(d, config, "{{ "{{" }}project{{ "}}" }}/{{ "{{" }}location{{ "}}" }}/{{ "{{" }}feature{{ "}}" }}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + log.Printf("[DEBUG] Deleting FeatureMembership %q", d.Id()) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := transport_tpg.NewDCLGkeHubClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, 
client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + if err := client.DeleteFeatureMembership(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting FeatureMembership: %s", err) + } + + log.Printf("[DEBUG] Finished deleting FeatureMembership %q", d.Id()) + return nil + } + + func resourceGkeHubFeatureMembershipImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + if err := tpgresource.ParseImportId([]string{ + "projects/(?P<project>[^/]+)/locations/(?P<location>[^/]+)/features/(?P<feature>[^/]+)/membershipId/(?P<membership>[^/]+)", + "(?P<project>[^/]+)/(?P<location>[^/]+)/(?P<feature>[^/]+)/(?P<membership>[^/]+)", + "(?P<location>[^/]+)/(?P<feature>[^/]+)/(?P<membership>[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/features/{{ "{{" }}feature{{ "}}" }}/membershipId/{{ "{{" }}membership{{ "}}" }}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil + } + + func expandGkeHubFeatureMembershipConfigmanagement(o interface{}) *gkehub.FeatureMembershipConfigmanagement { + if o == nil { + return gkehub.EmptyFeatureMembershipConfigmanagement + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return gkehub.EmptyFeatureMembershipConfigmanagement + } + obj := objArr[0].(map[string]interface{}) + return &gkehub.FeatureMembershipConfigmanagement{ + ConfigSync: expandGkeHubFeatureMembershipConfigmanagementConfigSync(obj["config_sync"]), + HierarchyController: expandGkeHubFeatureMembershipConfigmanagementHierarchyController(obj["hierarchy_controller"]), + Management: gkehub.FeatureMembershipConfigmanagementManagementEnumRef(obj["management"].(string)), + PolicyController: 
expandGkeHubFeatureMembershipConfigmanagementPolicyController(obj["policy_controller"]), + Version: dcl.StringOrNil(obj["version"].(string)), + } +} + +func flattenGkeHubFeatureMembershipConfigmanagement(obj *gkehub.FeatureMembershipConfigmanagement) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "config_sync": flattenGkeHubFeatureMembershipConfigmanagementConfigSync(obj.ConfigSync), + "hierarchy_controller": flattenGkeHubFeatureMembershipConfigmanagementHierarchyController(obj.HierarchyController), + "management": obj.Management, + "policy_controller": flattenGkeHubFeatureMembershipConfigmanagementPolicyController(obj.PolicyController), + "version": obj.Version, + } + + return []interface{}{transformed} + +} + +func expandGkeHubFeatureMembershipConfigmanagementConfigSync(o interface{}) *gkehub.FeatureMembershipConfigmanagementConfigSync { + if o == nil { + return gkehub.EmptyFeatureMembershipConfigmanagementConfigSync + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return gkehub.EmptyFeatureMembershipConfigmanagementConfigSync + } + obj := objArr[0].(map[string]interface{}) + return &gkehub.FeatureMembershipConfigmanagementConfigSync{ + DeploymentOverrides: expandGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesArray(obj["deployment_overrides"]), + Enabled: dcl.Bool(obj["enabled"].(bool)), + Git: expandGkeHubFeatureMembershipConfigmanagementConfigSyncGit(obj["git"]), + MetricsGcpServiceAccountEmail: dcl.String(obj["metrics_gcp_service_account_email"].(string)), + Oci: expandGkeHubFeatureMembershipConfigmanagementConfigSyncOci(obj["oci"]), + PreventDrift: dcl.Bool(obj["prevent_drift"].(bool)), + SourceFormat: dcl.String(obj["source_format"].(string)), + StopSyncing: dcl.Bool(obj["stop_syncing"].(bool)), + } +} + +func flattenGkeHubFeatureMembershipConfigmanagementConfigSync(obj *gkehub.FeatureMembershipConfigmanagementConfigSync) interface{} { + if obj == 
nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "deployment_overrides": flattenGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesArray(obj.DeploymentOverrides), + "enabled": obj.Enabled, + "git": flattenGkeHubFeatureMembershipConfigmanagementConfigSyncGit(obj.Git), + "metrics_gcp_service_account_email": obj.MetricsGcpServiceAccountEmail, + "oci": flattenGkeHubFeatureMembershipConfigmanagementConfigSyncOci(obj.Oci), + "prevent_drift": obj.PreventDrift, + "source_format": obj.SourceFormat, + "stop_syncing": obj.StopSyncing, + } + + return []interface{}{transformed} + +} +func expandGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesArray(o interface{}) []gkehub.FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides { + if o == nil { + return make([]gkehub.FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return make([]gkehub.FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides, 0) + } + + items := make([]gkehub.FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides, 0, len(objs)) + for _, item := range objs { + i := expandGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides(item) + items = append(items, *i) + } + + return items +} + +func expandGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides(o interface{}) *gkehub.FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides { + if o == nil { + return gkehub.EmptyFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides + } + + obj := o.(map[string]interface{}) + return &gkehub.FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides{ + Containers: expandGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersArray(obj["containers"]), + DeploymentName: dcl.String(obj["deployment_name"].(string)), + DeploymentNamespace: 
dcl.String(obj["deployment_namespace"].(string)), + } +} + +func flattenGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesArray(objs []gkehub.FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides(&item) + items = append(items, i) + } + + return items +} + +func flattenGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides(obj *gkehub.FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "containers": flattenGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersArray(obj.Containers), + "deployment_name": obj.DeploymentName, + "deployment_namespace": obj.DeploymentNamespace, + } + + return transformed + +} +func expandGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersArray(o interface{}) []gkehub.FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers { + if o == nil { + return make([]gkehub.FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return make([]gkehub.FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers, 0) + } + + items := make([]gkehub.FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers, 0, len(objs)) + for _, item := range objs { + i := expandGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers(item) + items = append(items, *i) + } + + return items +} + +func expandGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers(o interface{}) *gkehub.FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers { + if o == nil { + return 
gkehub.EmptyFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers + } + + obj := o.(map[string]interface{}) + return &gkehub.FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers{ + ContainerName: dcl.String(obj["container_name"].(string)), + CpuLimit: dcl.String(obj["cpu_limit"].(string)), + CpuRequest: dcl.String(obj["cpu_request"].(string)), + MemoryLimit: dcl.String(obj["memory_limit"].(string)), + MemoryRequest: dcl.String(obj["memory_request"].(string)), + } +} + +func flattenGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersArray(objs []gkehub.FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers(&item) + items = append(items, i) + } + + return items +} + +func flattenGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers(obj *gkehub.FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "container_name": obj.ContainerName, + "cpu_limit": obj.CpuLimit, + "cpu_request": obj.CpuRequest, + "memory_limit": obj.MemoryLimit, + "memory_request": obj.MemoryRequest, + } + + return transformed + +} + +func expandGkeHubFeatureMembershipConfigmanagementConfigSyncGit(o interface{}) *gkehub.FeatureMembershipConfigmanagementConfigSyncGit { + if o == nil { + return gkehub.EmptyFeatureMembershipConfigmanagementConfigSyncGit + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return gkehub.EmptyFeatureMembershipConfigmanagementConfigSyncGit + } + obj := objArr[0].(map[string]interface{}) + return &gkehub.FeatureMembershipConfigmanagementConfigSyncGit{ + GcpServiceAccountEmail: 
dcl.String(obj["gcp_service_account_email"].(string)), + HttpsProxy: dcl.String(obj["https_proxy"].(string)), + PolicyDir: dcl.String(obj["policy_dir"].(string)), + SecretType: dcl.String(obj["secret_type"].(string)), + SyncBranch: dcl.String(obj["sync_branch"].(string)), + SyncRepo: dcl.String(obj["sync_repo"].(string)), + SyncRev: dcl.String(obj["sync_rev"].(string)), + SyncWaitSecs: dcl.String(obj["sync_wait_secs"].(string)), + } +} + +func flattenGkeHubFeatureMembershipConfigmanagementConfigSyncGit(obj *gkehub.FeatureMembershipConfigmanagementConfigSyncGit) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "gcp_service_account_email": obj.GcpServiceAccountEmail, + "https_proxy": obj.HttpsProxy, + "policy_dir": obj.PolicyDir, + "secret_type": obj.SecretType, + "sync_branch": obj.SyncBranch, + "sync_repo": obj.SyncRepo, + "sync_rev": obj.SyncRev, + "sync_wait_secs": obj.SyncWaitSecs, + } + + return []interface{}{transformed} + +} + +func expandGkeHubFeatureMembershipConfigmanagementConfigSyncOci(o interface{}) *gkehub.FeatureMembershipConfigmanagementConfigSyncOci { + if o == nil { + return gkehub.EmptyFeatureMembershipConfigmanagementConfigSyncOci + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return gkehub.EmptyFeatureMembershipConfigmanagementConfigSyncOci + } + obj := objArr[0].(map[string]interface{}) + return &gkehub.FeatureMembershipConfigmanagementConfigSyncOci{ + GcpServiceAccountEmail: dcl.String(obj["gcp_service_account_email"].(string)), + PolicyDir: dcl.String(obj["policy_dir"].(string)), + SecretType: dcl.String(obj["secret_type"].(string)), + SyncRepo: dcl.String(obj["sync_repo"].(string)), + SyncWaitSecs: dcl.String(obj["sync_wait_secs"].(string)), + } +} + +func flattenGkeHubFeatureMembershipConfigmanagementConfigSyncOci(obj *gkehub.FeatureMembershipConfigmanagementConfigSyncOci) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed 
:= map[string]interface{}{ + "gcp_service_account_email": obj.GcpServiceAccountEmail, + "policy_dir": obj.PolicyDir, + "secret_type": obj.SecretType, + "sync_repo": obj.SyncRepo, + "sync_wait_secs": obj.SyncWaitSecs, + } + + return []interface{}{transformed} + +} + +func expandGkeHubFeatureMembershipConfigmanagementHierarchyController(o interface{}) *gkehub.FeatureMembershipConfigmanagementHierarchyController { + if o == nil { + return gkehub.EmptyFeatureMembershipConfigmanagementHierarchyController + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return gkehub.EmptyFeatureMembershipConfigmanagementHierarchyController + } + obj := objArr[0].(map[string]interface{}) + return &gkehub.FeatureMembershipConfigmanagementHierarchyController{ + EnableHierarchicalResourceQuota: dcl.Bool(obj["enable_hierarchical_resource_quota"].(bool)), + EnablePodTreeLabels: dcl.Bool(obj["enable_pod_tree_labels"].(bool)), + Enabled: dcl.Bool(obj["enabled"].(bool)), + } +} + +func flattenGkeHubFeatureMembershipConfigmanagementHierarchyController(obj *gkehub.FeatureMembershipConfigmanagementHierarchyController) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "enable_hierarchical_resource_quota": obj.EnableHierarchicalResourceQuota, + "enable_pod_tree_labels": obj.EnablePodTreeLabels, + "enabled": obj.Enabled, + } + + return []interface{}{transformed} + +} + +func expandGkeHubFeatureMembershipConfigmanagementPolicyController(o interface{}) *gkehub.FeatureMembershipConfigmanagementPolicyController { + if o == nil { + return gkehub.EmptyFeatureMembershipConfigmanagementPolicyController + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return gkehub.EmptyFeatureMembershipConfigmanagementPolicyController + } + obj := objArr[0].(map[string]interface{}) + return &gkehub.FeatureMembershipConfigmanagementPolicyController{ + AuditIntervalSeconds: 
dcl.String(obj["audit_interval_seconds"].(string)), + Enabled: dcl.Bool(obj["enabled"].(bool)), + ExemptableNamespaces: tpgdclresource.ExpandStringArray(obj["exemptable_namespaces"]), + LogDeniesEnabled: dcl.Bool(obj["log_denies_enabled"].(bool)), + Monitoring: expandGkeHubFeatureMembershipConfigmanagementPolicyControllerMonitoring(obj["monitoring"]), + MutationEnabled: dcl.Bool(obj["mutation_enabled"].(bool)), + ReferentialRulesEnabled: dcl.Bool(obj["referential_rules_enabled"].(bool)), + TemplateLibraryInstalled: dcl.Bool(obj["template_library_installed"].(bool)), + } +} + +func flattenGkeHubFeatureMembershipConfigmanagementPolicyController(obj *gkehub.FeatureMembershipConfigmanagementPolicyController) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "audit_interval_seconds": obj.AuditIntervalSeconds, + "enabled": obj.Enabled, + "exemptable_namespaces": obj.ExemptableNamespaces, + "log_denies_enabled": obj.LogDeniesEnabled, + "monitoring": flattenGkeHubFeatureMembershipConfigmanagementPolicyControllerMonitoring(obj.Monitoring), + "mutation_enabled": obj.MutationEnabled, + "referential_rules_enabled": obj.ReferentialRulesEnabled, + "template_library_installed": obj.TemplateLibraryInstalled, + } + + return []interface{}{transformed} + +} + +func expandGkeHubFeatureMembershipConfigmanagementPolicyControllerMonitoring(o interface{}) *gkehub.FeatureMembershipConfigmanagementPolicyControllerMonitoring { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &gkehub.FeatureMembershipConfigmanagementPolicyControllerMonitoring{ + Backends: expandGkeHubFeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsArray(obj["backends"]), + } +} + +func flattenGkeHubFeatureMembershipConfigmanagementPolicyControllerMonitoring(obj 
*gkehub.FeatureMembershipConfigmanagementPolicyControllerMonitoring) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "backends": flattenGkeHubFeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsArray(obj.Backends), + } + + return []interface{}{transformed} + +} + +func expandGkeHubFeatureMembershipMesh(o interface{}) *gkehub.FeatureMembershipMesh { + if o == nil { + return gkehub.EmptyFeatureMembershipMesh + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return gkehub.EmptyFeatureMembershipMesh + } + obj := objArr[0].(map[string]interface{}) + return &gkehub.FeatureMembershipMesh{ + ControlPlane: gkehub.FeatureMembershipMeshControlPlaneEnumRef(obj["control_plane"].(string)), + Management: gkehub.FeatureMembershipMeshManagementEnumRef(obj["management"].(string)), + } +} + +func flattenGkeHubFeatureMembershipMesh(obj *gkehub.FeatureMembershipMesh) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "control_plane": obj.ControlPlane, + "management": obj.Management, + } + + return []interface{}{transformed} + +} + +func expandGkeHubFeatureMembershipPolicycontroller(o interface{}) *gkehub.FeatureMembershipPolicycontroller { + if o == nil { + return gkehub.EmptyFeatureMembershipPolicycontroller + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return gkehub.EmptyFeatureMembershipPolicycontroller + } + obj := objArr[0].(map[string]interface{}) + return &gkehub.FeatureMembershipPolicycontroller{ + PolicyControllerHubConfig: expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfig(obj["policy_controller_hub_config"]), + Version: dcl.StringOrNil(obj["version"].(string)), + } +} + +func flattenGkeHubFeatureMembershipPolicycontroller(obj *gkehub.FeatureMembershipPolicycontroller) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := 
map[string]interface{}{ + "policy_controller_hub_config": flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfig(obj.PolicyControllerHubConfig), + "version": obj.Version, + } + + return []interface{}{transformed} + +} + +func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfig(o interface{}) *gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfig { + if o == nil { + return gkehub.EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return gkehub.EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfig + } + obj := objArr[0].(map[string]interface{}) + return &gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfig{ + AuditIntervalSeconds: dcl.Int64OrNil(int64(obj["audit_interval_seconds"].(int))), + ConstraintViolationLimit: dcl.Int64(int64(obj["constraint_violation_limit"].(int))), + DeploymentConfigs: expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsMap(obj["deployment_configs"]), + ExemptableNamespaces: tpgdclresource.ExpandStringArray(obj["exemptable_namespaces"]), + InstallSpec: gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnumRef(obj["install_spec"].(string)), + LogDeniesEnabled: dcl.Bool(obj["log_denies_enabled"].(bool)), + Monitoring: expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring(obj["monitoring"]), + MutationEnabled: dcl.Bool(obj["mutation_enabled"].(bool)), + PolicyContent: expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent(obj["policy_content"]), + ReferentialRulesEnabled: dcl.Bool(obj["referential_rules_enabled"].(bool)), + } +} + +func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfig(obj *gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := 
map[string]interface{}{ + "audit_interval_seconds": obj.AuditIntervalSeconds, + "constraint_violation_limit": obj.ConstraintViolationLimit, + "deployment_configs": flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsMap(obj.DeploymentConfigs), + "exemptable_namespaces": obj.ExemptableNamespaces, + "install_spec": obj.InstallSpec, + "log_denies_enabled": obj.LogDeniesEnabled, + "monitoring": flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring(obj.Monitoring), + "mutation_enabled": obj.MutationEnabled, + "policy_content": flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent(obj.PolicyContent), + "referential_rules_enabled": obj.ReferentialRulesEnabled, + } + + return []interface{}{transformed} + +} + +func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsMap(o interface{}) map[string]gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs { + if o == nil { + return nil + } + + o = o.(*schema.Set).List() + + objs := o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return nil + } + + items := make(map[string]gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs) + for _, item := range objs { + i := expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs(item) + if item != nil { + items[item.(map[string]interface{})["component_name"].(string)] = *i + } + } + + return items +} + +func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs(o interface{}) *gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs { + if o == nil { + return nil + } + + obj := o.(map[string]interface{}) + return &gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs{ + ContainerResources: 
expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources(obj["container_resources"]), + PodAffinity: gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnumRef(obj["pod_affinity"].(string)), + PodTolerations: expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsArray(obj["pod_tolerations"]), + ReplicaCount: dcl.Int64(int64(obj["replica_count"].(int))), + } +} + +func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsMap(objs map[string]gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for name, item := range objs { + i := flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs(&item, name) + items = append(items, i) + } + + return items +} + +func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs(obj *gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs, name string) interface{} { + if obj == nil { + return nil + } + transformed := map[string]interface{}{ + "container_resources": flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources(obj.ContainerResources), + "pod_affinity": obj.PodAffinity, + "pod_tolerations": flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsArray(obj.PodTolerations), + "replica_count": obj.ReplicaCount, + } + + transformed["component_name"] = name + + return transformed + +} + +func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources(o interface{}) *gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources { + if o == nil { + return 
gkehub.EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return gkehub.EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources + } + obj := objArr[0].(map[string]interface{}) + return &gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources{ + Limits: expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits(obj["limits"]), + Requests: expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests(obj["requests"]), + } +} + +func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources(obj *gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "limits": flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits(obj.Limits), + "requests": flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests(obj.Requests), + } + + return []interface{}{transformed} + +} + +func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits(o interface{}) *gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits { + if o == nil { + return gkehub.EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return gkehub.EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits + } + obj := 
objArr[0].(map[string]interface{}) + return &gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits{ + Cpu: dcl.String(obj["cpu"].(string)), + Memory: dcl.String(obj["memory"].(string)), + } +} + +func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits(obj *gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "cpu": obj.Cpu, + "memory": obj.Memory, + } + + return []interface{}{transformed} + +} + +func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests(o interface{}) *gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests { + if o == nil { + return gkehub.EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return gkehub.EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests + } + obj := objArr[0].(map[string]interface{}) + return &gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests{ + Cpu: dcl.String(obj["cpu"].(string)), + Memory: dcl.String(obj["memory"].(string)), + } +} + +func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests(obj *gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "cpu": obj.Cpu, + "memory": obj.Memory, + } + + return []interface{}{transformed} + +} +func 
expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsArray(o interface{}) []gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations { + if o == nil { + return make([]gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return make([]gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations, 0) + } + + items := make([]gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations, 0, len(objs)) + for _, item := range objs { + i := expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations(item) + items = append(items, *i) + } + + return items +} + +func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations(o interface{}) *gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations { + if o == nil { + return gkehub.EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations + } + + obj := o.(map[string]interface{}) + return &gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations{ + Effect: dcl.String(obj["effect"].(string)), + Key: dcl.String(obj["key"].(string)), + Operator: dcl.String(obj["operator"].(string)), + Value: dcl.String(obj["value"].(string)), + } +} + +func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsArray(objs []gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := 
flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations(&item) + items = append(items, i) + } + + return items +} + +func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations(obj *gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "effect": obj.Effect, + "key": obj.Key, + "operator": obj.Operator, + "value": obj.Value, + } + + return transformed + +} + +func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring(o interface{}) *gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring{ + Backends: expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsArray(obj["backends"]), + } +} + +func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring(obj *gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "backends": flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsArray(obj.Backends), + } + + return []interface{}{transformed} + +} + +func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent(o interface{}) *gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return 
&gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent{ + Bundles: expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesMap(obj["bundles"]), + TemplateLibrary: expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary(obj["template_library"]), + } +} + +func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent(obj *gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "bundles": flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesMap(obj.Bundles), + "template_library": flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary(obj.TemplateLibrary), + } + + return []interface{}{transformed} + +} + +func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesMap(o interface{}) map[string]gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles { + if o == nil { + return make(map[string]gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles) + } + + o = o.(*schema.Set).List() + + objs := o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return make(map[string]gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles) + } + + items := make(map[string]gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles) + for _, item := range objs { + i := expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles(item) + if item != nil { + items[item.(map[string]interface{})["bundle_name"].(string)] = *i + } + } + + return items +} + +func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles(o interface{}) 
*gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles { + if o == nil { + return gkehub.EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles + } + + obj := o.(map[string]interface{}) + return &gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles{ + ExemptedNamespaces: tpgdclresource.ExpandStringArray(obj["exempted_namespaces"]), + } +} + +func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesMap(objs map[string]gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for name, item := range objs { + i := flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles(&item, name) + items = append(items, i) + } + + return items +} + +func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles(obj *gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles, name string) interface{} { + if obj == nil { + return nil + } + transformed := map[string]interface{}{ + "exempted_namespaces": obj.ExemptedNamespaces, + } + + transformed["bundle_name"] = name + + return transformed + +} + +func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary(o interface{}) *gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary{ + Installation: gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnumRef(obj["installation"].(string)), + } +} + +func 
flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary(obj *gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "installation": obj.Installation, + } + + return []interface{}{transformed} + +} + +func flattenGkeHubFeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsArray(obj []gkehub.FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum) interface{} { + if obj == nil { + return nil + } + items := []string{} + for _, item := range obj { + items = append(items, string(item)) + } + return items +} +func expandGkeHubFeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsArray(o interface{}) []gkehub.FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum { + objs := o.([]interface{}) + items := make([]gkehub.FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum, 0, len(objs)) + for _, item := range objs { + i := gkehub.FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnumRef(item.(string)) + items = append(items, *i) + } + return items +} +func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsArray(obj []gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum) interface{} { + if obj == nil { + return nil + } + items := []string{} + for _, item := range obj { + items = append(items, string(item)) + } + return items +} +func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsArray(o interface{}) []gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum { + objs := o.([]interface{}) + items := make([]gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum, 0, len(objs)) + for _, item := range objs { + i := 
gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnumRef(item.(string)) + items = append(items, *i) + } + return items +} diff --git a/mmv1/third_party/terraform/services/gkehub/resource_gke_hub_feature_membership_test.go.tmpl b/mmv1/third_party/terraform/services/gkehub/resource_gke_hub_feature_membership_test.go.tmpl index 9825cee156a2..eb6beb9f6f96 100644 --- a/mmv1/third_party/terraform/services/gkehub/resource_gke_hub_feature_membership_test.go.tmpl +++ b/mmv1/third_party/terraform/services/gkehub/resource_gke_hub_feature_membership_test.go.tmpl @@ -6,8 +6,7 @@ import ( "strings" "testing" - dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" - gkehub "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/gkehub{{ $.DCLVersion }}" + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -1242,7 +1241,7 @@ resource "google_gke_hub_membership" "membership_acmoci" { func testAccCheckGkeHubFeatureMembershipPresent(t *testing.T, project, location, feature, membership string) resource.TestCheckFunc { return func(s *terraform.State) error { config := acctest.GoogleProviderConfig(t) - obj := &gkehub.FeatureMembership{ + obj := &FeatureMembership{ Feature: dcl.StringOrNil(feature), Location: dcl.StringOrNil(location), Membership: dcl.StringOrNil(membership), @@ -1260,7 +1259,7 @@ func testAccCheckGkeHubFeatureMembershipPresent(t *testing.T, project, location, func testAccCheckGkeHubFeatureMembershipNotPresent(t *testing.T, project, location, feature, membership string) resource.TestCheckFunc { return func(s *terraform.State) error { config := acctest.GoogleProviderConfig(t) - obj := &gkehub.FeatureMembership{ + obj := &FeatureMembership{ Feature: dcl.StringOrNil(feature), Location: dcl.StringOrNil(location), Membership: 
dcl.StringOrNil(membership), @@ -1338,7 +1337,7 @@ resource "google_project_service" "container" { resource "google_project_service" "gkehub" { project = google_project.project.project_id - service = "gkehub.googleapis.com" + service = "gkehub.googleapis.com" depends_on = [google_project_service.container] } diff --git a/mmv1/third_party/terraform/services/recaptchaenterprise/client.go.tmpl b/mmv1/third_party/terraform/services/recaptchaenterprise/client.go.tmpl new file mode 100644 index 000000000000..e28180ff2595 --- /dev/null +++ b/mmv1/third_party/terraform/services/recaptchaenterprise/client.go.tmpl @@ -0,0 +1,18 @@ +package recaptchaenterprise + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +// The Client is the base struct of all operations. This will receive the +// Get, Delete, List, and Apply operations on all resources. +type Client struct { + Config *dcl.Config +} + +// NewClient creates a client that retries all operations a few times each.
+func NewClient(c *dcl.Config) *Client { + return &Client{ + Config: c, + } +} diff --git a/mmv1/third_party/terraform/services/recaptchaenterprise/key.go.tmpl b/mmv1/third_party/terraform/services/recaptchaenterprise/key.go.tmpl new file mode 100644 index 000000000000..197f20db5df8 --- /dev/null +++ b/mmv1/third_party/terraform/services/recaptchaenterprise/key.go.tmpl @@ -0,0 +1,764 @@ +package recaptchaenterprise + +import ( + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "time" + +{{- if ne $.TargetVersionName "ga" }} + "google.golang.org/api/googleapi" +{{- end }} + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +{{- if eq $.TargetVersionName "ga" }} + "google.golang.org/api/googleapi" +{{- end }} +) + +type Key struct { + Name *string `json:"name"` + DisplayName *string `json:"displayName"` + WebSettings *KeyWebSettings `json:"webSettings"` + AndroidSettings *KeyAndroidSettings `json:"androidSettings"` + IosSettings *KeyIosSettings `json:"iosSettings"` + Labels map[string]string `json:"labels"` + CreateTime *string `json:"createTime"` + TestingOptions *KeyTestingOptions `json:"testingOptions"` + WafSettings *KeyWafSettings `json:"wafSettings"` + Project *string `json:"project"` +} + +func (r *Key) String() string { + return dcl.SprintResource(r) +} + +// The enum KeyWebSettingsIntegrationTypeEnum. +type KeyWebSettingsIntegrationTypeEnum string + +// KeyWebSettingsIntegrationTypeEnumRef returns a *KeyWebSettingsIntegrationTypeEnum with the value of string s +// If the empty string is provided, nil is returned. +func KeyWebSettingsIntegrationTypeEnumRef(s string) *KeyWebSettingsIntegrationTypeEnum { + v := KeyWebSettingsIntegrationTypeEnum(s) + return &v +} + +func (v KeyWebSettingsIntegrationTypeEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. 
+ return nil + } + for _, s := range []string{"SCORE", "CHECKBOX", "INVISIBLE"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "KeyWebSettingsIntegrationTypeEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum KeyWebSettingsChallengeSecurityPreferenceEnum. +type KeyWebSettingsChallengeSecurityPreferenceEnum string + +// KeyWebSettingsChallengeSecurityPreferenceEnumRef returns a *KeyWebSettingsChallengeSecurityPreferenceEnum with the value of string s +// If the empty string is provided, nil is returned. +func KeyWebSettingsChallengeSecurityPreferenceEnumRef(s string) *KeyWebSettingsChallengeSecurityPreferenceEnum { + v := KeyWebSettingsChallengeSecurityPreferenceEnum(s) + return &v +} + +func (v KeyWebSettingsChallengeSecurityPreferenceEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"CHALLENGE_SECURITY_PREFERENCE_UNSPECIFIED", "USABILITY", "BALANCE", "SECURITY"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "KeyWebSettingsChallengeSecurityPreferenceEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum KeyTestingOptionsTestingChallengeEnum. +type KeyTestingOptionsTestingChallengeEnum string + +// KeyTestingOptionsTestingChallengeEnumRef returns a *KeyTestingOptionsTestingChallengeEnum with the value of string s +// If the empty string is provided, nil is returned. +func KeyTestingOptionsTestingChallengeEnumRef(s string) *KeyTestingOptionsTestingChallengeEnum { + v := KeyTestingOptionsTestingChallengeEnum(s) + return &v +} + +func (v KeyTestingOptionsTestingChallengeEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. 
+ return nil + } + for _, s := range []string{"TESTING_CHALLENGE_UNSPECIFIED", "NOCAPTCHA", "UNSOLVABLE_CHALLENGE"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "KeyTestingOptionsTestingChallengeEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum KeyWafSettingsWafServiceEnum. +type KeyWafSettingsWafServiceEnum string + +// KeyWafSettingsWafServiceEnumRef returns a *KeyWafSettingsWafServiceEnum with the value of string s +// If the empty string is provided, nil is returned. +func KeyWafSettingsWafServiceEnumRef(s string) *KeyWafSettingsWafServiceEnum { + v := KeyWafSettingsWafServiceEnum(s) + return &v +} + +func (v KeyWafSettingsWafServiceEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. + return nil + } + for _, s := range []string{"CA", "FASTLY"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "KeyWafSettingsWafServiceEnum", + Value: string(v), + Valid: []string{}, + } +} + +// The enum KeyWafSettingsWafFeatureEnum. +type KeyWafSettingsWafFeatureEnum string + +// KeyWafSettingsWafFeatureEnumRef returns a *KeyWafSettingsWafFeatureEnum with the value of string s +// If the empty string is provided, nil is returned. +func KeyWafSettingsWafFeatureEnumRef(s string) *KeyWafSettingsWafFeatureEnum { + v := KeyWafSettingsWafFeatureEnum(s) + return &v +} + +func (v KeyWafSettingsWafFeatureEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. 
+ return nil + } + for _, s := range []string{"CHALLENGE_PAGE", "SESSION_TOKEN", "ACTION_TOKEN", "EXPRESS"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "KeyWafSettingsWafFeatureEnum", + Value: string(v), + Valid: []string{}, + } +} + +type KeyWebSettings struct { + empty bool `json:"-"` + AllowAllDomains *bool `json:"allowAllDomains"` + AllowedDomains []string `json:"allowedDomains"` + AllowAmpTraffic *bool `json:"allowAmpTraffic"` + IntegrationType *KeyWebSettingsIntegrationTypeEnum `json:"integrationType"` + ChallengeSecurityPreference *KeyWebSettingsChallengeSecurityPreferenceEnum `json:"challengeSecurityPreference"` +} + +type jsonKeyWebSettings KeyWebSettings + +func (r *KeyWebSettings) UnmarshalJSON(data []byte) error { + var res jsonKeyWebSettings + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyKeyWebSettings + } else { + + r.AllowAllDomains = res.AllowAllDomains + + r.AllowedDomains = res.AllowedDomains + + r.AllowAmpTraffic = res.AllowAmpTraffic + + r.IntegrationType = res.IntegrationType + + r.ChallengeSecurityPreference = res.ChallengeSecurityPreference + + } + return nil +} + +// This object is used to assert a desired state where this KeyWebSettings is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyKeyWebSettings *KeyWebSettings = &KeyWebSettings{empty: true} + +func (r *KeyWebSettings) Empty() bool { + return r.empty +} + +func (r *KeyWebSettings) String() string { + return dcl.SprintResource(r) +} + +func (r *KeyWebSettings) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type KeyAndroidSettings struct { + empty bool `json:"-"` + AllowAllPackageNames *bool `json:"allowAllPackageNames"` + AllowedPackageNames []string `json:"allowedPackageNames"` +} + +type jsonKeyAndroidSettings KeyAndroidSettings + +func (r *KeyAndroidSettings) UnmarshalJSON(data []byte) error { + var res jsonKeyAndroidSettings + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyKeyAndroidSettings + } else { + + r.AllowAllPackageNames = res.AllowAllPackageNames + + r.AllowedPackageNames = res.AllowedPackageNames + + } + return nil +} + +// This object is used to assert a desired state where this KeyAndroidSettings is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyKeyAndroidSettings *KeyAndroidSettings = &KeyAndroidSettings{empty: true} + +func (r *KeyAndroidSettings) Empty() bool { + return r.empty +} + +func (r *KeyAndroidSettings) String() string { + return dcl.SprintResource(r) +} + +func (r *KeyAndroidSettings) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type KeyIosSettings struct { + empty bool `json:"-"` + AllowAllBundleIds *bool `json:"allowAllBundleIds"` + AllowedBundleIds []string `json:"allowedBundleIds"` +} + +type jsonKeyIosSettings KeyIosSettings + +func (r *KeyIosSettings) UnmarshalJSON(data []byte) error { + var res jsonKeyIosSettings + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyKeyIosSettings + } else { + + r.AllowAllBundleIds = res.AllowAllBundleIds + + r.AllowedBundleIds = res.AllowedBundleIds + + } + return nil +} + +// This object is used to assert a desired state where this KeyIosSettings is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyKeyIosSettings *KeyIosSettings = &KeyIosSettings{empty: true} + +func (r *KeyIosSettings) Empty() bool { + return r.empty +} + +func (r *KeyIosSettings) String() string { + return dcl.SprintResource(r) +} + +func (r *KeyIosSettings) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type KeyTestingOptions struct { + empty bool `json:"-"` + TestingScore *float64 `json:"testingScore"` + TestingChallenge *KeyTestingOptionsTestingChallengeEnum `json:"testingChallenge"` +} + +type jsonKeyTestingOptions KeyTestingOptions + +func (r *KeyTestingOptions) UnmarshalJSON(data []byte) error { + var res jsonKeyTestingOptions + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyKeyTestingOptions + } else { + + r.TestingScore = res.TestingScore + + r.TestingChallenge = res.TestingChallenge + + } + return nil +} + +// This object is used to assert a desired state where this KeyTestingOptions is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyKeyTestingOptions *KeyTestingOptions = &KeyTestingOptions{empty: true} + +func (r *KeyTestingOptions) Empty() bool { + return r.empty +} + +func (r *KeyTestingOptions) String() string { + return dcl.SprintResource(r) +} + +func (r *KeyTestingOptions) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type KeyWafSettings struct { + empty bool `json:"-"` + WafService *KeyWafSettingsWafServiceEnum `json:"wafService"` + WafFeature *KeyWafSettingsWafFeatureEnum `json:"wafFeature"` +} + +type jsonKeyWafSettings KeyWafSettings + +func (r *KeyWafSettings) UnmarshalJSON(data []byte) error { + var res jsonKeyWafSettings + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyKeyWafSettings + } else { + + r.WafService = res.WafService + + r.WafFeature = res.WafFeature + + } + return nil +} + +// This object is used to assert a desired state where this KeyWafSettings is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyKeyWafSettings *KeyWafSettings = &KeyWafSettings{empty: true} + +func (r *KeyWafSettings) Empty() bool { + return r.empty +} + +func (r *KeyWafSettings) String() string { + return dcl.SprintResource(r) +} + +func (r *KeyWafSettings) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.Sum256([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +// Describe returns a simple description of this resource to ensure that automated tools +// can identify it. 
+func (r *Key) Describe() dcl.ServiceTypeVersion { + return dcl.ServiceTypeVersion{ + Service: "recaptcha_enterprise", + Type: "Key", +{{- if ne $.TargetVersionName "ga" }} + Version: "beta", +{{- else }} + Version: "recaptchaenterprise", +{{- end }} + } +} + +func (r *Key) ID() (string, error) { + if err := extractKeyFields(r); err != nil { + return "", err + } + nr := r.urlNormalized() + params := map[string]interface{}{ + "name": dcl.ValueOrEmptyString(nr.Name), + "display_name": dcl.ValueOrEmptyString(nr.DisplayName), + "web_settings": dcl.ValueOrEmptyString(nr.WebSettings), + "android_settings": dcl.ValueOrEmptyString(nr.AndroidSettings), + "ios_settings": dcl.ValueOrEmptyString(nr.IosSettings), + "labels": dcl.ValueOrEmptyString(nr.Labels), + "create_time": dcl.ValueOrEmptyString(nr.CreateTime), + "testing_options": dcl.ValueOrEmptyString(nr.TestingOptions), + "waf_settings": dcl.ValueOrEmptyString(nr.WafSettings), + "project": dcl.ValueOrEmptyString(nr.Project), + } + return dcl.Nprintf("projects/{{ "{{" }}project{{ "}}" }}/keys/{{ "{{" }}name{{ "}}" }}", params), nil +} + +const KeyMaxPage = -1 + +type KeyList struct { + Items []*Key + + nextToken string + + pageSize int32 + + resource *Key +} + +func (l *KeyList) HasNext() bool { + return l.nextToken != "" +} + +func (l *KeyList) Next(ctx context.Context, c *Client) error { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if !l.HasNext() { + return fmt.Errorf("no next page") + } + items, token, err := c.listKey(ctx, l.resource, l.nextToken, l.pageSize) + if err != nil { + return err + } + l.Items = items + l.nextToken = token + return err +} + +func (c *Client) ListKey(ctx context.Context, project string) (*KeyList, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + return c.ListKeyWithMaxResults(ctx, project, KeyMaxPage) + +} + +func (c *Client) 
ListKeyWithMaxResults(ctx context.Context, project string, pageSize int32) (*KeyList, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // Create a resource object so that we can use proper url normalization methods. + r := &Key{ + Project: &project, + } + items, token, err := c.listKey(ctx, r, "", pageSize) + if err != nil { + return nil, err + } + return &KeyList{ + Items: items, + nextToken: token, + pageSize: pageSize, + resource: r, + }, nil +} + +func (c *Client) GetKey(ctx context.Context, r *Key) (*Key, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // This is *purposefully* suppressing errors. + // This function is used with url-normalized values + not URL normalized values. + // URL Normalized values will throw unintentional errors, since those values are not of the proper parent form. + extractKeyFields(r) + + b, err := c.getKeyRaw(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + return nil, &googleapi.Error{ + Code: 404, + Message: err.Error(), + } + } + return nil, err + } + result, err := unmarshalKey(b, c, r) + if err != nil { + return nil, err + } + result.Project = r.Project + result.Name = r.Name + + c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) + result, err = canonicalizeKeyNewState(c, result, r) + if err != nil { + return nil, err + } + if err := postReadExtractKeyFields(result); err != nil { + return result, err + } + c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result) + + return result, nil +} + +func (c *Client) DeleteKey(ctx context.Context, r *Key) error { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if r == nil { + return fmt.Errorf("Key resource is 
nil") + } + c.Config.Logger.InfoWithContext(ctx, "Deleting Key...") + deleteOp := deleteKeyOperation{} + return deleteOp.do(ctx, r, c) +} + +// DeleteAllKey deletes all resources that the filter functions returns true on. +func (c *Client) DeleteAllKey(ctx context.Context, project string, filter func(*Key) bool) error { + listObj, err := c.ListKey(ctx, project) + if err != nil { + return err + } + + err = c.deleteAllKey(ctx, filter, listObj.Items) + if err != nil { + return err + } + for listObj.HasNext() { + err = listObj.Next(ctx, c) + if err != nil { + return nil + } + err = c.deleteAllKey(ctx, filter, listObj.Items) + if err != nil { + return err + } + } + return nil +} + +func (c *Client) ApplyKey(ctx context.Context, rawDesired *Key, opts ...dcl.ApplyOption) (*Key, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + ctx = dcl.ContextWithRequestID(ctx) + var resultNewState *Key + err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + newState, err := applyKeyHelper(c, ctx, rawDesired, opts...) + resultNewState = newState + if err != nil { + // If the error is 409, there is conflict in resource update. + // Here we want to apply changes based on latest state. + if dcl.IsConflictError(err) { + return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} + } + return nil, err + } + return nil, nil + }, c.Config.RetryProvider) + return resultNewState, err +} + +func applyKeyHelper(c *Client, ctx context.Context, rawDesired *Key, opts ...dcl.ApplyOption) (*Key, error) { + c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyKey...") + c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) + + // 1.1: Validation of user-specified fields in desired state. 
+ if err := rawDesired.validate(); err != nil { + return nil, err + } + + if err := extractKeyFields(rawDesired); err != nil { + return nil, err + } + + initial, desired, fieldDiffs, err := c.keyDiffsForRawDesired(ctx, rawDesired, opts...) + if err != nil { + return nil, fmt.Errorf("failed to create a diff: %w", err) + } + + diffs, err := convertFieldDiffsToKeyDiffs(c.Config, fieldDiffs, opts) + if err != nil { + return nil, err + } + + // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). + + // 2.3: Lifecycle Directive Check + var create bool + lp := dcl.FetchLifecycleParams(opts) + if initial == nil { + if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} + } + create = true + } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), + } + } else { + for _, d := range diffs { + if d.RequiresRecreate { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), + } + } + if dcl.HasLifecycleParam(lp, dcl.BlockModification) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} + } + } + } + + // 2.4 Imperative Request Planning + var ops []keyApiOperation + if create { + ops = append(ops, &createKeyOperation{}) + } else { + for _, d := range diffs { + ops = append(ops, d.UpdateOp) + } + } + c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) + + // 2.5 Request Actuation + for _, op := range ops { + c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) + if err := op.do(ctx, desired, c); err != nil { + c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", op, op, err) + return nil, err + } + 
c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) + } + return applyKeyDiff(c, ctx, desired, rawDesired, ops, opts...) +} + +func applyKeyDiff(c *Client, ctx context.Context, desired *Key, rawDesired *Key, ops []keyApiOperation, opts ...dcl.ApplyOption) (*Key, error) { + // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") + rawNew, err := c.GetKey(ctx, desired) + if err != nil { + return nil, err + } + // Get additional values from the first response. + // These values should be merged into the newState above. + if len(ops) > 0 { + lastOp := ops[len(ops)-1] + if o, ok := lastOp.(*createKeyOperation); ok { + if r, hasR := o.FirstResponse(); hasR { + + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") + + fullResp, err := unmarshalMapKey(r, c, rawDesired) + if err != nil { + return nil, err + } + + rawNew, err = canonicalizeKeyNewState(c, rawNew, fullResp) + if err != nil { + return nil, err + } + } + } + } + + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) + // 3.2b Canonicalization of raw new state using raw desired state + newState, err := canonicalizeKeyNewState(c, rawNew, rawDesired) + if err != nil { + return rawNew, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) + // 3.3 Comparison of the new state and raw desired state. + // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE + newDesired, err := canonicalizeKeyDesiredState(rawDesired, newState) + if err != nil { + return newState, err + } + + if err := postReadExtractKeyFields(newState); err != nil { + return newState, err + } + + // Need to ensure any transformations made here match acceptably in differ. 
+ if err := postReadExtractKeyFields(newDesired); err != nil { + return newState, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) + newDiffs, err := diffKey(c, newDesired, newState) + if err != nil { + return newState, err + } + + if len(newDiffs) == 0 { + c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") + } else { + c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) + diffMessages := make([]string, len(newDiffs)) + for i, d := range newDiffs { + diffMessages[i] = fmt.Sprintf("%v", d) + } + return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} + } + c.Config.Logger.InfoWithContext(ctx, "Done Apply.") + return newState, nil +} diff --git a/mmv1/third_party/terraform/services/recaptchaenterprise/key_internal.go.tmpl b/mmv1/third_party/terraform/services/recaptchaenterprise/key_internal.go.tmpl new file mode 100644 index 000000000000..398a758724e0 --- /dev/null +++ b/mmv1/third_party/terraform/services/recaptchaenterprise/key_internal.go.tmpl @@ -0,0 +1,2750 @@ +package recaptchaenterprise + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "strings" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +func (r *Key) validate() error { + + if err := dcl.ValidateExactlyOneOfFieldsSet([]string{"WebSettings", "AndroidSettings", "IosSettings"}, r.WebSettings, r.AndroidSettings, r.IosSettings); err != nil { + return err + } + if err := dcl.Required(r, "displayName"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Project, "Project"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.WebSettings) { + if err := r.WebSettings.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.AndroidSettings) { + if err := r.AndroidSettings.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.IosSettings) { + if err := 
r.IosSettings.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.TestingOptions) { + if err := r.TestingOptions.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.WafSettings) { + if err := r.WafSettings.validate(); err != nil { + return err + } + } + return nil +} +func (r *KeyWebSettings) validate() error { + if err := dcl.Required(r, "integrationType"); err != nil { + return err + } + return nil +} +func (r *KeyAndroidSettings) validate() error { + return nil +} +func (r *KeyIosSettings) validate() error { + return nil +} +func (r *KeyTestingOptions) validate() error { + return nil +} +func (r *KeyWafSettings) validate() error { + if err := dcl.Required(r, "wafService"); err != nil { + return err + } + if err := dcl.Required(r, "wafFeature"); err != nil { + return err + } + return nil +} +func (r *Key) basePath() string { + params := map[string]interface{}{} + return dcl.Nprintf("https://recaptchaenterprise.googleapis.com/v1/", params) +} + +func (r *Key) getURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/keys/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +func (r *Key) listURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/keys", nr.basePath(), userBasePath, params), nil + +} + +func (r *Key) createURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/keys", nr.basePath(), userBasePath, params), nil + +} + +func (r *Key) deleteURL(userBasePath string) (string, 
error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/keys/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil +} + +// keyApiOperation represents a mutable operation in the underlying REST +// API such as Create, Update, or Delete. +type keyApiOperation interface { + do(context.Context, *Key, *Client) error +} + +// newUpdateKeyUpdateKeyRequest creates a request for an +// Key resource's UpdateKey update type by filling in the update +// fields based on the intended state of the resource. +func newUpdateKeyUpdateKeyRequest(ctx context.Context, f *Key, c *Client) (map[string]interface{}, error) { + req := map[string]interface{}{} + res := f + _ = res + + if v := f.DisplayName; !dcl.IsEmptyValueIndirect(v) { + req["displayName"] = v + } + if v, err := expandKeyWebSettings(c, f.WebSettings, res); err != nil { + return nil, fmt.Errorf("error expanding WebSettings into webSettings: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["webSettings"] = v + } + if v, err := expandKeyAndroidSettings(c, f.AndroidSettings, res); err != nil { + return nil, fmt.Errorf("error expanding AndroidSettings into androidSettings: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["androidSettings"] = v + } + if v, err := expandKeyIosSettings(c, f.IosSettings, res); err != nil { + return nil, fmt.Errorf("error expanding IosSettings into iosSettings: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["iosSettings"] = v + } + if v := f.Labels; !dcl.IsEmptyValueIndirect(v) { + req["labels"] = v + } + return req, nil +} + +// marshalUpdateKeyUpdateKeyRequest converts the update into +// the final JSON request body. 
+func marshalUpdateKeyUpdateKeyRequest(c *Client, m map[string]interface{}) ([]byte, error) { + + return json.Marshal(m) +} + +type updateKeyUpdateKeyOperation struct { + // If the update operation has the REQUIRES_APPLY_OPTIONS trait, this will be populated. + // Usually it will be nil - this is to prevent us from accidentally depending on apply + // options, which should usually be unnecessary. + ApplyOptions []dcl.ApplyOption + FieldDiffs []*dcl.FieldDiff +} + +// do creates a request and sends it to the appropriate URL. In most operations, +// do will transcribe a subset of the resource into a request object and send a +// PUT request to a single URL. + +func (op *updateKeyUpdateKeyOperation) do(ctx context.Context, r *Key, c *Client) error { + _, err := c.GetKey(ctx, r) + if err != nil { + return err + } + + u, err := r.updateURL(c.Config.BasePath, "UpdateKey") + if err != nil { + return err + } + mask := dcl.UpdateMask(op.FieldDiffs) + u, err = dcl.AddQueryParams(u, map[string]string{"updateMask": mask}) + if err != nil { + return err + } + + req, err := newUpdateKeyUpdateKeyRequest(ctx, r, c) + if err != nil { + return err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created update: %#v", req) + body, err := marshalUpdateKeyUpdateKeyRequest(c, req) + if err != nil { + return err + } + _, err = dcl.SendRequest(ctx, c.Config, "PATCH", u, bytes.NewBuffer(body), c.Config.RetryProvider) + if err != nil { + return err + } + + return nil +} + +func (c *Client) listKeyRaw(ctx context.Context, r *Key, pageToken string, pageSize int32) ([]byte, error) { + u, err := r.urlNormalized().listURL(c.Config.BasePath) + if err != nil { + return nil, err + } + + m := make(map[string]string) + if pageToken != "" { + m["pageToken"] = pageToken + } + + if pageSize != KeyMaxPage { + m["pageSize"] = fmt.Sprintf("%v", pageSize) + } + + u, err = dcl.AddQueryParams(u, m) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, 
c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + return ioutil.ReadAll(resp.Response.Body) +} + +type listKeyOperation struct { + Keys []map[string]interface{} `json:"keys"` + Token string `json:"nextPageToken"` +} + +func (c *Client) listKey(ctx context.Context, r *Key, pageToken string, pageSize int32) ([]*Key, string, error) { + b, err := c.listKeyRaw(ctx, r, pageToken, pageSize) + if err != nil { + return nil, "", err + } + + var m listKeyOperation + if err := json.Unmarshal(b, &m); err != nil { + return nil, "", err + } + + var l []*Key + for _, v := range m.Keys { + res, err := unmarshalMapKey(v, c, r) + if err != nil { + return nil, m.Token, err + } + res.Project = r.Project + l = append(l, res) + } + + return l, m.Token, nil +} + +func (c *Client) deleteAllKey(ctx context.Context, f func(*Key) bool, resources []*Key) error { + var errors []string + for _, res := range resources { + if f(res) { + // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. + err := c.DeleteKey(ctx, res) + if err != nil { + errors = append(errors, err.Error()) + } + } + } + if len(errors) > 0 { + return fmt.Errorf("%v", strings.Join(errors, "\n")) + } else { + return nil + } +} + +type deleteKeyOperation struct{} + +func (op *deleteKeyOperation) do(ctx context.Context, r *Key, c *Client) error { + r, err := c.GetKey(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + c.Config.Logger.InfoWithContextf(ctx, "Key not found, returning. Original error: %v", err) + return nil + } + c.Config.Logger.WarningWithContextf(ctx, "GetKey checking for existence. 
error: %v", err) + return err + } + + u, err := r.deleteURL(c.Config.BasePath) + if err != nil { + return err + } + + // Delete should never have a body + body := &bytes.Buffer{} + _, err = dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider) + if err != nil { + return fmt.Errorf("failed to delete Key: %w", err) + } + return nil +} + +// Create operations are similar to Update operations, although they do not have +// specific request objects. The Create request object is the json encoding of +// the resource, which is modified by res.marshal to form the base request body. +type createKeyOperation struct { + response map[string]interface{} +} + +func (op *createKeyOperation) FirstResponse() (map[string]interface{}, bool) { + return op.response, len(op.response) > 0 +} + +func (op *createKeyOperation) do(ctx context.Context, r *Key, c *Client) error { + c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r) + u, err := r.createURL(c.Config.BasePath) + if err != nil { + return err + } + + req, err := r.marshal(c) + if err != nil { + return err + } + if r.Name != nil { + // Allowing creation to continue with Name set could result in a Key with the wrong Name. + return fmt.Errorf("server-generated parameter Name was specified by user as %v, should be unspecified", dcl.ValueOrEmptyString(r.Name)) + } + resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider) + if err != nil { + return err + } + + o, err := dcl.ResponseBodyAsJSON(resp) + if err != nil { + return fmt.Errorf("error decoding response body into JSON: %w", err) + } + op.response = o + + // Include Name in URL substitution for initial GET request. 
+ m := op.response + r.Name = dcl.SelfLinkToName(dcl.FlattenString(m["name"])) + + if _, err := c.GetKey(ctx, r); err != nil { + c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) + return err + } + + return nil +} + +func (c *Client) getKeyRaw(ctx context.Context, r *Key) ([]byte, error) { + + u, err := r.getURL(c.Config.BasePath) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + b, err := ioutil.ReadAll(resp.Response.Body) + if err != nil { + return nil, err + } + + return b, nil +} + +func (c *Client) keyDiffsForRawDesired(ctx context.Context, rawDesired *Key, opts ...dcl.ApplyOption) (initial, desired *Key, diffs []*dcl.FieldDiff, err error) { + c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") + // First, let us see if the user provided a state hint. If they did, we will start fetching based on that. + var fetchState *Key + if sh := dcl.FetchStateHint(opts); sh != nil { + if r, ok := sh.(*Key); !ok { + c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected Key, got %T", sh) + } else { + fetchState = r + } + } + if fetchState == nil { + fetchState = rawDesired + } + + if fetchState.Name == nil { + // We cannot perform a get because of lack of information. We have to assume + // that this is being created for the first time. 
+ desired, err := canonicalizeKeyDesiredState(rawDesired, nil) + return nil, desired, nil, err + } + // 1.2: Retrieval of raw initial state from API + rawInitial, err := c.GetKey(ctx, fetchState) + if rawInitial == nil { + if !dcl.IsNotFound(err) { + c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a Key resource already exists: %s", err) + return nil, nil, nil, fmt.Errorf("failed to retrieve Key resource: %v", err) + } + c.Config.Logger.InfoWithContext(ctx, "Found that Key resource did not exist.") + // Perform canonicalization to pick up defaults. + desired, err = canonicalizeKeyDesiredState(rawDesired, rawInitial) + return nil, desired, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Found initial state for Key: %v", rawInitial) + c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for Key: %v", rawDesired) + + // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. + if err := extractKeyFields(rawInitial); err != nil { + return nil, nil, nil, err + } + + // 1.3: Canonicalize raw initial state into initial state. + initial, err = canonicalizeKeyInitialState(rawInitial, rawDesired) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for Key: %v", initial) + + // 1.4: Canonicalize raw desired state into desired state. + desired, err = canonicalizeKeyDesiredState(rawDesired, rawInitial, opts...) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for Key: %v", desired) + + // 2.1: Comparison of initial and desired state. + diffs, err = diffKey(c, desired, initial, opts...) + return initial, desired, diffs, err +} + +func canonicalizeKeyInitialState(rawInitial, rawDesired *Key) (*Key, error) { + // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. 
+ + if !dcl.IsZeroValue(rawInitial.WebSettings) { + // Check if anything else is set. + if dcl.AnySet(rawInitial.AndroidSettings, rawInitial.IosSettings) { + rawInitial.WebSettings = EmptyKeyWebSettings + } + } + + if !dcl.IsZeroValue(rawInitial.AndroidSettings) { + // Check if anything else is set. + if dcl.AnySet(rawInitial.WebSettings, rawInitial.IosSettings) { + rawInitial.AndroidSettings = EmptyKeyAndroidSettings + } + } + + if !dcl.IsZeroValue(rawInitial.IosSettings) { + // Check if anything else is set. + if dcl.AnySet(rawInitial.WebSettings, rawInitial.AndroidSettings) { + rawInitial.IosSettings = EmptyKeyIosSettings + } + } + + return rawInitial, nil +} + +/* +* Canonicalizers +* +* These are responsible for converting either a user-specified config or a +* GCP API response to a standard format that can be used for difference checking. +* */ + +func canonicalizeKeyDesiredState(rawDesired, rawInitial *Key, opts ...dcl.ApplyOption) (*Key, error) { + + if rawInitial == nil { + // Since the initial state is empty, the desired state is all we have. + // We canonicalize the remaining nested objects with nil to pick up defaults. + rawDesired.WebSettings = canonicalizeKeyWebSettings(rawDesired.WebSettings, nil, opts...) + rawDesired.AndroidSettings = canonicalizeKeyAndroidSettings(rawDesired.AndroidSettings, nil, opts...) + rawDesired.IosSettings = canonicalizeKeyIosSettings(rawDesired.IosSettings, nil, opts...) + rawDesired.TestingOptions = canonicalizeKeyTestingOptions(rawDesired.TestingOptions, nil, opts...) + rawDesired.WafSettings = canonicalizeKeyWafSettings(rawDesired.WafSettings, nil, opts...) + + return rawDesired, nil + } + canonicalDesired := &Key{} + if dcl.IsZeroValue(rawDesired.Name) || (dcl.IsEmptyValueIndirect(rawDesired.Name) && dcl.IsEmptyValueIndirect(rawInitial.Name)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ canonicalDesired.Name = rawInitial.Name + } else { + canonicalDesired.Name = rawDesired.Name + } + if dcl.StringCanonicalize(rawDesired.DisplayName, rawInitial.DisplayName) { + canonicalDesired.DisplayName = rawInitial.DisplayName + } else { + canonicalDesired.DisplayName = rawDesired.DisplayName + } + canonicalDesired.WebSettings = canonicalizeKeyWebSettings(rawDesired.WebSettings, rawInitial.WebSettings, opts...) + canonicalDesired.AndroidSettings = canonicalizeKeyAndroidSettings(rawDesired.AndroidSettings, rawInitial.AndroidSettings, opts...) + canonicalDesired.IosSettings = canonicalizeKeyIosSettings(rawDesired.IosSettings, rawInitial.IosSettings, opts...) + if dcl.IsZeroValue(rawDesired.Labels) || (dcl.IsEmptyValueIndirect(rawDesired.Labels) && dcl.IsEmptyValueIndirect(rawInitial.Labels)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + canonicalDesired.Labels = rawInitial.Labels + } else { + canonicalDesired.Labels = rawDesired.Labels + } + canonicalDesired.TestingOptions = canonicalizeKeyTestingOptions(rawDesired.TestingOptions, rawInitial.TestingOptions, opts...) + canonicalDesired.WafSettings = canonicalizeKeyWafSettings(rawDesired.WafSettings, rawInitial.WafSettings, opts...) + if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) { + canonicalDesired.Project = rawInitial.Project + } else { + canonicalDesired.Project = rawDesired.Project + } + + if canonicalDesired.WebSettings != nil { + // Check if anything else is set. + if dcl.AnySet(rawDesired.AndroidSettings, rawDesired.IosSettings) { + canonicalDesired.WebSettings = EmptyKeyWebSettings + } + } + + if canonicalDesired.AndroidSettings != nil { + // Check if anything else is set. + if dcl.AnySet(rawDesired.WebSettings, rawDesired.IosSettings) { + canonicalDesired.AndroidSettings = EmptyKeyAndroidSettings + } + } + + if canonicalDesired.IosSettings != nil { + // Check if anything else is set. 
+		// Web/Android/iOS settings are mutually exclusive platform blocks: when either
+		// of the other two is set, explicitly empty out IosSettings.
+		if dcl.AnySet(rawDesired.WebSettings, rawDesired.AndroidSettings) {
+			canonicalDesired.IosSettings = EmptyKeyIosSettings
+		}
+	}
+
+	return canonicalDesired, nil
+}
+
+// canonicalizeKeyNewState merges the raw API response (rawNew) with the desired
+// state: fields the server returned empty are backfilled from rawDesired, nested
+// messages are merged via their canonicalizeNew* helpers, and Project is always
+// copied from desired (generated code; do not hand-edit).
+func canonicalizeKeyNewState(c *Client, rawNew, rawDesired *Key) (*Key, error) {
+
+	if dcl.IsEmptyValueIndirect(rawNew.Name) && dcl.IsEmptyValueIndirect(rawDesired.Name) {
+		rawNew.Name = rawDesired.Name
+	} else {
+	}
+
+	if dcl.IsEmptyValueIndirect(rawNew.DisplayName) && dcl.IsEmptyValueIndirect(rawDesired.DisplayName) {
+		rawNew.DisplayName = rawDesired.DisplayName
+	} else {
+		if dcl.StringCanonicalize(rawDesired.DisplayName, rawNew.DisplayName) {
+			rawNew.DisplayName = rawDesired.DisplayName
+		}
+	}
+
+	if dcl.IsEmptyValueIndirect(rawNew.WebSettings) && dcl.IsEmptyValueIndirect(rawDesired.WebSettings) {
+		rawNew.WebSettings = rawDesired.WebSettings
+	} else {
+		rawNew.WebSettings = canonicalizeNewKeyWebSettings(c, rawDesired.WebSettings, rawNew.WebSettings)
+	}
+
+	if dcl.IsEmptyValueIndirect(rawNew.AndroidSettings) && dcl.IsEmptyValueIndirect(rawDesired.AndroidSettings) {
+		rawNew.AndroidSettings = rawDesired.AndroidSettings
+	} else {
+		rawNew.AndroidSettings = canonicalizeNewKeyAndroidSettings(c, rawDesired.AndroidSettings, rawNew.AndroidSettings)
+	}
+
+	if dcl.IsEmptyValueIndirect(rawNew.IosSettings) && dcl.IsEmptyValueIndirect(rawDesired.IosSettings) {
+		rawNew.IosSettings = rawDesired.IosSettings
+	} else {
+		rawNew.IosSettings = canonicalizeNewKeyIosSettings(c, rawDesired.IosSettings, rawNew.IosSettings)
+	}
+
+	if dcl.IsEmptyValueIndirect(rawNew.Labels) && dcl.IsEmptyValueIndirect(rawDesired.Labels) {
+		rawNew.Labels = rawDesired.Labels
+	} else {
+	}
+
+	if dcl.IsEmptyValueIndirect(rawNew.CreateTime) && dcl.IsEmptyValueIndirect(rawDesired.CreateTime) {
+		rawNew.CreateTime = rawDesired.CreateTime
+	} else {
+	}
+
+	if dcl.IsEmptyValueIndirect(rawNew.TestingOptions) && dcl.IsEmptyValueIndirect(rawDesired.TestingOptions) {
+		rawNew.TestingOptions = rawDesired.TestingOptions
+	} else {
+		rawNew.TestingOptions = canonicalizeNewKeyTestingOptions(c, rawDesired.TestingOptions, rawNew.TestingOptions)
+	}
+
+	if dcl.IsEmptyValueIndirect(rawNew.WafSettings) && dcl.IsEmptyValueIndirect(rawDesired.WafSettings) {
+		rawNew.WafSettings = rawDesired.WafSettings
+	} else {
+		rawNew.WafSettings = canonicalizeNewKeyWafSettings(c, rawDesired.WafSettings, rawNew.WafSettings)
+	}
+
+	rawNew.Project = rawDesired.Project
+
+	return rawNew, nil
+}
+
+// canonicalizeKeyWebSettings builds the canonical desired KeyWebSettings,
+// preferring the initial (server) value whenever the desired value is unset
+// or equivalent to it.
+func canonicalizeKeyWebSettings(des, initial *KeyWebSettings, opts ...dcl.ApplyOption) *KeyWebSettings {
+	if des == nil {
+		return initial
+	}
+	if des.empty {
+		return des
+	}
+
+	if initial == nil {
+		return des
+	}
+
+	cDes := &KeyWebSettings{}
+
+	if dcl.BoolCanonicalize(des.AllowAllDomains, initial.AllowAllDomains) || dcl.IsZeroValue(des.AllowAllDomains) {
+		cDes.AllowAllDomains = initial.AllowAllDomains
+	} else {
+		cDes.AllowAllDomains = des.AllowAllDomains
+	}
+	if dcl.StringArrayCanonicalize(des.AllowedDomains, initial.AllowedDomains) {
+		cDes.AllowedDomains = initial.AllowedDomains
+	} else {
+		cDes.AllowedDomains = des.AllowedDomains
+	}
+	if dcl.BoolCanonicalize(des.AllowAmpTraffic, initial.AllowAmpTraffic) || dcl.IsZeroValue(des.AllowAmpTraffic) {
+		cDes.AllowAmpTraffic = initial.AllowAmpTraffic
+	} else {
+		cDes.AllowAmpTraffic = des.AllowAmpTraffic
+	}
+	if dcl.IsZeroValue(des.IntegrationType) || (dcl.IsEmptyValueIndirect(des.IntegrationType) && dcl.IsEmptyValueIndirect(initial.IntegrationType)) {
+		// Desired and initial values are equivalent, so set canonical desired value to initial value.
+		cDes.IntegrationType = initial.IntegrationType
+	} else {
+		cDes.IntegrationType = des.IntegrationType
+	}
+	if dcl.IsZeroValue(des.ChallengeSecurityPreference) || (dcl.IsEmptyValueIndirect(des.ChallengeSecurityPreference) && dcl.IsEmptyValueIndirect(initial.ChallengeSecurityPreference)) {
+		// Desired and initial values are equivalent, so set canonical desired value to initial value.
+		cDes.ChallengeSecurityPreference = initial.ChallengeSecurityPreference
+	} else {
+		cDes.ChallengeSecurityPreference = des.ChallengeSecurityPreference
+	}
+
+	return cDes
+}
+
+// canonicalizeKeyWebSettingsSlice canonicalizes elements pairwise; when the
+// lengths differ each desired element is canonicalized against a nil initial.
+func canonicalizeKeyWebSettingsSlice(des, initial []KeyWebSettings, opts ...dcl.ApplyOption) []KeyWebSettings {
+	if dcl.IsEmptyValueIndirect(des) {
+		return initial
+	}
+
+	if len(des) != len(initial) {
+
+		items := make([]KeyWebSettings, 0, len(des))
+		for _, d := range des {
+			cd := canonicalizeKeyWebSettings(&d, nil, opts...)
+			if cd != nil {
+				items = append(items, *cd)
+			}
+		}
+		return items
+	}
+
+	items := make([]KeyWebSettings, 0, len(des))
+	for i, d := range des {
+		cd := canonicalizeKeyWebSettings(&d, &initial[i], opts...)
+		if cd != nil {
+			items = append(items, *cd)
+		}
+	}
+	return items
+
+}
+
+func canonicalizeNewKeyWebSettings(c *Client, des, nw *KeyWebSettings) *KeyWebSettings {
+
+	if des == nil {
+		return nw
+	}
+
+	if nw == nil {
+		if dcl.IsEmptyValueIndirect(des) {
+			c.Config.Logger.Info("Found explicitly empty value for KeyWebSettings while comparing non-nil desired to nil actual. Returning desired object.")
+			return des
+		}
+		return nil
+	}
+
+	if dcl.BoolCanonicalize(des.AllowAllDomains, nw.AllowAllDomains) {
+		nw.AllowAllDomains = des.AllowAllDomains
+	}
+	if dcl.StringArrayCanonicalize(des.AllowedDomains, nw.AllowedDomains) {
+		nw.AllowedDomains = des.AllowedDomains
+	}
+	if dcl.BoolCanonicalize(des.AllowAmpTraffic, nw.AllowAmpTraffic) {
+		nw.AllowAmpTraffic = des.AllowAmpTraffic
+	}
+
+	return nw
+}
+
+func canonicalizeNewKeyWebSettingsSet(c *Client, des, nw []KeyWebSettings) []KeyWebSettings {
+	if des == nil {
+		return nw
+	}
+
+	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
+	var items []KeyWebSettings
+	for _, d := range des {
+		matchedIndex := -1
+		for i, n := range nw {
+			if diffs, _ := compareKeyWebSettingsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
+				matchedIndex = i
+				break
+			}
+		}
+		if matchedIndex != -1 {
+			items = append(items, *canonicalizeNewKeyWebSettings(c, &d, &nw[matchedIndex]))
+			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
+		}
+	}
+	// Also include elements in nw that are not matched in des.
+	items = append(items, nw...)
+
+	return items
+}
+
+func canonicalizeNewKeyWebSettingsSlice(c *Client, des, nw []KeyWebSettings) []KeyWebSettings {
+	if des == nil {
+		return nw
+	}
+
+	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
+	// Return the original array.
+	if len(des) != len(nw) {
+		return nw
+	}
+
+	var items []KeyWebSettings
+	for i, d := range des {
+		n := nw[i]
+		items = append(items, *canonicalizeNewKeyWebSettings(c, &d, &n))
+	}
+
+	return items
+}
+
+// canonicalizeKeyAndroidSettings mirrors canonicalizeKeyWebSettings for the
+// Android settings block (allow-all flag plus allowed package-name list).
+func canonicalizeKeyAndroidSettings(des, initial *KeyAndroidSettings, opts ...dcl.ApplyOption) *KeyAndroidSettings {
+	if des == nil {
+		return initial
+	}
+	if des.empty {
+		return des
+	}
+
+	if initial == nil {
+		return des
+	}
+
+	cDes := &KeyAndroidSettings{}
+
+	if dcl.BoolCanonicalize(des.AllowAllPackageNames, initial.AllowAllPackageNames) || dcl.IsZeroValue(des.AllowAllPackageNames) {
+		cDes.AllowAllPackageNames = initial.AllowAllPackageNames
+	} else {
+		cDes.AllowAllPackageNames = des.AllowAllPackageNames
+	}
+	if dcl.StringArrayCanonicalize(des.AllowedPackageNames, initial.AllowedPackageNames) {
+		cDes.AllowedPackageNames = initial.AllowedPackageNames
+	} else {
+		cDes.AllowedPackageNames = des.AllowedPackageNames
+	}
+
+	return cDes
+}
+
+func canonicalizeKeyAndroidSettingsSlice(des, initial []KeyAndroidSettings, opts ...dcl.ApplyOption) []KeyAndroidSettings {
+	if dcl.IsEmptyValueIndirect(des) {
+		return initial
+	}
+
+	if len(des) != len(initial) {
+
+		items := make([]KeyAndroidSettings, 0, len(des))
+		for _, d := range des {
+			cd := canonicalizeKeyAndroidSettings(&d, nil, opts...)
+			if cd != nil {
+				items = append(items, *cd)
+			}
+		}
+		return items
+	}
+
+	items := make([]KeyAndroidSettings, 0, len(des))
+	for i, d := range des {
+		cd := canonicalizeKeyAndroidSettings(&d, &initial[i], opts...)
+		if cd != nil {
+			items = append(items, *cd)
+		}
+	}
+	return items
+
+}
+
+func canonicalizeNewKeyAndroidSettings(c *Client, des, nw *KeyAndroidSettings) *KeyAndroidSettings {
+
+	if des == nil {
+		return nw
+	}
+
+	if nw == nil {
+		if dcl.IsEmptyValueIndirect(des) {
+			c.Config.Logger.Info("Found explicitly empty value for KeyAndroidSettings while comparing non-nil desired to nil actual. Returning desired object.")
+			return des
+		}
+		return nil
+	}
+
+	if dcl.BoolCanonicalize(des.AllowAllPackageNames, nw.AllowAllPackageNames) {
+		nw.AllowAllPackageNames = des.AllowAllPackageNames
+	}
+	if dcl.StringArrayCanonicalize(des.AllowedPackageNames, nw.AllowedPackageNames) {
+		nw.AllowedPackageNames = des.AllowedPackageNames
+	}
+
+	return nw
+}
+
+func canonicalizeNewKeyAndroidSettingsSet(c *Client, des, nw []KeyAndroidSettings) []KeyAndroidSettings {
+	if des == nil {
+		return nw
+	}
+
+	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
+	var items []KeyAndroidSettings
+	for _, d := range des {
+		matchedIndex := -1
+		for i, n := range nw {
+			if diffs, _ := compareKeyAndroidSettingsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
+				matchedIndex = i
+				break
+			}
+		}
+		if matchedIndex != -1 {
+			items = append(items, *canonicalizeNewKeyAndroidSettings(c, &d, &nw[matchedIndex]))
+			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
+		}
+	}
+	// Also include elements in nw that are not matched in des.
+	items = append(items, nw...)
+
+	return items
+}
+
+func canonicalizeNewKeyAndroidSettingsSlice(c *Client, des, nw []KeyAndroidSettings) []KeyAndroidSettings {
+	if des == nil {
+		return nw
+	}
+
+	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
+	// Return the original array.
+	if len(des) != len(nw) {
+		return nw
+	}
+
+	var items []KeyAndroidSettings
+	for i, d := range des {
+		n := nw[i]
+		items = append(items, *canonicalizeNewKeyAndroidSettings(c, &d, &n))
+	}
+
+	return items
+}
+
+// canonicalizeKeyIosSettings mirrors canonicalizeKeyWebSettings for the iOS
+// settings block (allow-all flag plus allowed bundle-ID list).
+func canonicalizeKeyIosSettings(des, initial *KeyIosSettings, opts ...dcl.ApplyOption) *KeyIosSettings {
+	if des == nil {
+		return initial
+	}
+	if des.empty {
+		return des
+	}
+
+	if initial == nil {
+		return des
+	}
+
+	cDes := &KeyIosSettings{}
+
+	if dcl.BoolCanonicalize(des.AllowAllBundleIds, initial.AllowAllBundleIds) || dcl.IsZeroValue(des.AllowAllBundleIds) {
+		cDes.AllowAllBundleIds = initial.AllowAllBundleIds
+	} else {
+		cDes.AllowAllBundleIds = des.AllowAllBundleIds
+	}
+	if dcl.StringArrayCanonicalize(des.AllowedBundleIds, initial.AllowedBundleIds) {
+		cDes.AllowedBundleIds = initial.AllowedBundleIds
+	} else {
+		cDes.AllowedBundleIds = des.AllowedBundleIds
+	}
+
+	return cDes
+}
+
+func canonicalizeKeyIosSettingsSlice(des, initial []KeyIosSettings, opts ...dcl.ApplyOption) []KeyIosSettings {
+	if dcl.IsEmptyValueIndirect(des) {
+		return initial
+	}
+
+	if len(des) != len(initial) {
+
+		items := make([]KeyIosSettings, 0, len(des))
+		for _, d := range des {
+			cd := canonicalizeKeyIosSettings(&d, nil, opts...)
+			if cd != nil {
+				items = append(items, *cd)
+			}
+		}
+		return items
+	}
+
+	items := make([]KeyIosSettings, 0, len(des))
+	for i, d := range des {
+		cd := canonicalizeKeyIosSettings(&d, &initial[i], opts...)
+		if cd != nil {
+			items = append(items, *cd)
+		}
+	}
+	return items
+
+}
+
+func canonicalizeNewKeyIosSettings(c *Client, des, nw *KeyIosSettings) *KeyIosSettings {
+
+	if des == nil {
+		return nw
+	}
+
+	if nw == nil {
+		if dcl.IsEmptyValueIndirect(des) {
+			c.Config.Logger.Info("Found explicitly empty value for KeyIosSettings while comparing non-nil desired to nil actual. Returning desired object.")
+			return des
+		}
+		return nil
+	}
+
+	if dcl.BoolCanonicalize(des.AllowAllBundleIds, nw.AllowAllBundleIds) {
+		nw.AllowAllBundleIds = des.AllowAllBundleIds
+	}
+	if dcl.StringArrayCanonicalize(des.AllowedBundleIds, nw.AllowedBundleIds) {
+		nw.AllowedBundleIds = des.AllowedBundleIds
+	}
+
+	return nw
+}
+
+func canonicalizeNewKeyIosSettingsSet(c *Client, des, nw []KeyIosSettings) []KeyIosSettings {
+	if des == nil {
+		return nw
+	}
+
+	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
+	var items []KeyIosSettings
+	for _, d := range des {
+		matchedIndex := -1
+		for i, n := range nw {
+			if diffs, _ := compareKeyIosSettingsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
+				matchedIndex = i
+				break
+			}
+		}
+		if matchedIndex != -1 {
+			items = append(items, *canonicalizeNewKeyIosSettings(c, &d, &nw[matchedIndex]))
+			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
+		}
+	}
+	// Also include elements in nw that are not matched in des.
+	items = append(items, nw...)
+
+	return items
+}
+
+func canonicalizeNewKeyIosSettingsSlice(c *Client, des, nw []KeyIosSettings) []KeyIosSettings {
+	if des == nil {
+		return nw
+	}
+
+	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
+	// Return the original array.
+	if len(des) != len(nw) {
+		return nw
+	}
+
+	var items []KeyIosSettings
+	for i, d := range des {
+		n := nw[i]
+		items = append(items, *canonicalizeNewKeyIosSettings(c, &d, &n))
+	}
+
+	return items
+}
+
+// canonicalizeKeyTestingOptions canonicalizes the testing-options block; the
+// score and challenge fields fall back to the initial (server) value when the
+// desired value is zero or both sides are empty.
+func canonicalizeKeyTestingOptions(des, initial *KeyTestingOptions, opts ...dcl.ApplyOption) *KeyTestingOptions {
+	if des == nil {
+		return initial
+	}
+	if des.empty {
+		return des
+	}
+
+	if initial == nil {
+		return des
+	}
+
+	cDes := &KeyTestingOptions{}
+
+	if dcl.IsZeroValue(des.TestingScore) || (dcl.IsEmptyValueIndirect(des.TestingScore) && dcl.IsEmptyValueIndirect(initial.TestingScore)) {
+		// Desired and initial values are equivalent, so set canonical desired value to initial value.
+		cDes.TestingScore = initial.TestingScore
+	} else {
+		cDes.TestingScore = des.TestingScore
+	}
+	if dcl.IsZeroValue(des.TestingChallenge) || (dcl.IsEmptyValueIndirect(des.TestingChallenge) && dcl.IsEmptyValueIndirect(initial.TestingChallenge)) {
+		// Desired and initial values are equivalent, so set canonical desired value to initial value.
+		cDes.TestingChallenge = initial.TestingChallenge
+	} else {
+		cDes.TestingChallenge = des.TestingChallenge
+	}
+
+	return cDes
+}
+
+func canonicalizeKeyTestingOptionsSlice(des, initial []KeyTestingOptions, opts ...dcl.ApplyOption) []KeyTestingOptions {
+	if dcl.IsEmptyValueIndirect(des) {
+		return initial
+	}
+
+	if len(des) != len(initial) {
+
+		items := make([]KeyTestingOptions, 0, len(des))
+		for _, d := range des {
+			cd := canonicalizeKeyTestingOptions(&d, nil, opts...)
+			if cd != nil {
+				items = append(items, *cd)
+			}
+		}
+		return items
+	}
+
+	items := make([]KeyTestingOptions, 0, len(des))
+	for i, d := range des {
+		cd := canonicalizeKeyTestingOptions(&d, &initial[i], opts...)
+		if cd != nil {
+			items = append(items, *cd)
+		}
+	}
+	return items
+
+}
+
+// NOTE(review): unlike the web/android/ios variants, this helper has no
+// per-field merging; once both sides are non-nil the server value is returned
+// unchanged (matches the generated DCL pattern for enum-only messages).
+func canonicalizeNewKeyTestingOptions(c *Client, des, nw *KeyTestingOptions) *KeyTestingOptions {
+
+	if des == nil {
+		return nw
+	}
+
+	if nw == nil {
+		if dcl.IsEmptyValueIndirect(des) {
+			c.Config.Logger.Info("Found explicitly empty value for KeyTestingOptions while comparing non-nil desired to nil actual. Returning desired object.")
+			return des
+		}
+		return nil
+	}
+
+	return nw
+}
+
+func canonicalizeNewKeyTestingOptionsSet(c *Client, des, nw []KeyTestingOptions) []KeyTestingOptions {
+	if des == nil {
+		return nw
+	}
+
+	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
+	var items []KeyTestingOptions
+	for _, d := range des {
+		matchedIndex := -1
+		for i, n := range nw {
+			if diffs, _ := compareKeyTestingOptionsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
+				matchedIndex = i
+				break
+			}
+		}
+		if matchedIndex != -1 {
+			items = append(items, *canonicalizeNewKeyTestingOptions(c, &d, &nw[matchedIndex]))
+			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
+		}
+	}
+	// Also include elements in nw that are not matched in des.
+	items = append(items, nw...)
+
+	return items
+}
+
+func canonicalizeNewKeyTestingOptionsSlice(c *Client, des, nw []KeyTestingOptions) []KeyTestingOptions {
+	if des == nil {
+		return nw
+	}
+
+	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
+	// Return the original array.
+	if len(des) != len(nw) {
+		return nw
+	}
+
+	var items []KeyTestingOptions
+	for i, d := range des {
+		n := nw[i]
+		items = append(items, *canonicalizeNewKeyTestingOptions(c, &d, &n))
+	}
+
+	return items
+}
+
+// canonicalizeKeyWafSettings canonicalizes the WAF settings block; both enum
+// fields fall back to the initial (server) value when the desired value is
+// zero or both sides are empty.
+func canonicalizeKeyWafSettings(des, initial *KeyWafSettings, opts ...dcl.ApplyOption) *KeyWafSettings {
+	if des == nil {
+		return initial
+	}
+	if des.empty {
+		return des
+	}
+
+	if initial == nil {
+		return des
+	}
+
+	cDes := &KeyWafSettings{}
+
+	if dcl.IsZeroValue(des.WafService) || (dcl.IsEmptyValueIndirect(des.WafService) && dcl.IsEmptyValueIndirect(initial.WafService)) {
+		// Desired and initial values are equivalent, so set canonical desired value to initial value.
+		cDes.WafService = initial.WafService
+	} else {
+		cDes.WafService = des.WafService
+	}
+	if dcl.IsZeroValue(des.WafFeature) || (dcl.IsEmptyValueIndirect(des.WafFeature) && dcl.IsEmptyValueIndirect(initial.WafFeature)) {
+		// Desired and initial values are equivalent, so set canonical desired value to initial value.
+		cDes.WafFeature = initial.WafFeature
+	} else {
+		cDes.WafFeature = des.WafFeature
+	}
+
+	return cDes
+}
+
+func canonicalizeKeyWafSettingsSlice(des, initial []KeyWafSettings, opts ...dcl.ApplyOption) []KeyWafSettings {
+	if dcl.IsEmptyValueIndirect(des) {
+		return initial
+	}
+
+	if len(des) != len(initial) {
+
+		items := make([]KeyWafSettings, 0, len(des))
+		for _, d := range des {
+			cd := canonicalizeKeyWafSettings(&d, nil, opts...)
+			if cd != nil {
+				items = append(items, *cd)
+			}
+		}
+		return items
+	}
+
+	items := make([]KeyWafSettings, 0, len(des))
+	for i, d := range des {
+		cd := canonicalizeKeyWafSettings(&d, &initial[i], opts...)
+		if cd != nil {
+			items = append(items, *cd)
+		}
+	}
+	return items
+
+}
+
+func canonicalizeNewKeyWafSettings(c *Client, des, nw *KeyWafSettings) *KeyWafSettings {
+
+	if des == nil {
+		return nw
+	}
+
+	if nw == nil {
+		if dcl.IsEmptyValueIndirect(des) {
+			c.Config.Logger.Info("Found explicitly empty value for KeyWafSettings while comparing non-nil desired to nil actual. Returning desired object.")
+			return des
+		}
+		return nil
+	}
+
+	return nw
+}
+
+func canonicalizeNewKeyWafSettingsSet(c *Client, des, nw []KeyWafSettings) []KeyWafSettings {
+	if des == nil {
+		return nw
+	}
+
+	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
+	var items []KeyWafSettings
+	for _, d := range des {
+		matchedIndex := -1
+		for i, n := range nw {
+			if diffs, _ := compareKeyWafSettingsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
+				matchedIndex = i
+				break
+			}
+		}
+		if matchedIndex != -1 {
+			items = append(items, *canonicalizeNewKeyWafSettings(c, &d, &nw[matchedIndex]))
+			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
+		}
+	}
+	// Also include elements in nw that are not matched in des.
+	items = append(items, nw...)
+
+	return items
+}
+
+func canonicalizeNewKeyWafSettingsSlice(c *Client, des, nw []KeyWafSettings) []KeyWafSettings {
+	if des == nil {
+		return nw
+	}
+
+	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
+	// Return the original array.
+	if len(des) != len(nw) {
+		return nw
+	}
+
+	var items []KeyWafSettings
+	for i, d := range des {
+		n := nw[i]
+		items = append(items, *canonicalizeNewKeyWafSettings(c, &d, &n))
+	}
+
+	return items
+}
+
+// The differ returns a list of diffs, along with a list of operations that should be taken
+// to remedy them. Right now, it does not attempt to consolidate operations - if several
+// fields can be fixed with a patch update, it will perform the patch several times.
+// Diffs on some fields will be ignored if the `desired` state has an empty (nil)
+// value. This empty value indicates that the user does not care about the state for
+// the field. Empty fields on the actual object will cause diffs.
+// TODO(magic-modules-eng): for efficiency in some resources, add batching.
+func diffKey(c *Client, desired, actual *Key, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) {
+	if desired == nil || actual == nil {
+		return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual)
+	}
+
+	c.Config.Logger.Infof("Diff function called with desired state: %v", desired)
+	c.Config.Logger.Infof("Diff function called with actual state: %v", actual)
+
+	var fn dcl.FieldName
+	var newDiffs []*dcl.FieldDiff
+	// New style diffs.
+	// Name and Project are identity fields: any change requires recreate.
+	if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.DisplayName, actual.DisplayName, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateKeyUpdateKeyOperation")}, fn.AddNest("DisplayName")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.WebSettings, actual.WebSettings, dcl.DiffInfo{ObjectFunction: compareKeyWebSettingsNewStyle, EmptyObject: EmptyKeyWebSettings, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("WebSettings")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.AndroidSettings, actual.AndroidSettings, dcl.DiffInfo{ObjectFunction: compareKeyAndroidSettingsNewStyle, EmptyObject: EmptyKeyAndroidSettings, OperationSelector: dcl.TriggersOperation("updateKeyUpdateKeyOperation")}, fn.AddNest("AndroidSettings")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.IosSettings, actual.IosSettings, dcl.DiffInfo{ObjectFunction: compareKeyIosSettingsNewStyle, EmptyObject: EmptyKeyIosSettings, OperationSelector: dcl.TriggersOperation("updateKeyUpdateKeyOperation")}, fn.AddNest("IosSettings")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.Labels, actual.Labels, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateKeyUpdateKeyOperation")}, fn.AddNest("Labels")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.CreateTime, actual.CreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreateTime")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.TestingOptions, actual.TestingOptions, dcl.DiffInfo{ObjectFunction: compareKeyTestingOptionsNewStyle, EmptyObject: EmptyKeyTestingOptions, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("TestingOptions")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.WafSettings, actual.WafSettings, dcl.DiffInfo{ObjectFunction: compareKeyWafSettingsNewStyle, EmptyObject: EmptyKeyWafSettings, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("WafSettings")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		newDiffs = append(newDiffs, ds...)
+	}
+
+	if len(newDiffs) > 0 {
+		c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs)
+	}
+	return newDiffs, nil
+}
+
+// compareKeyWebSettingsNewStyle diffs two KeyWebSettings values (accepted in
+// either pointer or value form) field by field for the dcl differ.
+func compareKeyWebSettingsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
+	var diffs []*dcl.FieldDiff
+
+	desired, ok := d.(*KeyWebSettings)
+	if !ok {
+		desiredNotPointer, ok := d.(KeyWebSettings)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a KeyWebSettings or *KeyWebSettings", d)
+		}
+		desired = &desiredNotPointer
+	}
+	actual, ok := a.(*KeyWebSettings)
+	if !ok {
+		actualNotPointer, ok := a.(KeyWebSettings)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a KeyWebSettings", a)
+		}
+		actual = &actualNotPointer
+	}
+
+	if ds, err := dcl.Diff(desired.AllowAllDomains, actual.AllowAllDomains, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateKeyUpdateKeyOperation")}, fn.AddNest("AllowAllDomains")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.AllowedDomains, actual.AllowedDomains, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateKeyUpdateKeyOperation")}, fn.AddNest("AllowedDomains")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.AllowAmpTraffic, actual.AllowAmpTraffic, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateKeyUpdateKeyOperation")}, fn.AddNest("AllowAmpTraffic")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.IntegrationType, actual.IntegrationType, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("IntegrationType")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.ChallengeSecurityPreference, actual.ChallengeSecurityPreference, dcl.DiffInfo{ServerDefault: true, Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateKeyUpdateKeyOperation")}, fn.AddNest("ChallengeSecurityPreference")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+	return diffs, nil
+}
+
+// compareKeyAndroidSettingsNewStyle diffs two KeyAndroidSettings values
+// (pointer or value form) field by field for the dcl differ.
+func compareKeyAndroidSettingsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
+	var diffs []*dcl.FieldDiff
+
+	desired, ok := d.(*KeyAndroidSettings)
+	if !ok {
+		desiredNotPointer, ok := d.(KeyAndroidSettings)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a KeyAndroidSettings or *KeyAndroidSettings", d)
+		}
+		desired = &desiredNotPointer
+	}
+	actual, ok := a.(*KeyAndroidSettings)
+	if !ok {
+		actualNotPointer, ok := a.(KeyAndroidSettings)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a KeyAndroidSettings", a)
+		}
+		actual = &actualNotPointer
+	}
+
+	if ds, err := dcl.Diff(desired.AllowAllPackageNames, actual.AllowAllPackageNames, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateKeyUpdateKeyOperation")}, fn.AddNest("AllowAllPackageNames")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.AllowedPackageNames, actual.AllowedPackageNames, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateKeyUpdateKeyOperation")}, fn.AddNest("AllowedPackageNames")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+	return diffs, nil
+}
+
+// compareKeyIosSettingsNewStyle diffs two KeyIosSettings values (pointer or
+// value form) field by field for the dcl differ.
+func compareKeyIosSettingsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
+	var diffs []*dcl.FieldDiff
+
+	desired, ok := d.(*KeyIosSettings)
+	if !ok {
+		desiredNotPointer, ok := d.(KeyIosSettings)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a KeyIosSettings or *KeyIosSettings", d)
+		}
+		desired = &desiredNotPointer
+	}
+	actual, ok := a.(*KeyIosSettings)
+	if !ok {
+		actualNotPointer, ok := a.(KeyIosSettings)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a KeyIosSettings", a)
+		}
+		actual = &actualNotPointer
+	}
+
+	if ds, err := dcl.Diff(desired.AllowAllBundleIds, actual.AllowAllBundleIds, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateKeyUpdateKeyOperation")}, fn.AddNest("AllowAllBundleIds")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.AllowedBundleIds, actual.AllowedBundleIds, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateKeyUpdateKeyOperation")}, fn.AddNest("AllowedBundleIds")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+	return diffs, nil
+}
+
+// compareKeyTestingOptionsNewStyle diffs two KeyTestingOptions values (pointer
+// or value form) field by field for the dcl differ.
+func compareKeyTestingOptionsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
+	var diffs []*dcl.FieldDiff
+
+	desired, ok := d.(*KeyTestingOptions)
+	if !ok {
+		desiredNotPointer, ok := d.(KeyTestingOptions)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a KeyTestingOptions or *KeyTestingOptions", d)
+		}
+		desired = &desiredNotPointer
+	}
+	actual, ok := a.(*KeyTestingOptions)
+	if !ok {
+		actualNotPointer, ok := a.(KeyTestingOptions)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a KeyTestingOptions", a)
+		}
+		actual = &actualNotPointer
+	}
+
+	if ds, err := dcl.Diff(desired.TestingScore, actual.TestingScore, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("TestingScore")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.TestingChallenge, actual.TestingChallenge, dcl.DiffInfo{ServerDefault: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("TestingChallenge")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+	return diffs, nil
+}
+
+// compareKeyWafSettingsNewStyle diffs two KeyWafSettings values (pointer or
+// value form) field by field for the dcl differ.
+func compareKeyWafSettingsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
+	var diffs []*dcl.FieldDiff
+
+	desired, ok := d.(*KeyWafSettings)
+	if !ok {
+		desiredNotPointer, ok := d.(KeyWafSettings)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a KeyWafSettings or *KeyWafSettings", d)
+		}
+		desired = &desiredNotPointer
+	}
+	actual, ok := a.(*KeyWafSettings)
+	if !ok {
+		actualNotPointer, ok := a.(KeyWafSettings)
+		if !ok {
+			return nil, fmt.Errorf("obj %v is not a KeyWafSettings", a)
+		}
+		actual = &actualNotPointer
+	}
+
+	if ds, err := dcl.Diff(desired.WafService, actual.WafService, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("WafService")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+
+	if ds, err := dcl.Diff(desired.WafFeature, actual.WafFeature, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("WafFeature")); len(ds) != 0 || err != nil {
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, ds...)
+	}
+	return diffs, nil
+}
+
+// urlNormalized returns a copy of the resource struct with values normalized
+// for URL substitutions. For instance, it converts long-form self-links to
+// short-form so they can be substituted in.
+func (r *Key) urlNormalized() *Key {
+	normalized := dcl.Copy(*r).(Key)
+	normalized.Name = dcl.SelfLinkToName(r.Name)
+	normalized.DisplayName = dcl.SelfLinkToName(r.DisplayName)
+	normalized.Project = dcl.SelfLinkToName(r.Project)
+	return &normalized
+}
+
+// updateURL returns the request URL for the named update method; only
+// "UpdateKey" is supported. The doubled-brace quoting keeps literal
+// placeholder braces in the generated (non-template) output file.
+func (r *Key) updateURL(userBasePath, updateName string) (string, error) {
+	nr := r.urlNormalized()
+	if updateName == "UpdateKey" {
+		fields := map[string]interface{}{
+			"project": dcl.ValueOrEmptyString(nr.Project),
+			"name":    dcl.ValueOrEmptyString(nr.Name),
+		}
+		return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/keys/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, fields), nil
+
+	}
+
+	return "", fmt.Errorf("unknown update name: %s", updateName)
+}
+
+// marshal encodes the Key resource into JSON for a Create request, and
+// performs transformations from the resource schema to the API schema if
+// necessary.
+func (r *Key) marshal(c *Client) ([]byte, error) {
+	m, err := expandKey(c, r)
+	if err != nil {
+		return nil, fmt.Errorf("error marshalling Key: %w", err)
+	}
+
+	return json.Marshal(m)
+}
+
+// unmarshalKey decodes JSON responses into the Key resource schema.
+func unmarshalKey(b []byte, c *Client, res *Key) (*Key, error) {
+	var m map[string]interface{}
+	if err := json.Unmarshal(b, &m); err != nil {
+		return nil, err
+	}
+	return unmarshalMapKey(m, c, res)
+}
+
+// unmarshalMapKey converts an already-decoded JSON map into a Key,
+// failing when the object flattens to nothing.
+func unmarshalMapKey(m map[string]interface{}, c *Client, res *Key) (*Key, error) {
+
+	flattened := flattenKey(c, m, res)
+	if flattened == nil {
+		return nil, fmt.Errorf("attempted to flatten empty json object")
+	}
+	return flattened, nil
+}
+
+// expandKey expands Key into a JSON request object.
+func expandKey(c *Client, f *Key) (map[string]interface{}, error) {
+	m := make(map[string]interface{})
+	res := f
+	_ = res
+	// name is derived from project + name rather than sent verbatim.
+	if v, err := dcl.DeriveField("projects/%s/keys/%s", f.Name, dcl.SelfLinkToName(f.Project), dcl.SelfLinkToName(f.Name)); err != nil {
+		return nil, fmt.Errorf("error expanding Name into name: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["name"] = v
+	}
+	if v := f.DisplayName; dcl.ValueShouldBeSent(v) {
+		m["displayName"] = v
+	}
+	if v, err := expandKeyWebSettings(c, f.WebSettings, res); err != nil {
+		return nil, fmt.Errorf("error expanding WebSettings into webSettings: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["webSettings"] = v
+	}
+	if v, err := expandKeyAndroidSettings(c, f.AndroidSettings, res); err != nil {
+		return nil, fmt.Errorf("error expanding AndroidSettings into androidSettings: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["androidSettings"] = v
+	}
+	if v, err := expandKeyIosSettings(c, f.IosSettings, res); err != nil {
+		return nil, fmt.Errorf("error expanding IosSettings into iosSettings: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["iosSettings"] = v
+	}
+	if v := f.Labels; dcl.ValueShouldBeSent(v) {
+		m["labels"] = v
+	}
+	if v, err := expandKeyTestingOptions(c, f.TestingOptions, res); err != nil {
+		return nil, fmt.Errorf("error expanding TestingOptions into testingOptions: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["testingOptions"] = v
+	}
+	if v, err := expandKeyWafSettings(c, f.WafSettings, res); err != nil {
+		return nil, fmt.Errorf("error expanding WafSettings into wafSettings: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["wafSettings"] = v
+	}
+	// Project lives in the URL, not the body: dcl.EmptyValue() keeps it out of m.
+	if v, err := dcl.EmptyValue(); err != nil {
+		return nil, fmt.Errorf("error expanding Project into project: %w", err)
+	} else if !dcl.IsEmptyValueIndirect(v) {
+		m["project"] = v
+	}
+
+	return m, nil
+}
+
+// flattenKey flattens Key from a JSON request object into the
+// Key type.
+func flattenKey(c *Client, i interface{}, res *Key) *Key {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+	if len(m) == 0 {
+		return nil
+	}
+
+	resultRes := &Key{}
+	resultRes.Name = dcl.SelfLinkToName(dcl.FlattenString(m["name"]))
+	resultRes.DisplayName = dcl.FlattenString(m["displayName"])
+	resultRes.WebSettings = flattenKeyWebSettings(c, m["webSettings"], res)
+	resultRes.AndroidSettings = flattenKeyAndroidSettings(c, m["androidSettings"], res)
+	resultRes.IosSettings = flattenKeyIosSettings(c, m["iosSettings"], res)
+	resultRes.Labels = dcl.FlattenKeyValuePairs(m["labels"])
+	resultRes.CreateTime = dcl.FlattenString(m["createTime"])
+	resultRes.TestingOptions = flattenKeyTestingOptions(c, m["testingOptions"], res)
+	resultRes.WafSettings = flattenKeyWafSettings(c, m["wafSettings"], res)
+	resultRes.Project = dcl.FlattenString(m["project"])
+
+	return resultRes
+}
+
+// expandKeyWebSettingsMap expands the contents of KeyWebSettings into a JSON
+// request object.
+func expandKeyWebSettingsMap(c *Client, f map[string]KeyWebSettings, res *Key) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandKeyWebSettings(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandKeyWebSettingsSlice expands the contents of KeyWebSettings into a JSON
+// request object.
+func expandKeyWebSettingsSlice(c *Client, f []KeyWebSettings, res *Key) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandKeyWebSettings(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenKeyWebSettingsMap flattens the contents of KeyWebSettings from a JSON
+// response object.
+func flattenKeyWebSettingsMap(c *Client, i interface{}, res *Key) map[string]KeyWebSettings {
+	a, ok := i.(map[string]interface{})
+	if !ok {
+		return map[string]KeyWebSettings{}
+	}
+
+	if len(a) == 0 {
+		return map[string]KeyWebSettings{}
+	}
+
+	items := make(map[string]KeyWebSettings)
+	for k, item := range a {
+		// NOTE(review): item is assumed to be a JSON object; a non-object
+		// element would panic here (unchecked assertion, standard DCL pattern).
+		items[k] = *flattenKeyWebSettings(c, item.(map[string]interface{}), res)
+	}
+
+	return items
+}
+
+// flattenKeyWebSettingsSlice flattens the contents of KeyWebSettings from a JSON
+// response object.
+func flattenKeyWebSettingsSlice(c *Client, i interface{}, res *Key) []KeyWebSettings {
+	a, ok := i.([]interface{})
+	if !ok {
+		return []KeyWebSettings{}
+	}
+
+	if len(a) == 0 {
+		return []KeyWebSettings{}
+	}
+
+	items := make([]KeyWebSettings, 0, len(a))
+	for _, item := range a {
+		items = append(items, *flattenKeyWebSettings(c, item.(map[string]interface{}), res))
+	}
+
+	return items
+}
+
+// expandKeyWebSettings expands an instance of KeyWebSettings into a JSON
+// request object.
+func expandKeyWebSettings(c *Client, f *KeyWebSettings, res *Key) (map[string]interface{}, error) {
+	if dcl.IsEmptyValueIndirect(f) {
+		return nil, nil
+	}
+
+	m := make(map[string]interface{})
+	if v := f.AllowAllDomains; !dcl.IsEmptyValueIndirect(v) {
+		m["allowAllDomains"] = v
+	}
+	if v := f.AllowedDomains; v != nil {
+		m["allowedDomains"] = v
+	}
+	if v := f.AllowAmpTraffic; !dcl.IsEmptyValueIndirect(v) {
+		m["allowAmpTraffic"] = v
+	}
+	if v := f.IntegrationType; !dcl.IsEmptyValueIndirect(v) {
+		m["integrationType"] = v
+	}
+	if v := f.ChallengeSecurityPreference; !dcl.IsEmptyValueIndirect(v) {
+		m["challengeSecurityPreference"] = v
+	}
+
+	return m, nil
+}
+
+// flattenKeyWebSettings flattens an instance of KeyWebSettings from a JSON
+// response object.
+func flattenKeyWebSettings(c *Client, i interface{}, res *Key) *KeyWebSettings {
+	m, ok := i.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	r := &KeyWebSettings{}
+
+	if dcl.IsEmptyValueIndirect(i) {
+		return EmptyKeyWebSettings
+	}
+	r.AllowAllDomains = dcl.FlattenBool(m["allowAllDomains"])
+	r.AllowedDomains = dcl.FlattenStringSlice(m["allowedDomains"])
+	r.AllowAmpTraffic = dcl.FlattenBool(m["allowAmpTraffic"])
+	r.IntegrationType = flattenKeyWebSettingsIntegrationTypeEnum(m["integrationType"])
+	r.ChallengeSecurityPreference = flattenKeyWebSettingsChallengeSecurityPreferenceEnum(m["challengeSecurityPreference"])
+
+	return r
+}
+
+// expandKeyAndroidSettingsMap expands the contents of KeyAndroidSettings into a JSON
+// request object.
+func expandKeyAndroidSettingsMap(c *Client, f map[string]KeyAndroidSettings, res *Key) (map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := make(map[string]interface{})
+	for k, item := range f {
+		i, err := expandKeyAndroidSettings(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+		if i != nil {
+			items[k] = i
+		}
+	}
+
+	return items, nil
+}
+
+// expandKeyAndroidSettingsSlice expands the contents of KeyAndroidSettings into a JSON
+// request object.
+func expandKeyAndroidSettingsSlice(c *Client, f []KeyAndroidSettings, res *Key) ([]map[string]interface{}, error) {
+	if f == nil {
+		return nil, nil
+	}
+
+	items := []map[string]interface{}{}
+	for _, item := range f {
+		i, err := expandKeyAndroidSettings(c, &item, res)
+		if err != nil {
+			return nil, err
+		}
+
+		items = append(items, i)
+	}
+
+	return items, nil
+}
+
+// flattenKeyAndroidSettingsMap flattens the contents of KeyAndroidSettings from a JSON
+// response object.
+func flattenKeyAndroidSettingsMap(c *Client, i interface{}, res *Key) map[string]KeyAndroidSettings { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]KeyAndroidSettings{} + } + + if len(a) == 0 { + return map[string]KeyAndroidSettings{} + } + + items := make(map[string]KeyAndroidSettings) + for k, item := range a { + items[k] = *flattenKeyAndroidSettings(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenKeyAndroidSettingsSlice flattens the contents of KeyAndroidSettings from a JSON +// response object. +func flattenKeyAndroidSettingsSlice(c *Client, i interface{}, res *Key) []KeyAndroidSettings { + a, ok := i.([]interface{}) + if !ok { + return []KeyAndroidSettings{} + } + + if len(a) == 0 { + return []KeyAndroidSettings{} + } + + items := make([]KeyAndroidSettings, 0, len(a)) + for _, item := range a { + items = append(items, *flattenKeyAndroidSettings(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandKeyAndroidSettings expands an instance of KeyAndroidSettings into a JSON +// request object. +func expandKeyAndroidSettings(c *Client, f *KeyAndroidSettings, res *Key) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.AllowAllPackageNames; !dcl.IsEmptyValueIndirect(v) { + m["allowAllPackageNames"] = v + } + if v := f.AllowedPackageNames; v != nil { + m["allowedPackageNames"] = v + } + + return m, nil +} + +// flattenKeyAndroidSettings flattens an instance of KeyAndroidSettings from a JSON +// response object. 
+func flattenKeyAndroidSettings(c *Client, i interface{}, res *Key) *KeyAndroidSettings { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &KeyAndroidSettings{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyKeyAndroidSettings + } + r.AllowAllPackageNames = dcl.FlattenBool(m["allowAllPackageNames"]) + r.AllowedPackageNames = dcl.FlattenStringSlice(m["allowedPackageNames"]) + + return r +} + +// expandKeyIosSettingsMap expands the contents of KeyIosSettings into a JSON +// request object. +func expandKeyIosSettingsMap(c *Client, f map[string]KeyIosSettings, res *Key) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandKeyIosSettings(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandKeyIosSettingsSlice expands the contents of KeyIosSettings into a JSON +// request object. +func expandKeyIosSettingsSlice(c *Client, f []KeyIosSettings, res *Key) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandKeyIosSettings(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenKeyIosSettingsMap flattens the contents of KeyIosSettings from a JSON +// response object. +func flattenKeyIosSettingsMap(c *Client, i interface{}, res *Key) map[string]KeyIosSettings { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]KeyIosSettings{} + } + + if len(a) == 0 { + return map[string]KeyIosSettings{} + } + + items := make(map[string]KeyIosSettings) + for k, item := range a { + items[k] = *flattenKeyIosSettings(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenKeyIosSettingsSlice flattens the contents of KeyIosSettings from a JSON +// response object. 
+func flattenKeyIosSettingsSlice(c *Client, i interface{}, res *Key) []KeyIosSettings { + a, ok := i.([]interface{}) + if !ok { + return []KeyIosSettings{} + } + + if len(a) == 0 { + return []KeyIosSettings{} + } + + items := make([]KeyIosSettings, 0, len(a)) + for _, item := range a { + items = append(items, *flattenKeyIosSettings(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandKeyIosSettings expands an instance of KeyIosSettings into a JSON +// request object. +func expandKeyIosSettings(c *Client, f *KeyIosSettings, res *Key) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.AllowAllBundleIds; !dcl.IsEmptyValueIndirect(v) { + m["allowAllBundleIds"] = v + } + if v := f.AllowedBundleIds; v != nil { + m["allowedBundleIds"] = v + } + + return m, nil +} + +// flattenKeyIosSettings flattens an instance of KeyIosSettings from a JSON +// response object. +func flattenKeyIosSettings(c *Client, i interface{}, res *Key) *KeyIosSettings { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &KeyIosSettings{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyKeyIosSettings + } + r.AllowAllBundleIds = dcl.FlattenBool(m["allowAllBundleIds"]) + r.AllowedBundleIds = dcl.FlattenStringSlice(m["allowedBundleIds"]) + + return r +} + +// expandKeyTestingOptionsMap expands the contents of KeyTestingOptions into a JSON +// request object. +func expandKeyTestingOptionsMap(c *Client, f map[string]KeyTestingOptions, res *Key) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandKeyTestingOptions(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandKeyTestingOptionsSlice expands the contents of KeyTestingOptions into a JSON +// request object. 
+func expandKeyTestingOptionsSlice(c *Client, f []KeyTestingOptions, res *Key) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandKeyTestingOptions(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenKeyTestingOptionsMap flattens the contents of KeyTestingOptions from a JSON +// response object. +func flattenKeyTestingOptionsMap(c *Client, i interface{}, res *Key) map[string]KeyTestingOptions { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]KeyTestingOptions{} + } + + if len(a) == 0 { + return map[string]KeyTestingOptions{} + } + + items := make(map[string]KeyTestingOptions) + for k, item := range a { + items[k] = *flattenKeyTestingOptions(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenKeyTestingOptionsSlice flattens the contents of KeyTestingOptions from a JSON +// response object. +func flattenKeyTestingOptionsSlice(c *Client, i interface{}, res *Key) []KeyTestingOptions { + a, ok := i.([]interface{}) + if !ok { + return []KeyTestingOptions{} + } + + if len(a) == 0 { + return []KeyTestingOptions{} + } + + items := make([]KeyTestingOptions, 0, len(a)) + for _, item := range a { + items = append(items, *flattenKeyTestingOptions(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandKeyTestingOptions expands an instance of KeyTestingOptions into a JSON +// request object. 
+func expandKeyTestingOptions(c *Client, f *KeyTestingOptions, res *Key) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.TestingScore; !dcl.IsEmptyValueIndirect(v) { + m["testingScore"] = v + } + if v := f.TestingChallenge; !dcl.IsEmptyValueIndirect(v) { + m["testingChallenge"] = v + } + + return m, nil +} + +// flattenKeyTestingOptions flattens an instance of KeyTestingOptions from a JSON +// response object. +func flattenKeyTestingOptions(c *Client, i interface{}, res *Key) *KeyTestingOptions { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &KeyTestingOptions{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyKeyTestingOptions + } + r.TestingScore = dcl.FlattenDouble(m["testingScore"]) + r.TestingChallenge = flattenKeyTestingOptionsTestingChallengeEnum(m["testingChallenge"]) + + return r +} + +// expandKeyWafSettingsMap expands the contents of KeyWafSettings into a JSON +// request object. +func expandKeyWafSettingsMap(c *Client, f map[string]KeyWafSettings, res *Key) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandKeyWafSettings(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandKeyWafSettingsSlice expands the contents of KeyWafSettings into a JSON +// request object. +func expandKeyWafSettingsSlice(c *Client, f []KeyWafSettings, res *Key) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandKeyWafSettings(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenKeyWafSettingsMap flattens the contents of KeyWafSettings from a JSON +// response object. 
+func flattenKeyWafSettingsMap(c *Client, i interface{}, res *Key) map[string]KeyWafSettings { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]KeyWafSettings{} + } + + if len(a) == 0 { + return map[string]KeyWafSettings{} + } + + items := make(map[string]KeyWafSettings) + for k, item := range a { + items[k] = *flattenKeyWafSettings(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenKeyWafSettingsSlice flattens the contents of KeyWafSettings from a JSON +// response object. +func flattenKeyWafSettingsSlice(c *Client, i interface{}, res *Key) []KeyWafSettings { + a, ok := i.([]interface{}) + if !ok { + return []KeyWafSettings{} + } + + if len(a) == 0 { + return []KeyWafSettings{} + } + + items := make([]KeyWafSettings, 0, len(a)) + for _, item := range a { + items = append(items, *flattenKeyWafSettings(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandKeyWafSettings expands an instance of KeyWafSettings into a JSON +// request object. +func expandKeyWafSettings(c *Client, f *KeyWafSettings, res *Key) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.WafService; !dcl.IsEmptyValueIndirect(v) { + m["wafService"] = v + } + if v := f.WafFeature; !dcl.IsEmptyValueIndirect(v) { + m["wafFeature"] = v + } + + return m, nil +} + +// flattenKeyWafSettings flattens an instance of KeyWafSettings from a JSON +// response object. 
+func flattenKeyWafSettings(c *Client, i interface{}, res *Key) *KeyWafSettings { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &KeyWafSettings{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyKeyWafSettings + } + r.WafService = flattenKeyWafSettingsWafServiceEnum(m["wafService"]) + r.WafFeature = flattenKeyWafSettingsWafFeatureEnum(m["wafFeature"]) + + return r +} + +// flattenKeyWebSettingsIntegrationTypeEnumMap flattens the contents of KeyWebSettingsIntegrationTypeEnum from a JSON +// response object. +func flattenKeyWebSettingsIntegrationTypeEnumMap(c *Client, i interface{}, res *Key) map[string]KeyWebSettingsIntegrationTypeEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]KeyWebSettingsIntegrationTypeEnum{} + } + + if len(a) == 0 { + return map[string]KeyWebSettingsIntegrationTypeEnum{} + } + + items := make(map[string]KeyWebSettingsIntegrationTypeEnum) + for k, item := range a { + items[k] = *flattenKeyWebSettingsIntegrationTypeEnum(item.(interface{})) + } + + return items +} + +// flattenKeyWebSettingsIntegrationTypeEnumSlice flattens the contents of KeyWebSettingsIntegrationTypeEnum from a JSON +// response object. +func flattenKeyWebSettingsIntegrationTypeEnumSlice(c *Client, i interface{}, res *Key) []KeyWebSettingsIntegrationTypeEnum { + a, ok := i.([]interface{}) + if !ok { + return []KeyWebSettingsIntegrationTypeEnum{} + } + + if len(a) == 0 { + return []KeyWebSettingsIntegrationTypeEnum{} + } + + items := make([]KeyWebSettingsIntegrationTypeEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenKeyWebSettingsIntegrationTypeEnum(item.(interface{}))) + } + + return items +} + +// flattenKeyWebSettingsIntegrationTypeEnum asserts that an interface is a string, and returns a +// pointer to a *KeyWebSettingsIntegrationTypeEnum with the same value as that string. 
+func flattenKeyWebSettingsIntegrationTypeEnum(i interface{}) *KeyWebSettingsIntegrationTypeEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return KeyWebSettingsIntegrationTypeEnumRef(s) +} + +// flattenKeyWebSettingsChallengeSecurityPreferenceEnumMap flattens the contents of KeyWebSettingsChallengeSecurityPreferenceEnum from a JSON +// response object. +func flattenKeyWebSettingsChallengeSecurityPreferenceEnumMap(c *Client, i interface{}, res *Key) map[string]KeyWebSettingsChallengeSecurityPreferenceEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]KeyWebSettingsChallengeSecurityPreferenceEnum{} + } + + if len(a) == 0 { + return map[string]KeyWebSettingsChallengeSecurityPreferenceEnum{} + } + + items := make(map[string]KeyWebSettingsChallengeSecurityPreferenceEnum) + for k, item := range a { + items[k] = *flattenKeyWebSettingsChallengeSecurityPreferenceEnum(item.(interface{})) + } + + return items +} + +// flattenKeyWebSettingsChallengeSecurityPreferenceEnumSlice flattens the contents of KeyWebSettingsChallengeSecurityPreferenceEnum from a JSON +// response object. +func flattenKeyWebSettingsChallengeSecurityPreferenceEnumSlice(c *Client, i interface{}, res *Key) []KeyWebSettingsChallengeSecurityPreferenceEnum { + a, ok := i.([]interface{}) + if !ok { + return []KeyWebSettingsChallengeSecurityPreferenceEnum{} + } + + if len(a) == 0 { + return []KeyWebSettingsChallengeSecurityPreferenceEnum{} + } + + items := make([]KeyWebSettingsChallengeSecurityPreferenceEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenKeyWebSettingsChallengeSecurityPreferenceEnum(item.(interface{}))) + } + + return items +} + +// flattenKeyWebSettingsChallengeSecurityPreferenceEnum asserts that an interface is a string, and returns a +// pointer to a *KeyWebSettingsChallengeSecurityPreferenceEnum with the same value as that string. 
+func flattenKeyWebSettingsChallengeSecurityPreferenceEnum(i interface{}) *KeyWebSettingsChallengeSecurityPreferenceEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return KeyWebSettingsChallengeSecurityPreferenceEnumRef(s) +} + +// flattenKeyTestingOptionsTestingChallengeEnumMap flattens the contents of KeyTestingOptionsTestingChallengeEnum from a JSON +// response object. +func flattenKeyTestingOptionsTestingChallengeEnumMap(c *Client, i interface{}, res *Key) map[string]KeyTestingOptionsTestingChallengeEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]KeyTestingOptionsTestingChallengeEnum{} + } + + if len(a) == 0 { + return map[string]KeyTestingOptionsTestingChallengeEnum{} + } + + items := make(map[string]KeyTestingOptionsTestingChallengeEnum) + for k, item := range a { + items[k] = *flattenKeyTestingOptionsTestingChallengeEnum(item.(interface{})) + } + + return items +} + +// flattenKeyTestingOptionsTestingChallengeEnumSlice flattens the contents of KeyTestingOptionsTestingChallengeEnum from a JSON +// response object. +func flattenKeyTestingOptionsTestingChallengeEnumSlice(c *Client, i interface{}, res *Key) []KeyTestingOptionsTestingChallengeEnum { + a, ok := i.([]interface{}) + if !ok { + return []KeyTestingOptionsTestingChallengeEnum{} + } + + if len(a) == 0 { + return []KeyTestingOptionsTestingChallengeEnum{} + } + + items := make([]KeyTestingOptionsTestingChallengeEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenKeyTestingOptionsTestingChallengeEnum(item.(interface{}))) + } + + return items +} + +// flattenKeyTestingOptionsTestingChallengeEnum asserts that an interface is a string, and returns a +// pointer to a *KeyTestingOptionsTestingChallengeEnum with the same value as that string. 
+func flattenKeyTestingOptionsTestingChallengeEnum(i interface{}) *KeyTestingOptionsTestingChallengeEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return KeyTestingOptionsTestingChallengeEnumRef(s) +} + +// flattenKeyWafSettingsWafServiceEnumMap flattens the contents of KeyWafSettingsWafServiceEnum from a JSON +// response object. +func flattenKeyWafSettingsWafServiceEnumMap(c *Client, i interface{}, res *Key) map[string]KeyWafSettingsWafServiceEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]KeyWafSettingsWafServiceEnum{} + } + + if len(a) == 0 { + return map[string]KeyWafSettingsWafServiceEnum{} + } + + items := make(map[string]KeyWafSettingsWafServiceEnum) + for k, item := range a { + items[k] = *flattenKeyWafSettingsWafServiceEnum(item.(interface{})) + } + + return items +} + +// flattenKeyWafSettingsWafServiceEnumSlice flattens the contents of KeyWafSettingsWafServiceEnum from a JSON +// response object. +func flattenKeyWafSettingsWafServiceEnumSlice(c *Client, i interface{}, res *Key) []KeyWafSettingsWafServiceEnum { + a, ok := i.([]interface{}) + if !ok { + return []KeyWafSettingsWafServiceEnum{} + } + + if len(a) == 0 { + return []KeyWafSettingsWafServiceEnum{} + } + + items := make([]KeyWafSettingsWafServiceEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenKeyWafSettingsWafServiceEnum(item.(interface{}))) + } + + return items +} + +// flattenKeyWafSettingsWafServiceEnum asserts that an interface is a string, and returns a +// pointer to a *KeyWafSettingsWafServiceEnum with the same value as that string. +func flattenKeyWafSettingsWafServiceEnum(i interface{}) *KeyWafSettingsWafServiceEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return KeyWafSettingsWafServiceEnumRef(s) +} + +// flattenKeyWafSettingsWafFeatureEnumMap flattens the contents of KeyWafSettingsWafFeatureEnum from a JSON +// response object. 
+func flattenKeyWafSettingsWafFeatureEnumMap(c *Client, i interface{}, res *Key) map[string]KeyWafSettingsWafFeatureEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]KeyWafSettingsWafFeatureEnum{} + } + + if len(a) == 0 { + return map[string]KeyWafSettingsWafFeatureEnum{} + } + + items := make(map[string]KeyWafSettingsWafFeatureEnum) + for k, item := range a { + items[k] = *flattenKeyWafSettingsWafFeatureEnum(item.(interface{})) + } + + return items +} + +// flattenKeyWafSettingsWafFeatureEnumSlice flattens the contents of KeyWafSettingsWafFeatureEnum from a JSON +// response object. +func flattenKeyWafSettingsWafFeatureEnumSlice(c *Client, i interface{}, res *Key) []KeyWafSettingsWafFeatureEnum { + a, ok := i.([]interface{}) + if !ok { + return []KeyWafSettingsWafFeatureEnum{} + } + + if len(a) == 0 { + return []KeyWafSettingsWafFeatureEnum{} + } + + items := make([]KeyWafSettingsWafFeatureEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenKeyWafSettingsWafFeatureEnum(item.(interface{}))) + } + + return items +} + +// flattenKeyWafSettingsWafFeatureEnum asserts that an interface is a string, and returns a +// pointer to a *KeyWafSettingsWafFeatureEnum with the same value as that string. +func flattenKeyWafSettingsWafFeatureEnum(i interface{}) *KeyWafSettingsWafFeatureEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return KeyWafSettingsWafFeatureEnumRef(s) +} + +// This function returns a matcher that checks whether a serialized resource matches this resource +// in its parameters (as defined by the fields in a Get, which definitionally define resource +// identity). This is useful in extracting the element from a List call. 
+func (r *Key) matcher(c *Client) func([]byte) bool { + return func(b []byte) bool { + cr, err := unmarshalKey(b, c, r) + if err != nil { + c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.") + return false + } + nr := r.urlNormalized() + ncr := cr.urlNormalized() + c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr) + + if nr.Project == nil && ncr.Project == nil { + c.Config.Logger.Info("Both Project fields null - considering equal.") + } else if nr.Project == nil || ncr.Project == nil { + c.Config.Logger.Info("Only one Project field is null - considering unequal.") + return false + } else if *nr.Project != *ncr.Project { + return false + } + if nr.Name == nil && ncr.Name == nil { + c.Config.Logger.Info("Both Name fields null - considering equal.") + } else if nr.Name == nil || ncr.Name == nil { + c.Config.Logger.Info("Only one Name field is null - considering unequal.") + return false + } else if *nr.Name != *ncr.Name { + return false + } + return true + } +} + +type keyDiff struct { + // The diff should include one or the other of RequiresRecreate or UpdateOp. + RequiresRecreate bool + UpdateOp keyApiOperation + FieldName string // used for error logging +} + +func convertFieldDiffsToKeyDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]keyDiff, error) { + opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff) + // Map each operation name to the field diffs associated with it. + for _, fd := range fds { + for _, ro := range fd.ResultingOperation { + if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok { + fieldDiffs = append(fieldDiffs, fd) + opNamesToFieldDiffs[ro] = fieldDiffs + } else { + config.Logger.Infof("%s required due to diff: %v", ro, fd) + opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd} + } + } + } + var diffs []keyDiff + // For each operation name, create a keyDiff which contains the operation. 
+ for opName, fieldDiffs := range opNamesToFieldDiffs { + // Use the first field diff's field name for logging required recreate error. + diff := keyDiff{FieldName: fieldDiffs[0].FieldName} + if opName == "Recreate" { + diff.RequiresRecreate = true + } else { + apiOp, err := convertOpNameToKeyApiOperation(opName, fieldDiffs, opts...) + if err != nil { + return diffs, err + } + diff.UpdateOp = apiOp + } + diffs = append(diffs, diff) + } + return diffs, nil +} + +func convertOpNameToKeyApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (keyApiOperation, error) { + switch opName { + + case "updateKeyUpdateKeyOperation": + return &updateKeyUpdateKeyOperation{FieldDiffs: fieldDiffs}, nil + + default: + return nil, fmt.Errorf("no such operation with name: %v", opName) + } +} + +func extractKeyFields(r *Key) error { + vWebSettings := r.WebSettings + if vWebSettings == nil { + // note: explicitly not the empty object. + vWebSettings = &KeyWebSettings{} + } + if err := extractKeyWebSettingsFields(r, vWebSettings); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vWebSettings) { + r.WebSettings = vWebSettings + } + vAndroidSettings := r.AndroidSettings + if vAndroidSettings == nil { + // note: explicitly not the empty object. + vAndroidSettings = &KeyAndroidSettings{} + } + if err := extractKeyAndroidSettingsFields(r, vAndroidSettings); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAndroidSettings) { + r.AndroidSettings = vAndroidSettings + } + vIosSettings := r.IosSettings + if vIosSettings == nil { + // note: explicitly not the empty object. + vIosSettings = &KeyIosSettings{} + } + if err := extractKeyIosSettingsFields(r, vIosSettings); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vIosSettings) { + r.IosSettings = vIosSettings + } + vTestingOptions := r.TestingOptions + if vTestingOptions == nil { + // note: explicitly not the empty object. 
+ vTestingOptions = &KeyTestingOptions{} + } + if err := extractKeyTestingOptionsFields(r, vTestingOptions); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vTestingOptions) { + r.TestingOptions = vTestingOptions + } + vWafSettings := r.WafSettings + if vWafSettings == nil { + // note: explicitly not the empty object. + vWafSettings = &KeyWafSettings{} + } + if err := extractKeyWafSettingsFields(r, vWafSettings); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vWafSettings) { + r.WafSettings = vWafSettings + } + return nil +} +func extractKeyWebSettingsFields(r *Key, o *KeyWebSettings) error { + return nil +} +func extractKeyAndroidSettingsFields(r *Key, o *KeyAndroidSettings) error { + return nil +} +func extractKeyIosSettingsFields(r *Key, o *KeyIosSettings) error { + return nil +} +func extractKeyTestingOptionsFields(r *Key, o *KeyTestingOptions) error { + return nil +} +func extractKeyWafSettingsFields(r *Key, o *KeyWafSettings) error { + return nil +} + +func postReadExtractKeyFields(r *Key) error { + vWebSettings := r.WebSettings + if vWebSettings == nil { + // note: explicitly not the empty object. + vWebSettings = &KeyWebSettings{} + } + if err := postReadExtractKeyWebSettingsFields(r, vWebSettings); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vWebSettings) { + r.WebSettings = vWebSettings + } + vAndroidSettings := r.AndroidSettings + if vAndroidSettings == nil { + // note: explicitly not the empty object. + vAndroidSettings = &KeyAndroidSettings{} + } + if err := postReadExtractKeyAndroidSettingsFields(r, vAndroidSettings); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAndroidSettings) { + r.AndroidSettings = vAndroidSettings + } + vIosSettings := r.IosSettings + if vIosSettings == nil { + // note: explicitly not the empty object. 
+ vIosSettings = &KeyIosSettings{} + } + if err := postReadExtractKeyIosSettingsFields(r, vIosSettings); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vIosSettings) { + r.IosSettings = vIosSettings + } + vTestingOptions := r.TestingOptions + if vTestingOptions == nil { + // note: explicitly not the empty object. + vTestingOptions = &KeyTestingOptions{} + } + if err := postReadExtractKeyTestingOptionsFields(r, vTestingOptions); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vTestingOptions) { + r.TestingOptions = vTestingOptions + } + vWafSettings := r.WafSettings + if vWafSettings == nil { + // note: explicitly not the empty object. + vWafSettings = &KeyWafSettings{} + } + if err := postReadExtractKeyWafSettingsFields(r, vWafSettings); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vWafSettings) { + r.WafSettings = vWafSettings + } + return nil +} +func postReadExtractKeyWebSettingsFields(r *Key, o *KeyWebSettings) error { + return nil +} +func postReadExtractKeyAndroidSettingsFields(r *Key, o *KeyAndroidSettings) error { + return nil +} +func postReadExtractKeyIosSettingsFields(r *Key, o *KeyIosSettings) error { + return nil +} +func postReadExtractKeyTestingOptionsFields(r *Key, o *KeyTestingOptions) error { + return nil +} +func postReadExtractKeyWafSettingsFields(r *Key, o *KeyWafSettings) error { + return nil +} diff --git a/mmv1/third_party/terraform/services/recaptchaenterprise/key_schema.go.tmpl b/mmv1/third_party/terraform/services/recaptchaenterprise/key_schema.go.tmpl new file mode 100644 index 000000000000..8c1c0cbd34c8 --- /dev/null +++ b/mmv1/third_party/terraform/services/recaptchaenterprise/key_schema.go.tmpl @@ -0,0 +1,317 @@ +package recaptchaenterprise + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +func DCLKeySchema() *dcl.Schema { + return &dcl.Schema{ + Info: &dcl.Info{ + Title: "RecaptchaEnterprise/Key", + Description: "The RecaptchaEnterprise Key 
resource", + StructName: "Key", + }, + Paths: &dcl.Paths{ + Get: &dcl.Path{ + Description: "The function used to get information about a Key", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "key", + Required: true, + Description: "A full instance of a Key", + }, + }, + }, + Apply: &dcl.Path{ + Description: "The function used to apply information about a Key", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "key", + Required: true, + Description: "A full instance of a Key", + }, + }, + }, + Delete: &dcl.Path{ + Description: "The function used to delete a Key", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "key", + Required: true, + Description: "A full instance of a Key", + }, + }, + }, + DeleteAll: &dcl.Path{ + Description: "The function used to delete all Key", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "project", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + }, + }, + List: &dcl.Path{ + Description: "The function used to list information about many Key", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "project", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + }, + }, + }, + Components: &dcl.Components{ + Schemas: map[string]*dcl.Component{ + "Key": &dcl.Component{ + Title: "Key", + ID: "projects/{{ "{{" }}project{{ "}}" }}/keys/{{ "{{" }}name{{ "}}" }}", + ParentContainer: "project", + LabelsField: "labels", + HasCreate: true, + SchemaProperty: dcl.Property{ + Type: "object", + Required: []string{ + "displayName", + "project", + }, + Properties: map[string]*dcl.Property{ + "androidSettings": &dcl.Property{ + Type: "object", + GoName: "AndroidSettings", + GoType: "KeyAndroidSettings", + Description: "Settings for keys that can be used by Android apps.", + Conflicts: []string{ + "webSettings", + "iosSettings", + }, + Properties: map[string]*dcl.Property{ + "allowAllPackageNames": &dcl.Property{ 
+ Type: "boolean", + GoName: "AllowAllPackageNames", + Description: "If set to true, it means allowed_package_names will not be enforced.", + }, + "allowedPackageNames": &dcl.Property{ + Type: "array", + GoName: "AllowedPackageNames", + Description: "Android package names of apps allowed to use the key. Example: 'com.companyname.appname'", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + }, + }, + "createTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "CreateTime", + ReadOnly: true, + Description: "The timestamp corresponding to the creation of this Key.", + Immutable: true, + }, + "displayName": &dcl.Property{ + Type: "string", + GoName: "DisplayName", + Description: "Human-readable display name of this key. Modifiable by user.", + }, + "iosSettings": &dcl.Property{ + Type: "object", + GoName: "IosSettings", + GoType: "KeyIosSettings", + Description: "Settings for keys that can be used by iOS apps.", + Conflicts: []string{ + "webSettings", + "androidSettings", + }, + Properties: map[string]*dcl.Property{ + "allowAllBundleIds": &dcl.Property{ + Type: "boolean", + GoName: "AllowAllBundleIds", + Description: "If set to true, it means allowed_bundle_ids will not be enforced.", + }, + "allowedBundleIds": &dcl.Property{ + Type: "array", + GoName: "AllowedBundleIds", + Description: "iOS bundle ids of apps allowed to use the key. 
Example: 'com.companyname.productname.appname'", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + }, + }, + "labels": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "Labels", + Description: "See [Creating and managing labels](https://cloud.google.com/recaptcha-enterprise/docs/labels).", + }, + "name": &dcl.Property{ + Type: "string", + GoName: "Name", + Description: "The resource id for the Key, which is the same as the Site Key itself.", + Immutable: true, + ServerGeneratedParameter: true, + HasLongForm: true, + }, + "project": &dcl.Property{ + Type: "string", + GoName: "Project", + Description: "The project for the resource", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Cloudresourcemanager/Project", + Field: "name", + Parent: true, + }, + }, + Parameter: true, + }, + "testingOptions": &dcl.Property{ + Type: "object", + GoName: "TestingOptions", + GoType: "KeyTestingOptions", + Description: "Options for user acceptance testing.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "testingChallenge": &dcl.Property{ + Type: "string", + GoName: "TestingChallenge", + GoType: "KeyTestingOptionsTestingChallengeEnum", + Description: "For challenge-based keys only (CHECKBOX, INVISIBLE), all challenge requests for this site will return nocaptcha if NOCAPTCHA, or an unsolvable challenge if UNSOLVABLE_CHALLENGE. Possible values: TESTING_CHALLENGE_UNSPECIFIED, NOCAPTCHA, UNSOLVABLE_CHALLENGE", + Immutable: true, + ServerDefault: true, + Enum: []string{ + "TESTING_CHALLENGE_UNSPECIFIED", + "NOCAPTCHA", + "UNSOLVABLE_CHALLENGE", + }, + }, + "testingScore": &dcl.Property{ + Type: "number", + Format: "double", + GoName: "TestingScore", + Description: "All assessments for this Key will return this score. 
Must be between 0 (likely not legitimate) and 1 (likely legitimate) inclusive.", + Immutable: true, + }, + }, + }, + "wafSettings": &dcl.Property{ + Type: "object", + GoName: "WafSettings", + GoType: "KeyWafSettings", + Description: "Settings specific to keys that can be used for WAF (Web Application Firewall).", + Immutable: true, + Required: []string{ + "wafService", + "wafFeature", + }, + Properties: map[string]*dcl.Property{ + "wafFeature": &dcl.Property{ + Type: "string", + GoName: "WafFeature", + GoType: "KeyWafSettingsWafFeatureEnum", + Description: "Supported WAF features. For more information, see https://cloud.google.com/recaptcha-enterprise/docs/usecase#comparison_of_features. Possible values: CHALLENGE_PAGE, SESSION_TOKEN, ACTION_TOKEN, EXPRESS", + Immutable: true, + Enum: []string{ + "CHALLENGE_PAGE", + "SESSION_TOKEN", + "ACTION_TOKEN", + "EXPRESS", + }, + }, + "wafService": &dcl.Property{ + Type: "string", + GoName: "WafService", + GoType: "KeyWafSettingsWafServiceEnum", + Description: "The WAF service that uses this key. Possible values: CA, FASTLY", + Immutable: true, + Enum: []string{ + "CA", + "FASTLY", + }, + }, + }, + }, + "webSettings": &dcl.Property{ + Type: "object", + GoName: "WebSettings", + GoType: "KeyWebSettings", + Description: "Settings for keys that can be used by websites.", + Conflicts: []string{ + "androidSettings", + "iosSettings", + }, + Required: []string{ + "integrationType", + }, + Properties: map[string]*dcl.Property{ + "allowAllDomains": &dcl.Property{ + Type: "boolean", + GoName: "AllowAllDomains", + Description: "If set to true, it means allowed_domains will not be enforced.", + }, + "allowAmpTraffic": &dcl.Property{ + Type: "boolean", + GoName: "AllowAmpTraffic", + Description: "If set to true, the key can be used on AMP (Accelerated Mobile Pages) websites. 
This is supported only for the SCORE integration type.", + }, + "allowedDomains": &dcl.Property{ + Type: "array", + GoName: "AllowedDomains", + Description: "Domains or subdomains of websites allowed to use the key. All subdomains of an allowed domain are automatically allowed. A valid domain requires a host and must not include any path, port, query or fragment. Examples: 'example.com' or 'subdomain.example.com'", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + "challengeSecurityPreference": &dcl.Property{ + Type: "string", + GoName: "ChallengeSecurityPreference", + GoType: "KeyWebSettingsChallengeSecurityPreferenceEnum", + Description: "Settings for the frequency and difficulty at which this key triggers captcha challenges. This should only be specified for IntegrationTypes CHECKBOX and INVISIBLE. Possible values: CHALLENGE_SECURITY_PREFERENCE_UNSPECIFIED, USABILITY, BALANCE, SECURITY", + ServerDefault: true, + Enum: []string{ + "CHALLENGE_SECURITY_PREFERENCE_UNSPECIFIED", + "USABILITY", + "BALANCE", + "SECURITY", + }, + }, + "integrationType": &dcl.Property{ + Type: "string", + GoName: "IntegrationType", + GoType: "KeyWebSettingsIntegrationTypeEnum", + Description: "Required. Describes how this key is integrated with the website. 
Possible values: SCORE, CHECKBOX, INVISIBLE", + Immutable: true, + Enum: []string{ + "SCORE", + "CHECKBOX", + "INVISIBLE", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } +} diff --git a/mmv1/third_party/terraform/services/recaptchaenterprise/provider_dcl_client_creation.go b/mmv1/third_party/terraform/services/recaptchaenterprise/provider_dcl_client_creation.go new file mode 100644 index 000000000000..5235a3834f54 --- /dev/null +++ b/mmv1/third_party/terraform/services/recaptchaenterprise/provider_dcl_client_creation.go @@ -0,0 +1,30 @@ +package recaptchaenterprise + +import ( + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "time" +) + +func NewDCLRecaptchaEnterpriseClient(config *transport_tpg.Config, userAgent, billingProject string, timeout time.Duration) *Client { + configOptions := []dcl.ConfigOption{ + dcl.WithHTTPClient(config.Client), + dcl.WithUserAgent(userAgent), + dcl.WithLogger(dcl.DCLLogger{}), + dcl.WithBasePath(config.RecaptchaEnterpriseBasePath), + } + + if timeout != 0 { + configOptions = append(configOptions, dcl.WithTimeout(timeout)) + } + + if config.UserProjectOverride { + configOptions = append(configOptions, dcl.WithUserProjectOverride()) + if billingProject != "" { + configOptions = append(configOptions, dcl.WithBillingProject(billingProject)) + } + } + + dclConfig := dcl.NewConfig(configOptions...) 
+ return NewClient(dclConfig) +} diff --git a/mmv1/third_party/terraform/services/recaptchaenterprise/resource_recaptcha_enterprise_key.go b/mmv1/third_party/terraform/services/recaptchaenterprise/resource_recaptcha_enterprise_key.go new file mode 100644 index 000000000000..57a675af7c13 --- /dev/null +++ b/mmv1/third_party/terraform/services/recaptchaenterprise/resource_recaptcha_enterprise_key.go @@ -0,0 +1,689 @@ +package recaptchaenterprise + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceRecaptchaEnterpriseKey() *schema.Resource { + return &schema.Resource{ + Create: resourceRecaptchaEnterpriseKeyCreate, + Read: resourceRecaptchaEnterpriseKeyRead, + Update: resourceRecaptchaEnterpriseKeyUpdate, + Delete: resourceRecaptchaEnterpriseKeyDelete, + + Importer: &schema.ResourceImporter{ + State: resourceRecaptchaEnterpriseKeyImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + tpgresource.SetLabelsDiff, + ), + + Schema: map[string]*schema.Schema{ + "display_name": { + Type: schema.TypeString, + Required: true, + Description: "Human-readable display name of this key. 
Modifiable by user.", + }, + + "android_settings": { + Type: schema.TypeList, + Optional: true, + Description: "Settings for keys that can be used by Android apps.", + MaxItems: 1, + Elem: RecaptchaEnterpriseKeyAndroidSettingsSchema(), + ConflictsWith: []string{"web_settings", "ios_settings"}, + }, + + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + Description: "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + }, + + "ios_settings": { + Type: schema.TypeList, + Optional: true, + Description: "Settings for keys that can be used by iOS apps.", + MaxItems: 1, + Elem: RecaptchaEnterpriseKeyIosSettingsSchema(), + ConflictsWith: []string{"web_settings", "android_settings"}, + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The project for the resource", + }, + + "testing_options": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Options for user acceptance testing.", + MaxItems: 1, + Elem: RecaptchaEnterpriseKeyTestingOptionsSchema(), + }, + + "waf_settings": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Settings specific to keys that can be used for WAF (Web Application Firewall).", + MaxItems: 1, + Elem: RecaptchaEnterpriseKeyWafSettingsSchema(), + }, + + "web_settings": { + Type: schema.TypeList, + Optional: true, + Description: "Settings for keys that can be used by websites.", + MaxItems: 1, + Elem: RecaptchaEnterpriseKeyWebSettingsSchema(), + ConflictsWith: []string{"android_settings", "ios_settings"}, + }, + + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: "The timestamp corresponding to the creation of this Key.", + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: "See [Creating and managing 
labels](https://cloud.google.com/recaptcha-enterprise/docs/labels).\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field `effective_labels` for all of the labels present on the resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "name": { + Type: schema.TypeString, + Computed: true, + Description: "The resource id for the Key, which is the same as the Site Key itself.", + }, + + "terraform_labels": { + Type: schema.TypeMap, + Computed: true, + Description: "The combination of labels configured directly on the resource and default labels configured on the provider.", + }, + }, + } +} + +func RecaptchaEnterpriseKeyAndroidSettingsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allow_all_package_names": { + Type: schema.TypeBool, + Optional: true, + Description: "If set to true, it means allowed_package_names will not be enforced.", + }, + + "allowed_package_names": { + Type: schema.TypeList, + Optional: true, + Description: "Android package names of apps allowed to use the key. Example: 'com.companyname.appname'", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func RecaptchaEnterpriseKeyIosSettingsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allow_all_bundle_ids": { + Type: schema.TypeBool, + Optional: true, + Description: "If set to true, it means allowed_bundle_ids will not be enforced.", + }, + + "allowed_bundle_ids": { + Type: schema.TypeList, + Optional: true, + Description: "iOS bundle ids of apps allowed to use the key. 
Example: 'com.companyname.productname.appname'", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func RecaptchaEnterpriseKeyTestingOptionsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "testing_challenge": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: "For challenge-based keys only (CHECKBOX, INVISIBLE), all challenge requests for this site will return nocaptcha if NOCAPTCHA, or an unsolvable challenge if UNSOLVABLE_CHALLENGE. Possible values: TESTING_CHALLENGE_UNSPECIFIED, NOCAPTCHA, UNSOLVABLE_CHALLENGE", + }, + + "testing_score": { + Type: schema.TypeFloat, + Optional: true, + ForceNew: true, + Description: "All assessments for this Key will return this score. Must be between 0 (likely not legitimate) and 1 (likely legitimate) inclusive.", + }, + }, + } +} + +func RecaptchaEnterpriseKeyWafSettingsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "waf_feature": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Supported WAF features. For more information, see https://cloud.google.com/recaptcha-enterprise/docs/usecase#comparison_of_features. Possible values: CHALLENGE_PAGE, SESSION_TOKEN, ACTION_TOKEN, EXPRESS", + }, + + "waf_service": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The WAF service that uses this key. Possible values: CA, FASTLY", + }, + }, + } +} + +func RecaptchaEnterpriseKeyWebSettingsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "integration_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. Describes how this key is integrated with the website. 
Possible values: SCORE, CHECKBOX, INVISIBLE", + }, + + "allow_all_domains": { + Type: schema.TypeBool, + Optional: true, + Description: "If set to true, it means allowed_domains will not be enforced.", + }, + + "allow_amp_traffic": { + Type: schema.TypeBool, + Optional: true, + Description: "If set to true, the key can be used on AMP (Accelerated Mobile Pages) websites. This is supported only for the SCORE integration type.", + }, + + "allowed_domains": { + Type: schema.TypeList, + Optional: true, + Description: "Domains or subdomains of websites allowed to use the key. All subdomains of an allowed domain are automatically allowed. A valid domain requires a host and must not include any path, port, query or fragment. Examples: 'example.com' or 'subdomain.example.com'", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "challenge_security_preference": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: "Settings for the frequency and difficulty at which this key triggers captcha challenges. This should only be specified for IntegrationTypes CHECKBOX and INVISIBLE. 
Possible values: CHALLENGE_SECURITY_PREFERENCE_UNSPECIFIED, USABILITY, BALANCE, SECURITY", + }, + }, + } +} + +func resourceRecaptchaEnterpriseKeyCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Key{ + DisplayName: dcl.String(d.Get("display_name").(string)), + AndroidSettings: expandRecaptchaEnterpriseKeyAndroidSettings(d.Get("android_settings")), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + IosSettings: expandRecaptchaEnterpriseKeyIosSettings(d.Get("ios_settings")), + Project: dcl.String(project), + TestingOptions: expandRecaptchaEnterpriseKeyTestingOptions(d.Get("testing_options")), + WafSettings: expandRecaptchaEnterpriseKeyWafSettings(d.Get("waf_settings")), + WebSettings: expandRecaptchaEnterpriseKeyWebSettings(d.Get("web_settings")), + } + + id, err := obj.ID() + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLRecaptchaEnterpriseClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyKey(context.Background(), obj, directive...) 
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating Key: %s", err) + } + + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + // ID has a server-generated value, set again after creation. + + id, err = res.ID() + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Key %q: %#v", d.Id(), res) + + return resourceRecaptchaEnterpriseKeyRead(d, meta) +} + +func resourceRecaptchaEnterpriseKeyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Key{ + DisplayName: dcl.String(d.Get("display_name").(string)), + AndroidSettings: expandRecaptchaEnterpriseKeyAndroidSettings(d.Get("android_settings")), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + IosSettings: expandRecaptchaEnterpriseKeyIosSettings(d.Get("ios_settings")), + Project: dcl.String(project), + TestingOptions: expandRecaptchaEnterpriseKeyTestingOptions(d.Get("testing_options")), + WafSettings: expandRecaptchaEnterpriseKeyWafSettings(d.Get("waf_settings")), + WebSettings: expandRecaptchaEnterpriseKeyWebSettings(d.Get("web_settings")), + Name: dcl.StringOrNil(d.Get("name").(string)), + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLRecaptchaEnterpriseClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, 
config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetKey(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("RecaptchaEnterpriseKey %q", d.Id()) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("display_name", res.DisplayName); err != nil { + return fmt.Errorf("error setting display_name in state: %s", err) + } + if err = d.Set("android_settings", flattenRecaptchaEnterpriseKeyAndroidSettings(res.AndroidSettings)); err != nil { + return fmt.Errorf("error setting android_settings in state: %s", err) + } + if err = d.Set("effective_labels", res.Labels); err != nil { + return fmt.Errorf("error setting effective_labels in state: %s", err) + } + if err = d.Set("ios_settings", flattenRecaptchaEnterpriseKeyIosSettings(res.IosSettings)); err != nil { + return fmt.Errorf("error setting ios_settings in state: %s", err) + } + if err = d.Set("project", res.Project); err != nil { + return fmt.Errorf("error setting project in state: %s", err) + } + if err = d.Set("testing_options", flattenRecaptchaEnterpriseKeyTestingOptions(res.TestingOptions)); err != nil { + return fmt.Errorf("error setting testing_options in state: %s", err) + } + if err = d.Set("waf_settings", flattenRecaptchaEnterpriseKeyWafSettings(res.WafSettings)); err != nil { + return fmt.Errorf("error setting waf_settings in state: %s", err) + } + if err = d.Set("web_settings", flattenRecaptchaEnterpriseKeyWebSettings(res.WebSettings)); err != nil { + return fmt.Errorf("error setting web_settings in state: %s", err) + } + if err = d.Set("create_time", res.CreateTime); err != nil { + return fmt.Errorf("error setting create_time in state: %s", err) + } + if err = d.Set("labels", flattenRecaptchaEnterpriseKeyLabels(res.Labels, d)); err != nil { + return fmt.Errorf("error setting labels in state: %s", err) 
+ } + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + if err = d.Set("terraform_labels", flattenRecaptchaEnterpriseKeyTerraformLabels(res.Labels, d)); err != nil { + return fmt.Errorf("error setting terraform_labels in state: %s", err) + } + + return nil +} +func resourceRecaptchaEnterpriseKeyUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Key{ + DisplayName: dcl.String(d.Get("display_name").(string)), + AndroidSettings: expandRecaptchaEnterpriseKeyAndroidSettings(d.Get("android_settings")), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + IosSettings: expandRecaptchaEnterpriseKeyIosSettings(d.Get("ios_settings")), + Project: dcl.String(project), + TestingOptions: expandRecaptchaEnterpriseKeyTestingOptions(d.Get("testing_options")), + WafSettings: expandRecaptchaEnterpriseKeyWafSettings(d.Get("waf_settings")), + WebSettings: expandRecaptchaEnterpriseKeyWebSettings(d.Get("web_settings")), + Name: dcl.StringOrNil(d.Get("name").(string)), + } + directive := tpgdclresource.UpdateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLRecaptchaEnterpriseClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyKey(context.Background(), obj, directive...) 
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually update + d.SetId("") + return fmt.Errorf("Error updating Key: %s", err) + } + + log.Printf("[DEBUG] Finished updating Key %q: %#v", d.Id(), res) + + return resourceRecaptchaEnterpriseKeyRead(d, meta) +} + +func resourceRecaptchaEnterpriseKeyDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &Key{ + DisplayName: dcl.String(d.Get("display_name").(string)), + AndroidSettings: expandRecaptchaEnterpriseKeyAndroidSettings(d.Get("android_settings")), + Labels: tpgresource.CheckStringMap(d.Get("effective_labels")), + IosSettings: expandRecaptchaEnterpriseKeyIosSettings(d.Get("ios_settings")), + Project: dcl.String(project), + TestingOptions: expandRecaptchaEnterpriseKeyTestingOptions(d.Get("testing_options")), + WafSettings: expandRecaptchaEnterpriseKeyWafSettings(d.Get("waf_settings")), + WebSettings: expandRecaptchaEnterpriseKeyWebSettings(d.Get("web_settings")), + Name: dcl.StringOrNil(d.Get("name").(string)), + } + + log.Printf("[DEBUG] Deleting Key %q", d.Id()) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLRecaptchaEnterpriseClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + if err := client.DeleteKey(context.Background(), obj); 
err != nil { + return fmt.Errorf("Error deleting Key: %s", err) + } + + log.Printf("[DEBUG] Finished deleting Key %q", d.Id()) + return nil +} + +func resourceRecaptchaEnterpriseKeyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/keys/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/keys/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func expandRecaptchaEnterpriseKeyAndroidSettings(o interface{}) *KeyAndroidSettings { + if o == nil { + return EmptyKeyAndroidSettings + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyKeyAndroidSettings + } + obj := objArr[0].(map[string]interface{}) + return &KeyAndroidSettings{ + AllowAllPackageNames: dcl.Bool(obj["allow_all_package_names"].(bool)), + AllowedPackageNames: tpgdclresource.ExpandStringArray(obj["allowed_package_names"]), + } +} + +func flattenRecaptchaEnterpriseKeyAndroidSettings(obj *KeyAndroidSettings) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "allow_all_package_names": obj.AllowAllPackageNames, + "allowed_package_names": obj.AllowedPackageNames, + } + + return []interface{}{transformed} + +} + +func expandRecaptchaEnterpriseKeyIosSettings(o interface{}) *KeyIosSettings { + if o == nil { + return EmptyKeyIosSettings + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyKeyIosSettings + } + obj := objArr[0].(map[string]interface{}) + return &KeyIosSettings{ + AllowAllBundleIds: dcl.Bool(obj["allow_all_bundle_ids"].(bool)), + AllowedBundleIds: 
tpgdclresource.ExpandStringArray(obj["allowed_bundle_ids"]), + } +} + +func flattenRecaptchaEnterpriseKeyIosSettings(obj *KeyIosSettings) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "allow_all_bundle_ids": obj.AllowAllBundleIds, + "allowed_bundle_ids": obj.AllowedBundleIds, + } + + return []interface{}{transformed} + +} + +func expandRecaptchaEnterpriseKeyTestingOptions(o interface{}) *KeyTestingOptions { + if o == nil { + return EmptyKeyTestingOptions + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyKeyTestingOptions + } + obj := objArr[0].(map[string]interface{}) + return &KeyTestingOptions{ + TestingChallenge: KeyTestingOptionsTestingChallengeEnumRef(obj["testing_challenge"].(string)), + TestingScore: dcl.Float64(obj["testing_score"].(float64)), + } +} + +func flattenRecaptchaEnterpriseKeyTestingOptions(obj *KeyTestingOptions) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "testing_challenge": obj.TestingChallenge, + "testing_score": obj.TestingScore, + } + + return []interface{}{transformed} + +} + +func expandRecaptchaEnterpriseKeyWafSettings(o interface{}) *KeyWafSettings { + if o == nil { + return EmptyKeyWafSettings + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyKeyWafSettings + } + obj := objArr[0].(map[string]interface{}) + return &KeyWafSettings{ + WafFeature: KeyWafSettingsWafFeatureEnumRef(obj["waf_feature"].(string)), + WafService: KeyWafSettingsWafServiceEnumRef(obj["waf_service"].(string)), + } +} + +func flattenRecaptchaEnterpriseKeyWafSettings(obj *KeyWafSettings) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "waf_feature": obj.WafFeature, + "waf_service": obj.WafService, + } + + return []interface{}{transformed} + +} + +func expandRecaptchaEnterpriseKeyWebSettings(o interface{}) 
*KeyWebSettings { + if o == nil { + return EmptyKeyWebSettings + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return EmptyKeyWebSettings + } + obj := objArr[0].(map[string]interface{}) + return &KeyWebSettings{ + IntegrationType: KeyWebSettingsIntegrationTypeEnumRef(obj["integration_type"].(string)), + AllowAllDomains: dcl.Bool(obj["allow_all_domains"].(bool)), + AllowAmpTraffic: dcl.Bool(obj["allow_amp_traffic"].(bool)), + AllowedDomains: tpgdclresource.ExpandStringArray(obj["allowed_domains"]), + ChallengeSecurityPreference: KeyWebSettingsChallengeSecurityPreferenceEnumRef(obj["challenge_security_preference"].(string)), + } +} + +func flattenRecaptchaEnterpriseKeyWebSettings(obj *KeyWebSettings) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "integration_type": obj.IntegrationType, + "allow_all_domains": obj.AllowAllDomains, + "allow_amp_traffic": obj.AllowAmpTraffic, + "allowed_domains": obj.AllowedDomains, + "challenge_security_preference": obj.ChallengeSecurityPreference, + } + + return []interface{}{transformed} + +} + +func flattenRecaptchaEnterpriseKeyLabels(v map[string]string, d *schema.ResourceData) interface{} { + if v == nil { + return nil + } + + transformed := make(map[string]interface{}) + if l, ok := d.Get("labels").(map[string]interface{}); ok { + for k := range l { + transformed[k] = v[k] + } + } + + return transformed +} + +func flattenRecaptchaEnterpriseKeyTerraformLabels(v map[string]string, d *schema.ResourceData) interface{} { + if v == nil { + return nil + } + + transformed := make(map[string]interface{}) + if l, ok := d.Get("terraform_labels").(map[string]interface{}); ok { + for k := range l { + transformed[k] = v[k] + } + } + + return transformed +} diff --git a/mmv1/third_party/terraform/services/recaptchaenterprise/resource_recaptcha_enterprise_key_generated_test.go.tmpl 
b/mmv1/third_party/terraform/services/recaptchaenterprise/resource_recaptcha_enterprise_key_generated_test.go.tmpl new file mode 100644 index 000000000000..a7f3d8d1e71f --- /dev/null +++ b/mmv1/third_party/terraform/services/recaptchaenterprise/resource_recaptcha_enterprise_key_generated_test.go.tmpl @@ -0,0 +1,497 @@ +package recaptchaenterprise_test + +import ( + "context" + "fmt" + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" +{{- if eq $.TargetVersionName "ga" }} + recaptchaenterprise "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/recaptchaenterprise" +{{- else }} + recaptchaenterprise "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/recaptchaenterprise/beta" +{{- end }} + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func TestAccRecaptchaEnterpriseKey_AndroidKey(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": envvar.GetTestProjectFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckRecaptchaEnterpriseKeyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccRecaptchaEnterpriseKey_AndroidKey(context), + }, + { + ResourceName: "google_recaptcha_enterprise_key.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testAccRecaptchaEnterpriseKey_AndroidKeyUpdate0(context), + }, + { + ResourceName: 
"google_recaptcha_enterprise_key.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} +func TestAccRecaptchaEnterpriseKey_IosKey(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": envvar.GetTestProjectFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckRecaptchaEnterpriseKeyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccRecaptchaEnterpriseKey_IosKey(context), + }, + { + ResourceName: "google_recaptcha_enterprise_key.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testAccRecaptchaEnterpriseKey_IosKeyUpdate0(context), + }, + { + ResourceName: "google_recaptcha_enterprise_key.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} +func TestAccRecaptchaEnterpriseKey_MinimalKey(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": envvar.GetTestProjectFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckRecaptchaEnterpriseKeyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccRecaptchaEnterpriseKey_MinimalKey(context), + }, + { + ResourceName: "google_recaptcha_enterprise_key.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} +func TestAccRecaptchaEnterpriseKey_WafKey(t *testing.T) { + t.Parallel() + + context := 
map[string]interface{}{ + "project_name": envvar.GetTestProjectFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckRecaptchaEnterpriseKeyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccRecaptchaEnterpriseKey_WafKey(context), + }, + { + ResourceName: "google_recaptcha_enterprise_key.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} +func TestAccRecaptchaEnterpriseKey_WebKey(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": envvar.GetTestProjectFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckRecaptchaEnterpriseKeyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccRecaptchaEnterpriseKey_WebKey(context), + }, + { + ResourceName: "google_recaptcha_enterprise_key.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testAccRecaptchaEnterpriseKey_WebKeyUpdate0(context), + }, + { + ResourceName: "google_recaptcha_enterprise_key.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} +func TestAccRecaptchaEnterpriseKey_WebScoreKey(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": envvar.GetTestProjectFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckRecaptchaEnterpriseKeyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccRecaptchaEnterpriseKey_WebScoreKey(context), + }, + { + ResourceName: "google_recaptcha_enterprise_key.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testAccRecaptchaEnterpriseKey_WebScoreKeyUpdate0(context), + }, + { + ResourceName: "google_recaptcha_enterprise_key.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccRecaptchaEnterpriseKey_AndroidKey(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_recaptcha_enterprise_key" "primary" { + display_name = "display-name-one" + + android_settings { + allow_all_package_names = true + allowed_package_names = [] + } + + project = "%{project_name}" + + testing_options { + testing_score = 0.8 + } + + labels = { + label-one = "value-one" + } +} + + +`, context) +} + +func testAccRecaptchaEnterpriseKey_AndroidKeyUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_recaptcha_enterprise_key" "primary" { + display_name = "display-name-two" + + android_settings { + allow_all_package_names = false + allowed_package_names = ["com.android.application"] + } + + project = "%{project_name}" + + testing_options { + testing_score = 0.8 + } + + labels = { + label-two = "value-two" + } +} + + +`, context) +} + +func testAccRecaptchaEnterpriseKey_IosKey(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_recaptcha_enterprise_key" "primary" { + display_name = "display-name-one" + + ios_settings { + allow_all_bundle_ids = true + allowed_bundle_ids = [] + } + + project = "%{project_name}" + + testing_options { + testing_score = 1 + } + + labels = { + label-one = "value-one" + } +} 
+ + +`, context) +} + +func testAccRecaptchaEnterpriseKey_IosKeyUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_recaptcha_enterprise_key" "primary" { + display_name = "display-name-two" + + ios_settings { + allow_all_bundle_ids = false + allowed_bundle_ids = ["com.companyname.appname"] + } + + project = "%{project_name}" + + testing_options { + testing_score = 1 + } + + labels = { + label-two = "value-two" + } +} + + +`, context) +} + +func testAccRecaptchaEnterpriseKey_MinimalKey(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_recaptcha_enterprise_key" "primary" { + display_name = "display-name-one" + project = "%{project_name}" + + web_settings { + integration_type = "SCORE" + allow_all_domains = true + } + + labels = {} +} + + +`, context) +} + +func testAccRecaptchaEnterpriseKey_WafKey(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_recaptcha_enterprise_key" "primary" { + display_name = "display-name-one" + project = "%{project_name}" + + testing_options { + testing_challenge = "NOCAPTCHA" + testing_score = 0.5 + } + + waf_settings { + waf_feature = "CHALLENGE_PAGE" + waf_service = "CA" + } + + web_settings { + integration_type = "INVISIBLE" + allow_all_domains = true + allowed_domains = [] + challenge_security_preference = "USABILITY" + } + + labels = { + label-one = "value-one" + } +} + + +`, context) +} + +func testAccRecaptchaEnterpriseKey_WebKey(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_recaptcha_enterprise_key" "primary" { + display_name = "display-name-one" + project = "%{project_name}" + + testing_options { + testing_challenge = "NOCAPTCHA" + testing_score = 0.5 + } + + web_settings { + integration_type = "CHECKBOX" + allow_all_domains = true + allowed_domains = [] + challenge_security_preference = "USABILITY" + } + + labels = { + label-one = "value-one" + } +} + + +`, context) +} + +func 
testAccRecaptchaEnterpriseKey_WebKeyUpdate0(context map[string]interface{}) string {
+	return acctest.Nprintf(`
+resource "google_recaptcha_enterprise_key" "primary" {
+  display_name = "display-name-two"
+  project      = "%{project_name}"
+
+  testing_options {
+    testing_challenge = "NOCAPTCHA"
+    testing_score     = 0.5
+  }
+
+  web_settings {
+    integration_type              = "CHECKBOX"
+    allow_all_domains             = false
+    allowed_domains               = ["subdomain.example.com"]
+    challenge_security_preference = "SECURITY"
+  }
+
+  labels = {
+    label-two = "value-two"
+  }
+}
+
+
+`, context)
+}
+
+func testAccRecaptchaEnterpriseKey_WebScoreKey(context map[string]interface{}) string {
+	return acctest.Nprintf(`
+resource "google_recaptcha_enterprise_key" "primary" {
+  display_name = "display-name-one"
+  project      = "%{project_name}"
+
+  testing_options {
+    testing_score = 0.5
+  }
+
+  web_settings {
+    integration_type  = "SCORE"
+    allow_all_domains = true
+    allow_amp_traffic = false
+    allowed_domains   = []
+  }
+
+  labels = {
+    label-one = "value-one"
+  }
+}
+
+
+`, context)
+}
+
+func testAccRecaptchaEnterpriseKey_WebScoreKeyUpdate0(context map[string]interface{}) string {
+	return acctest.Nprintf(`
+resource "google_recaptcha_enterprise_key" "primary" {
+  display_name = "display-name-two"
+  project      = "%{project_name}"
+
+  testing_options {
+    testing_score = 0.5
+  }
+
+  web_settings {
+    integration_type  = "SCORE"
+    allow_all_domains = false
+    allow_amp_traffic = true
+    allowed_domains   = ["subdomain.example.com"]
+  }
+
+  labels = {
+    label-two = "value-two"
+  }
+}
+
+
+`, context)
+}
+
+// testAccCheckRecaptchaEnterpriseKeyDestroyProducer returns a CheckDestroyFunc that
+// verifies every google_recaptcha_enterprise_key in state has actually been deleted
+// by issuing a Get against the API and requiring it to fail.
+func testAccCheckRecaptchaEnterpriseKeyDestroyProducer(t *testing.T) func(s *terraform.State) error {
+	return func(s *terraform.State) error {
+		for name, rs := range s.RootModule().Resources {
+			// Bug fix: the generated check compared against
+			// "rs.google_recaptcha_enterprise_key", a string that can never equal
+			// rs.Type, so every resource was skipped and destroy was never verified.
+			if rs.Type != "google_recaptcha_enterprise_key" {
+				continue
+			}
+			if strings.HasPrefix(name, "data.") {
+				continue
+			}
+
+			config := acctest.GoogleProviderConfig(t)
+
+			billingProject := ""
+			if config.BillingProject != "" {
+
billingProject = config.BillingProject + } + + obj := &recaptchaenterprise.Key{ + DisplayName: dcl.String(rs.Primary.Attributes["display_name"]), + Project: dcl.StringOrNil(rs.Primary.Attributes["project"]), + CreateTime: dcl.StringOrNil(rs.Primary.Attributes["create_time"]), + Name: dcl.StringOrNil(rs.Primary.Attributes["name"]), + } + + client := transport_tpg.NewDCLRecaptchaEnterpriseClient(config, config.UserAgent, billingProject, 0) + _, err := client.GetKey(context.Background(), obj) + if err == nil { + return fmt.Errorf("google_recaptcha_enterprise_key still exists %v", obj) + } + } + return nil + } +} diff --git a/mmv1/third_party/terraform/services/recaptchaenterprise/resource_recaptcha_enterprise_key_sweeper.go b/mmv1/third_party/terraform/services/recaptchaenterprise/resource_recaptcha_enterprise_key_sweeper.go new file mode 100644 index 000000000000..443deff1d889 --- /dev/null +++ b/mmv1/third_party/terraform/services/recaptchaenterprise/resource_recaptcha_enterprise_key_sweeper.go @@ -0,0 +1,53 @@ +package recaptchaenterprise + +import ( + "context" + "log" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" +) + +func init() { + sweeper.AddTestSweepersLegacy("RecaptchaEnterpriseKey", testSweepRecaptchaEnterpriseKey) +} + +func testSweepRecaptchaEnterpriseKey(region string) error { + log.Print("[INFO][SWEEPER_LOG] Starting sweeper for RecaptchaEnterpriseKey") + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to be used for Delete arguments. 
+ d := map[string]string{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + } + + client := NewDCLRecaptchaEnterpriseClient(config, config.UserAgent, "", 0) + err = client.DeleteAllKey(context.Background(), d["project"], isDeletableRecaptchaEnterpriseKey) + if err != nil { + return err + } + return nil +} + +func isDeletableRecaptchaEnterpriseKey(r *Key) bool { + return sweeper.IsSweepableTestResource(*r.Name) +} diff --git a/mmv1/third_party/terraform/tpgdclresource/canonicalize.go b/mmv1/third_party/terraform/tpgdclresource/canonicalize.go new file mode 100755 index 000000000000..0ca0729f21f9 --- /dev/null +++ b/mmv1/third_party/terraform/tpgdclresource/canonicalize.go @@ -0,0 +1,857 @@ +package tpgdclresource + +import ( + "fmt" + "net/url" + "reflect" + "regexp" + "strconv" + "strings" + "time" + + "bitbucket.org/creachadair/stringset" + glog "github.com/golang/glog" +) + +var selfLinkIgnorableComponents = stringset.New("projects", "regions", "locations", "zones", "organizations", "compute", "v1", "v1beta1", "beta") + +// SelfLinkToSelfLink returns true if left and right are equivalent for selflinks. +// That means that they are piecewise equal, comparing components, allowing for +// certain elements to be dropped ("projects", "regions", etc.). It also allows +// any value to be present in the second-to-last field (where "instances" or +// "databases", etc, would be.) 
+func SelfLinkToSelfLink(l, r *string) bool {
+	if l == nil && r == nil {
+		return true
+	}
+	if l == nil || r == nil {
+		return false
+	}
+	left := *l
+	right := *r
+
+	// Compare only the path portion when either side is a full URL.
+	if lurl, err := url.Parse(left); err == nil {
+		left = lurl.EscapedPath()
+	}
+	if rurl, err := url.Parse(right); err == nil {
+		right = rurl.EscapedPath()
+	}
+	// Drop a single leading "/" so absolute paths compare equal to relative ones.
+	// (Idiomatic TrimPrefix replaces the previous manual left[1:len(left)] slicing.)
+	left = strings.TrimPrefix(left, "/")
+	right = strings.TrimPrefix(right, "/")
+	if strings.HasSuffix(left, right) || strings.HasSuffix(right, left) {
+		return true
+	}
+	lcomp := strings.Split(left, "/")
+	rcomp := strings.Split(right, "/")
+	li := 0
+	ri := 0
+	for li < len(lcomp) && ri < len(rcomp) {
+		switch {
+		case lcomp[li] == rcomp[ri]:
+			li++
+			ri++
+		case selfLinkIgnorableComponents.Contains(lcomp[li]):
+			li++
+		case selfLinkIgnorableComponents.Contains(rcomp[ri]):
+			ri++
+		// The second-to-last element in a long-form self-link contains the
+		// name of the resource. The name of the resource might be anything,
+		// rather than keep a list of all resources, we will just ignore
+		// the second-to-last field if one argument is exactly one remaining
+		// field longer than the other.
+		case len(lcomp) == li+2 && len(rcomp) == ri+1:
+			li++
+		case len(rcomp) == ri+2 && len(lcomp) == li+1:
+			ri++
+		default:
+			return false
+		}
+	}
+	return true
+}
+
+// StringCanonicalize checks canonicalization for strings. It matches self-links using NameToSelfLink.
+func StringCanonicalize(l, r *string) bool {
+	if l == nil && r == nil {
+		return true
+	}
+	if l == nil || r == nil {
+		return false
+	}
+	left := *l
+	right := *r
+
+	if left == right {
+		return true
+	}
+
+	// Fall back to self-link-aware comparison only when either side looks like
+	// a (partial) self link; plain unequal strings are simply not equal.
+	if IsPartialSelfLink(left) || IsPartialSelfLink(right) || IsSelfLink(left) || IsSelfLink(right) {
+		return NameToSelfLink(l, r)
+	}
+
+	return false
+}
+
+// StringArrayCanonicalize checks canonicalization for arrays of strings. It matches self-links using NameToSelfLink.
+func StringArrayCanonicalize(l, r []string) bool { + if len(l) != len(r) { + return false + } + for i := range l { + if !StringCanonicalize(&l[i], &r[i]) { + return false + } + } + return true +} + +// BoolCanonicalize checks canonicalization for booleans. +func BoolCanonicalize(l, r *bool) bool { + if l == nil && r == nil { + return true + } + if l != nil && r == nil { + left := *l + return left == false + } + + if r != nil && l == nil { + right := *r + return right == false + } + + left := *l + right := *r + + return left == right +} + +// NameToSelfLink returns true if left and right are equivalent for Names / SelfLinks. +// It allows all the deviations that SelfLinkToSelfLink allows, plus it allows one +// of the values to simply be the last element of the other value. +func NameToSelfLink(l, r *string) bool { + if l == nil && r == nil { + return true + } + if l == nil || r == nil { + return false + } + left := *l + right := *r + + if left == right { + return true + } + lcomp := strings.Split(left, "/") + rcomp := strings.Split(right, "/") + if len(lcomp) > 1 && len(rcomp) > 1 { + return SelfLinkToSelfLink(&left, &right) + } + if len(lcomp) > 1 && lcomp[len(lcomp)-1] == right { + return true + } + if len(rcomp) > 1 && rcomp[len(rcomp)-1] == left { + return true + } + return false +} + +// PartialSelfLinkToSelfLink returns true if left and right are equivalent for SelfLinks and partial +// SelfLinks. 
It allows all the deviations that SelfLink allows, except that it works +// backwards, and returns true when one or the other is empty - in that sense, it allows whatever +// specification, starting from the most-specific +func PartialSelfLinkToSelfLink(l, r *string) bool { + if l == nil && r == nil { + return true + } + if l == nil || r == nil { + return false + } + left := *l + right := *r + + if left == "" && right == "" { + return true + } + if left == "" || right == "" { + return false + } + if NameToSelfLink(&left, &right) { + return true + } + lcomp := strings.Split(left, "/") + rcomp := strings.Split(right, "/") + li := len(lcomp) - 1 + ri := len(rcomp) - 1 + for li >= 0 && ri >= 0 { + switch { + case lcomp[li] == rcomp[ri]: + li-- + ri-- + case selfLinkIgnorableComponents.Contains(lcomp[li]): + li-- + case selfLinkIgnorableComponents.Contains(rcomp[ri]): + ri-- + // As in SelfLinkToSelfLink, we permit any value in the second-to-last field + // for the value which is longer. + case len(lcomp) == li+2 && len(rcomp) == ri+2 && li > ri: + li-- + case len(lcomp) == li+2 && len(rcomp) == ri+2 && ri > li: + ri-- + default: + return false + } + + } + return true +} + +// PartialSelfLinkToSelfLinkArray returns true if left and right are all equivalent for SelfLinks. +func PartialSelfLinkToSelfLinkArray(l, r []string) bool { + if len(l) != len(r) { + return false + } + for i := range l { + if !PartialSelfLinkToSelfLink(&l[i], &r[i]) { + return false + } + } + return true +} + +func WithoutTrailingDotArrayInterface(l, r any) bool { + lVal, _ := l.([]string) + rVal, _ := r.([]string) + return WithoutTrailingDotArray(lVal, rVal) +} + +// WithoutTrailingDotArray returns true if WithoutTrailingDot returns true for each +// pair of elements in the lists. 
+func WithoutTrailingDotArray(l, r []string) bool {
+	if len(l) != len(r) {
+		return false
+	}
+	for i, lv := range l {
+		if !WithoutTrailingDot(lv, r[i]) {
+			return false
+		}
+	}
+	return true
+}
+
+// WithoutTrailingDot returns true if the arguments are equivalent ignoring a final period.
+// This is useful for comparing absolute & relative domain names.
+func WithoutTrailingDot(l, r string) bool {
+	return strings.TrimSuffix(l, ".") == strings.TrimSuffix(r, ".")
+}
+
+// QuoteAndCaseInsensitiveString returns true if the arguments are considered equal ignoring case
+// and quotedness (e.g. "foo" and foo are equivalent).
+func QuoteAndCaseInsensitiveString(l, r *string) bool {
+	if l == nil && r == nil {
+		return true
+	}
+	if l == nil || r == nil {
+		return false
+	}
+	// Unquote into locals; the caller's strings are never modified.
+	if uq, err := strconv.Unquote(*l); err == nil {
+		l = &uq
+	}
+	if uq, err := strconv.Unquote(*r); err == nil {
+		r = &uq
+	}
+	return CaseInsensitiveString(l, r)
+}
+
+// QuoteAndCaseInsensitiveStringArray returns true if the arguments are considered equal ignoring case
+// and quotedness (e.g. "foo" and foo are equivalent), but including ordering.
+func QuoteAndCaseInsensitiveStringArray(l, r []string) bool {
+	if len(l) != len(r) {
+		return false
+	}
+	// Bug fix: the previous implementation wrote the unquoted values back into
+	// l and r, mutating the caller's slices as a side effect of a comparison.
+	// Unquote into fresh slices instead.
+	lu := make([]string, len(l))
+	ru := make([]string, len(r))
+	for i := range l {
+		lu[i] = l[i]
+		if uq, err := strconv.Unquote(l[i]); err == nil {
+			lu[i] = uq
+		}
+		ru[i] = r[i]
+		if uq, err := strconv.Unquote(r[i]); err == nil {
+			ru[i] = uq
+		}
+	}
+	return CaseInsensitiveStringArray(lu, ru)
+}
+
+// CaseInsensitiveStringArray returns true if the arguments are considered equal ignoring case,
+// but including ordering.
+func CaseInsensitiveStringArray(l, r []string) bool {
+	if len(l) != len(r) {
+		return false
+	}
+	for i, lv := range l {
+		if !strings.EqualFold(lv, r[i]) {
+			return false
+		}
+	}
+	return true
+}
+
+// CaseInsensitiveString returns true if the arguments are considered equal ignoring case.
+func CaseInsensitiveString(l, r *string) bool { + if l == nil && r == nil { + return true + } + if l == nil || r == nil { + return false + } + return strings.EqualFold(*l, *r) +} + +// IsZeroValue returns true if the argument is considered empty/unset. +func IsZeroValue(v any) bool { + if t, ok := v.(time.Time); ok { + return t.IsZero() + } + val := reflect.ValueOf(v) + return !val.IsValid() || !reflect.Indirect(val).IsValid() || ((val.Kind() == reflect.Interface || + val.Kind() == reflect.Chan || + val.Kind() == reflect.Func || + val.Kind() == reflect.Ptr || + val.Kind() == reflect.Map || + val.Kind() == reflect.Slice) && val.IsNil()) +} + +// SliceEquals takes in two slices of strings and checks their equality +func SliceEquals(v []string, q []string) bool { + if len(v) != len(q) { + return false + } + + for i := 0; i < len(v); i++ { + if v[i] != q[i] { + return false + } + } + return true +} + +// MapEquals returns if two maps are equal, while ignoring any keys with ignorePrefixes. +func MapEquals(di, ai any, ignorePrefixes []string) bool { + d, ok := di.(map[string]string) + if !ok { + return false + } + + a, ok := ai.(map[string]string) + if !ok { + return false + } + + for k, v := range d { + if isIgnored(k, ignorePrefixes) { + continue + } + + av, ok := a[k] + if !ok { + return false + } + if !reflect.DeepEqual(v, av) { + return false + } + } + + for k, v := range a { + if isIgnored(k, ignorePrefixes) { + continue + } + + dv, ok := d[k] + if !ok { + return false + } + if !reflect.DeepEqual(v, dv) { + return false + } + } + + return true + +} + +// isIgnored returns true if this prefix should be ignored. +func isIgnored(v string, ignoredPrefixes []string) bool { + for _, p := range ignoredPrefixes { + if strings.Contains(v, p) { + return true + } + } + return false +} + +// CompareStringSets returns two slices of strings, +// one of strings in set a but not b, and one of strings in set b but not a. 
+func CompareStringSets(a, b []string) (toAdd, toRemove []string) {
+	// Build membership sets once so the comparison is O(len(a)+len(b)) instead
+	// of the previous nested-loop O(len(a)*len(b)). Output order and duplicate
+	// handling are unchanged: items are appended in input order, and duplicates
+	// in one input that are absent from the other are appended once per occurrence.
+	inB := make(map[string]struct{}, len(b))
+	for _, item := range b {
+		inB[item] = struct{}{}
+	}
+	inA := make(map[string]struct{}, len(a))
+	for _, item := range a {
+		inA[item] = struct{}{}
+	}
+	for _, item := range a {
+		if _, ok := inB[item]; !ok {
+			toAdd = append(toAdd, item)
+		}
+	}
+	for _, item := range b {
+		if _, ok := inA[item]; !ok {
+			toRemove = append(toRemove, item)
+		}
+	}
+	return
+}
+
+// WrapStringsWithKey returns a slice of maps with one key (the 'key' argument)
+// and one value (each value in 'values').
+// e.g. ("foo", ["bar", "baz", "qux"]) => [{"foo": "bar"}, {"foo": "baz"}, {"foo": "qux"}].
+// Useful for, for instance,
+// https://cloud.google.com/compute/docs/reference/rest/v1/targetPools/addHealthCheck
+func WrapStringsWithKey(key string, values []string) ([]map[string]string, error) {
+	r := make([]map[string]string, len(values))
+	for i, v := range values {
+		r[i] = map[string]string{key: v}
+	}
+	return r, nil
+}
+
+// FloatSliceEquals takes in two slices of float64s and checks their equality
+func FloatSliceEquals(v []float64, q []float64) bool {
+	if len(v) != len(q) {
+		return false
+	}
+	for i := 0; i < len(v); i++ {
+		if v[i] != q[i] {
+			return false
+		}
+	}
+	return true
+}
+
+// IntSliceEquals takes in two slices of int64s and checks their equality
+func IntSliceEquals(v []int64, q []int64) bool {
+	if len(v) != len(q) {
+		return false
+	}
+	for i := 0; i < len(v); i++ {
+		if v[i] != q[i] {
+			return false
+		}
+	}
+	return true
+}
+
+// StringSliceEquals returns true if v, q arrays of strings are equal according to StringEquals.
+func StringSliceEquals(v, q []string) bool {
+	if len(v) != len(q) {
+		return false
+	}
+	for i := 0; i < len(v); i++ {
+		if !StringEquals(&v[i], &q[i]) {
+			return false
+		}
+	}
+	return true
+}
+
+// UnorderedStringSliceEquals returns true if a, b contains same set of elements irrespective of their ordering.
+func UnorderedStringSliceEquals(a, b []string) bool { + aMap := make(map[string]int) + bMap := make(map[string]int) + + for _, val := range a { + aMap[val]++ + } + for _, val := range b { + bMap[val]++ + } + + if len(aMap) != len(bMap) { + return false + } + + for k, v := range aMap { + bv, ok := bMap[k] + if !ok { + return false + } + if v != bv { + return false + } + } + + return true +} + +// StringSliceEqualsWithSelfLink returns true if v, q arrays of strings are equal according to StringEqualsWithSelfLink +func StringSliceEqualsWithSelfLink(v, q []string) bool { + if len(v) != len(q) { + return false + } + + for i := 0; i < len(v); i++ { + if !StringEqualsWithSelfLink(&v[i], &q[i]) { + return false + } + } + return true +} + +// DeriveFieldArray calls DeriveField on each entry in the provided slice. The final +// entry in the input variadic argument can be a slice, and those values will be replaced +// by the values in the provided current value. +func DeriveFieldArray(pattern string, cVal []string, fs ...any) ([]string, error) { + var s []string + var allFs []*string + for _, f := range fs[:len(fs)-1] { + allFs = append(allFs, f.(*string)) + } + for _, cv := range cVal { + glog.Infof("deriving %q from %q, %v", pattern, cv, append(allFs, &cv)) + sval, err := DeriveField(pattern, &cv, append(allFs, &cv)...) + if err != nil { + return nil, err + } + if sval == nil { + return nil, fmt.Errorf("got nil back from DeriveField for %q", cv) + } + s = append(s, *sval) + glog.Infof("derived %q", *sval) + } + return s, nil +} + +// DeriveField deals with the outgoing portion of derived fields. The derived fields' +// inputs might be in any form - for instance, a derived name field might be set to +// project/region/name, projects/project/regions/region/objects/name, or just name. +// This function returns the best reasonable guess at the user's intent. If the current +// value (cVal) matches any of those, it will return the current value. 
If it doesn't, +// it will be ignored (even if nil). +func DeriveField(pattern string, cVal *string, fs ...*string) (*string, error) { + var currentValue string + // interface{} for fmt.Sprintf. + fields := make([]any, len(fs)) + if cVal == nil { + // might still be doable from "fields"! + currentValue = "" + } else { + currentValue = *cVal + } + for i, f := range fs { + if IsEmptyValueIndirect(f) { + if currentValue == "" { + // This field may not be required, so we shouldn't error out. + // Erroring out would cause the DCL to stop if this field isn't set (which it might not be!) + return nil, nil + } + // might still be doable from currentValue + fields[i] = "" + } else { + fields[i] = *f + } + } + + patternParts := strings.Split(pattern, "/") + valueParts := strings.Split(currentValue, "/") + + // currentValue may be a full self-link, so we need to filter out unnecessary beginning parts. + if len(valueParts) > len(patternParts) { + for index, valuePart := range valueParts { + if valuePart == patternParts[0] { + valueParts = valueParts[index:len(valueParts)] + break + } + } + } + + if len(patternParts) == len(valueParts) { + // check if the current value fits the pattern. + match := true + for i := range patternParts { + if patternParts[i] != "%s" && valueParts[i] != patternParts[i] { + match = false + break + } + } + if match { + return ¤tValue, nil + } + } + if len(valueParts) == strings.Count(pattern, "%s") { + iParts := make([]any, len(valueParts)) + for i, s := range valueParts { + iParts[i] = s + } + value := fmt.Sprintf(pattern, iParts...) + return &value, nil + } + value := fmt.Sprintf(pattern, fields...) + return &value, nil +} + +// IsEmptyValueIndirect returns true if the value provided is "empty", according +// to the golang rules. This corresponds to whether the value should be sent by the +// client if the existing value is nil - it is useful for diffing a response against a provided +// value. 
The "Indirect" refers to the fact that this method returns correct +// results even if the provided value is a pointer. +func IsEmptyValueIndirect(i any) bool { + if i == nil { + return true + } + + rt := reflect.TypeOf(i) + switch rt.Kind() { + case reflect.Slice: + return reflect.ValueOf(i).Len() == 0 + case reflect.Array: + return rt.Len() == 0 + case reflect.Map: + return len(reflect.ValueOf(i).MapKeys()) == 0 + } + + iv := reflect.Indirect(reflect.ValueOf(i)) + + // All non-nil bool values are not empty. + if iv.Kind() == reflect.Bool { + return false + } + + if !iv.IsValid() || iv.IsZero() { + return true + } + if hasEmptyStructField(i) { + return true + } + return false +} + +// hasEmptyStructField returns true if the provided value is a struct +// with an unexported field called 'empty', and that value is a boolean, +// and that boolean is true. This is useful when a user needs to explicitly +// set their intention that a value be empty. +func hasEmptyStructField(i any) bool { + iv := reflect.Indirect(reflect.ValueOf(i)) + if !iv.IsValid() { + return false + } + if iv.Kind() == reflect.Struct { + if iv.FieldByName("empty").IsValid() && iv.FieldByName("empty").Bool() { + return true + } + } + return false +} + +// MatchingSemverInterface matches two interfaces according to MatchingSemver +func MatchingSemverInterface(lp, rp any) bool { + if lp == nil && rp == nil { + return true + } + if lp == nil || rp == nil { + return false + } + + lpVal, _ := lp.(*string) + rpVal, _ := rp.(*string) + return MatchingSemver(lpVal, rpVal) +} + +// MatchingSemver returns whether the two strings should be considered equivalent +// according to semver rules. If one provides more detail than the other, this is +// acceptable, as long as both are consistent in the detail they do provide. +// For instance, 1.16 == 1.16.4 != 1.15. 
+// MatchingSemver reports whether two version strings should be treated as
+// the same version. nil matches only nil; "latest" on either side matches
+// anything; the default marker "-" on the left matches anything; otherwise
+// the pre-dash components are compared dot-segment by dot-segment as far as
+// both sides specify.
+func MatchingSemver(lp, rp *string) bool {
+	if lp == nil && rp == nil {
+		return true
+	}
+	if lp == nil || rp == nil {
+		return false
+	}
+	l := *lp
+	r := *rp
+	if l == "latest" || r == "latest" {
+		return true
+	}
+
+	// If default version chosen, we should assume API returned the default version.
+	if l == "-" {
+		return true
+	}
+
+	ld := strings.Split(l, "-")
+	rd := strings.Split(r, "-")
+	if ld[0] == rd[0] {
+		return true
+	}
+	if len(ld) == 2 && len(rd) == 2 {
+		// nonmatching post-dash version.
+		return false
+	}
+	ldo := strings.Split(ld[0], ".")
+	rdo := strings.Split(rd[0], ".")
+
+	// Compare only as many dot-segments as both sides provide, so "1.2"
+	// matches "1.2.3".
+	for i := 0; i < len(ldo) && i < len(rdo); i++ {
+		if ldo[i] != rdo[i] {
+			return false
+		}
+	}
+	return true
+}
+
+// DeriveFromPattern attempts to achieve the same end goal as DeriveField
+// but by using regular expressions rather than assumptions about the
+// format of the inputs based on the number of `/`. This is important for fields that allow `/`
+// characters in their names.
+func DeriveFromPattern(pattern string, cVal *string, fs ...*string) (*string, error) {
+	var currentValue string
+	if cVal == nil {
+		// might still be doable from "fields"!
+		currentValue = ""
+	} else {
+		currentValue = *cVal
+	}
+
+	if !strings.HasSuffix(pattern, "%s") {
+		// If the pattern does not end with %s we cannot assume anything past the last expected
+		// `/` character is part of a name
+		return nil, fmt.Errorf("pattern did not end with %%s, it does not work with the current implementation %v", pattern)
+	}
+	// Build regexp from pattern
+	regex, err := regexFromPattern(pattern)
+	if err != nil {
+		return nil, err
+	}
+
+	if matches := regex.FindStringSubmatch(currentValue); len(matches) > 0 {
+		// Found a match to the pattern, use the capture groups to populate the pattern
+		s := make([]any, len(matches))
+		for i, v := range matches {
+			s[i] = v
+		}
+		value := fmt.Sprintf(pattern, s[1:]...)
+		return &value, nil
+	}
+
+	// Did not find a match to the pattern, use the fields to populate the pattern
+	fields := make([]any, len(fs))
+
+	for i, f := range fs {
+		if f == nil {
+			// This field may not be required, so we shouldn't error out.
+			// Erroring out would cause the DCL to stop if this field isn't set (which it might not be!)
+			return nil, nil
+		}
+		fields[i] = *f
+	}
+	value := fmt.Sprintf(pattern, fields...)
+	return &value, nil
+}
+
+// regexFromPattern compiles a fmt-style pattern (with %s placeholders) into
+// a regular expression whose capture groups mirror the placeholders.
+func regexFromPattern(pattern string) (*regexp.Regexp, error) {
+	// Replace string formatting with capture groups except for the last one
+	// the last one will capture all trailing values
+	re := strings.Replace(pattern, "%s", "([^/]+)", strings.Count(pattern, "%s")-1)
+	// Wildcard capture at the end, allows for the last value to include `/` characters
+	re = strings.ReplaceAll(re, "%s", "(.+)")
+	return regexp.Compile(re)
+}
+
+// NameFromSelfLink takes in a self link string and returns the name.
+func NameFromSelfLink(sl *string) (*string, error) {
+	if sl == nil {
+		return nil, nil
+	}
+	curNameParts := strings.Split(*sl, "/")
+	val := curNameParts[len(curNameParts)-1]
+	return &val, nil
+}
+
+// StringEqualsWithSelfLink returns true if these two strings are equal.
+// If these functions are self links, they'll do self-link comparisons.
+func StringEqualsWithSelfLink(l, r *string) bool {
+	if l == nil && r == nil {
+		return true
+	}
+
+	if l == nil || r == nil {
+		return false
+	}
+
+	left := *l
+	right := *r
+
+	// When either side looks like a (partial) self link, compare only the
+	// trailing path segment (the resource name).
+	if IsSelfLink(left) || IsSelfLink(right) || IsPartialSelfLink(left) || IsPartialSelfLink(right) {
+		lp := strings.Split(left, "/")
+		rp := strings.Split(right, "/")
+		return lp[len(lp)-1] == rp[len(rp)-1]
+	} else {
+		return left == right
+	}
+}
+
+// StringEquals returns true if these two strings are equal.
+func StringEquals(l, r *string) bool {
+	if l == nil && r == nil {
+		return true
+	}
+
+	if l == nil || r == nil {
+		return false
+	}
+
+	left := *l
+	right := *r
+
+	return left == right
+}
+
+// IsPartialSelfLink returns true if this string represents a partial self link.
+func IsPartialSelfLink(s string) bool {
+	return strings.HasPrefix(s, "projects/") || strings.HasPrefix(s, "organizations/") || strings.HasPrefix(s, "folders/") || strings.HasPrefix(s, "billingAccounts/") || strings.HasPrefix(s, "tagKeys/") || strings.HasPrefix(s, "tagValues/") || strings.HasPrefix(s, "groups/")
+}
+
+// IsSelfLink returns true if this string represents a full self link.
+func IsSelfLink(s string) bool {
+	r := regexp.MustCompile(`(https:\/\/)?(www\.)?([a-z]*)?googleapis.com\/`)
+	return r.MatchString(s)
+}
+
+// ValueShouldBeSent returns if a value should be sent as part of the JSON request.
+func ValueShouldBeSent(v any) bool {
+	if v == nil {
+		return false
+	}
+
+	iv := reflect.Indirect(reflect.ValueOf(v))
+
+	// All booleans should be sent.
+	if iv.Kind() == reflect.Bool {
+		return true
+	}
+
+	if !iv.IsValid() || iv.IsZero() {
+		return false
+	}
+
+	return !IsEmptyValueIndirect(v)
+}
diff --git a/mmv1/third_party/terraform/tpgdclresource/client.go b/mmv1/third_party/terraform/tpgdclresource/client.go
new file mode 100755
index 000000000000..2ed2de5b7820
--- /dev/null
+++ b/mmv1/third_party/terraform/tpgdclresource/client.go
@@ -0,0 +1,7 @@
+package tpgdclresource
+
+// Scopes defines the common OAuth scopes needed for clients making GCP API calls.
+var Scopes = []string{
+	"https://www.googleapis.com/auth/cloud-platform",
+	"https://www.googleapis.com/auth/userinfo.email",
+}
diff --git a/mmv1/third_party/terraform/tpgdclresource/config.go b/mmv1/third_party/terraform/tpgdclresource/config.go
new file mode 100755
index 000000000000..a726ec5644b1
--- /dev/null
+++ b/mmv1/third_party/terraform/tpgdclresource/config.go
@@ -0,0 +1,573 @@
+package tpgdclresource
+
+import (
+	"context"
+	"fmt"
+	"math/rand"
+	"net/http"
+	"net/http/httputil"
+	"regexp"
+	"strings"
+	"time"
+
+	// glog aliased import is necessary since these packages will be open-sourced
+	// and that is the public name of the google logging package.
+	glog "github.com/golang/glog"
+	"golang.org/x/oauth2/google"
+	"google.golang.org/api/option"
+)
+
+const ua = "DeclarativeClientLib/0.0.1"
+
+const defaultTimeout = 15 * time.Minute
+
+// ConfigOption is used to functionally configure Configs.
+type ConfigOption func(*Config)
+
+// Config is used to enclose the credentials and http client used to make
+// requests to GCP APIs.
+type Config struct {
+	RetryProvider       RetryProvider
+	codeRetryability    map[int]Retryability
+	timeout             time.Duration
+	header              http.Header
+	clientOptions       []option.ClientOption
+	userAgent           string
+	contentType         string
+	queryParams         map[string]string
+	Logger              ContextLogger
+	BasePath            string
+	billingProject      string
+	userOverrideProject bool
+}
+
+// Retryability holds the details for one error code to determine if it is retryable.
+// The regex field is compiled for use in error handling.
+// To be retryable, the boolean must be true and the regex must match.
+type Retryability struct {
+	Retryable bool
+	Pattern   string
+	regex     *regexp.Regexp
+	Timeout   time.Duration
+}
+
+// UserAgent returns the user agent for the config, which will always include the
+// declarative SDK name and version.
+func (c *Config) UserAgent() string {
+	if c.userAgent != "" {
+		return fmt.Sprintf("%s %s", c.userAgent, ua)
+	}
+	return ua
+}
+
+// NewConfig creates a Config object.
+func NewConfig(o ...ConfigOption) *Config {
+	retryable := Retryability{
+		Retryable: true,
+		regex:     regexp.MustCompile(".*"),
+		Timeout:   defaultTimeout,
+	}
+	nonretryable := Retryability{Retryable: false}
+	c := &Config{
+		// Default retry policy per HTTP status code; overridable via
+		// WithCodeRetryability.
+		codeRetryability: map[int]Retryability{
+			400: Retryability{
+				Retryable: true,
+				regex:     regexp.MustCompile("The resource '[-/a-zA-Z0-9]*' is not ready"),
+				Timeout:   defaultTimeout,
+			},
+			403: Retryability{
+				Retryable: true,
+				regex:     regexp.MustCompile(".*API request rate quota.*"),
+				Timeout:   defaultTimeout,
+			},
+			404: nonretryable,
+			409: nonretryable,
+			429: retryable,
+			500: retryable,
+			502: retryable,
+			503: retryable,
+		},
+		contentType: "application/json",
+		queryParams: map[string]string{"alt": "json"},
+		Logger: ContextLogger{
+			logger: DefaultLogger(LoggerInfo),
+		},
+		RetryProvider: &BackoffRetryProvider{},
+	}
+
+	for _, opt := range o {
+		opt(c)
+	}
+
+	return c
+}
+
+// Clone returns a copy of an existing Config with optional new values.
+func (c *Config) Clone(o ...ConfigOption) *Config {
+	result := &Config{
+		RetryProvider:       c.RetryProvider,
+		codeRetryability:    c.codeRetryability,
+		timeout:             c.timeout,
+		clientOptions:       c.clientOptions,
+		userAgent:           c.userAgent,
+		contentType:         c.contentType,
+		queryParams:         c.queryParams,
+		Logger:              c.Logger,
+		BasePath:            c.BasePath,
+		billingProject:      c.billingProject,
+		userOverrideProject: c.userOverrideProject,
+	}
+
+	// header is the only deep-copied field; the rest are shared by value.
+	if c.header != nil {
+		result.header = c.header.Clone()
+	}
+
+	for _, opt := range o {
+		opt(result)
+	}
+
+	return result
+}
+
+// TimeoutOr returns a timeout for this config. If WithTimeout() was called, that timeout
+// is used; if WithTimeout() was not called and a value is provided with `t`, that is used.
+// Otherwise the default timeout is returned.
+func (c *Config) TimeoutOr(t time.Duration) time.Duration {
+	if c.timeout != 0 {
+		return c.timeout
+	} else if t != 0 {
+		return t
+	}
+	return defaultTimeout
+}
+
+// loggingTransport wraps an http.RoundTripper and dumps requests/responses
+// through the configured ContextLogger.
+type loggingTransport struct {
+	underlyingTransport http.RoundTripper
+	logger              ContextLogger
+}
+
+func (t loggingTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+	shouldLogRequest, err := ShouldLogRequest(req.Context())
+	if err != nil {
+		t.logger.Infof("Error fetching ShouldLogRequest value: %v", err)
+	}
+	// randString correlates the request dump with its response dump in logs.
+	reqDump, err := httputil.DumpRequestOut(req, true)
+	randString := RandomString(5)
+	if err == nil {
+		if shouldLogRequest {
+			t.logger.InfoWithContextf(req.Context(), "Google API Request: (id %s)\n-----------[REQUEST]----------\n%s\n-------[END REQUEST]--------", randString, strings.ReplaceAll(string(reqDump), "\r\n", "\n"))
+		}
+	} else {
+		t.logger.WarningWithContextf(req.Context(), "Failed to make request (id %s): %s", randString, err)
+	}
+	resp, err := t.underlyingTransport.RoundTrip(req)
+	if err == nil {
+		respDump, err := httputil.DumpResponse(resp, true)
+		if err == nil {
+			respDumpStr := string(respDump)
+			if shouldLogRequest {
+				t.logger.InfoWithContextf(req.Context(), "Google API Response: (id %s) \n-----------[RESPONSE]----------\n%s\n-------[END RESPONSE]--------", randString, strings.ReplaceAll(respDumpStr, "\r\n", "\n"))
+			} else if resp.StatusCode >= 400 || strings.Contains(respDumpStr, "error") {
+				// Even with request logging disabled, surface exchanges that
+				// failed (4xx/5xx) or whose body mentions an error.
+				t.logger.InfoWithContextf(req.Context(), "Google API Request: (id %s)\n-----------[REQUEST]----------\n%s\n-------[END REQUEST]--------", randString, strings.ReplaceAll(string(reqDump), "\r\n", "\n"))
+				t.logger.InfoWithContextf(req.Context(), "Google API Response: (id %s) \n-----------[RESPONSE]----------\n%s\n-------[END RESPONSE]--------", randString, strings.ReplaceAll(respDumpStr, "\r\n", "\n"))
+			}
+		} else {
+			t.logger.WarningWithContextf(req.Context(), "Failed to parse response (id %s): %s", randString, err)
+		}
+	} else {
+		t.logger.WarningWithContextf(req.Context(), "Failed to get response (id %s): %s", randString, err)
+	}
+	return resp, err
+}
+
+// ApplyOption is an option that is accepted by Apply() functions.
+type ApplyOption interface {
+	Apply(*ApplyOpts)
+}
+
+// ApplyOpts refers to options that are taken in the apply function.
+type ApplyOpts struct {
+	params    []LifecycleParam
+	stateHint Resource
+}
+
+type lifecycleParamOption struct {
+	param LifecycleParam
+}
+
+func (l lifecycleParamOption) Apply(o *ApplyOpts) {
+	o.params = append(o.params, l.param)
+}
+
+// WithLifecycleParam allows a user to specify the proper lifecycle params.
+func WithLifecycleParam(d LifecycleParam) ApplyOption {
+	return lifecycleParamOption{param: d}
+}
+
+// FetchLifecycleParams returns the list of lifecycle params.
+func FetchLifecycleParams(c []ApplyOption) []LifecycleParam {
+	var o ApplyOpts
+	for _, p := range c {
+		p.Apply(&o)
+	}
+	return o.params
+}
+
+type stateHint struct {
+	state Resource
+}
+
+func (s stateHint) Apply(o *ApplyOpts) {
+	o.stateHint = s.state
+}
+
+// WithStateHint takes in a resource which will be used in place of the applied
+// resource any time the current configuration of the resource is relevant.
+// For instance, if an identity field will change, passing a state hint will ensure
+// that the current resource is fetched (and possibly deleted).
+func WithStateHint(r Resource) ApplyOption {
+	return stateHint{state: r}
+}
+
+// FetchStateHint returns either nil or a Resource representing the pre-apply state.
+func FetchStateHint(c []ApplyOption) Resource {
+	var o ApplyOpts
+	for _, p := range c {
+		p.Apply(&o)
+	}
+	return o.stateHint
+}
+
+// WithRetryProvider allows a user to override default exponential backoff retry behavior.
+func WithRetryProvider(r RetryProvider) ConfigOption {
+	return func(c *Config) {
+		c.RetryProvider = r
+	}
+}
+
+// WithCodeRetryability allows a user to add additional retryable or non-retryable error codes.
+// Each error code is mapped to a regexp which must match the error message to be retryable.
+func WithCodeRetryability(cr map[int]Retryability) ConfigOption {
+	return func(c *Config) {
+		for code, retryability := range cr {
+			// Non-retryable errors do not need a regex to check against.
+			var re *regexp.Regexp
+			if retryability.Retryable {
+				re = regexp.MustCompile(retryability.Pattern)
+			}
+			// If timeout for this retryable error was not specified, assume default.
+			to := defaultTimeout
+			if retryability.Timeout > 0 {
+				to = retryability.Timeout
+			}
+			c.codeRetryability[code] = Retryability{
+				Retryable: retryability.Retryable,
+				regex:     re,
+				Timeout:   to,
+			}
+		}
+	}
+}
+
+// WithTimeout allows a user to override default operation timeout.
+func WithTimeout(to time.Duration) ConfigOption {
+	return func(c *Config) {
+		c.timeout = to
+	}
+}
+
+// WithLogger allows a user to specify a custom logger.
+func WithLogger(l Logger) ConfigOption {
+	return func(c *Config) {
+		c.Logger.logger = l
+	}
+}
+
+// WithContextLogger allows a user to specify a custom context logger.
+func WithContextLogger(l ContextLogger) ConfigOption {
+	return func(c *Config) {
+		c.Logger = l
+	}
+}
+
+// WithBasePath allows a base path to be overridden.
+func WithBasePath(b string) ConfigOption {
+	return func(c *Config) {
+		c.BasePath = b
+	}
+}
+
+// WithHeader allows arbitrary HTTP headers to be added to requests. Not all headers
+// (e.g., "Content-Type") can be overridden. To set the User-Agent header, use WithUserAgent().
+func WithHeader(header, value string) ConfigOption {
+	return func(c *Config) {
+		if c.header == nil {
+			c.header = make(http.Header)
+		}
+		c.header.Add(header, value)
+	}
+}
+
+// WithUserAgent allows a user to specify a custom user-agent.
+func WithUserAgent(ua string) ConfigOption {
+	return func(c *Config) {
+		c.userAgent = ua
+	}
+}
+
+// WithContentType allows a user to override the default Content-Type header.
+func WithContentType(ct string) ConfigOption {
+	return func(c *Config) {
+		c.contentType = ct
+	}
+}
+
+// WithQueryParams allows a user to override the default query parameters.
+func WithQueryParams(ps map[string]string) ConfigOption {
+	return func(c *Config) {
+		c.queryParams = ps
+	}
+}
+
+// WithAPIKey returns a ConfigOption that specifies an API key to be used as the basis for authentication.
+func WithAPIKey(apiKey string) ConfigOption {
+	return func(c *Config) {
+		c.clientOptions = append(c.clientOptions, option.WithAPIKey(apiKey))
+	}
+}
+
+// WithClientCertSource returns a ConfigOption that specifies a callback function for obtaining a TLS client certificate.
+func WithClientCertSource(s option.ClientCertSource) ConfigOption {
+	return func(c *Config) {
+		c.clientOptions = append(c.clientOptions, option.WithClientCertSource(s))
+	}
+}
+
+// WithCredentials returns a ConfigOption that authenticates API calls using a caller-supplied Credentials struct.
+func WithCredentials(creds *google.Credentials) ConfigOption {
+	return func(c *Config) {
+		c.clientOptions = append(c.clientOptions, option.WithCredentials(creds))
+	}
+}
+
+// WithCredentialsFile returns a ConfigOption that authenticates API calls with the given service account or refresh token JSON credentials file.
+func WithCredentialsFile(filename string) ConfigOption {
+	return func(c *Config) {
+		c.clientOptions = append(c.clientOptions, option.WithCredentialsFile(filename))
+	}
+}
+
+// WithCredentialsJSON returns a ConfigOption that authenticates API calls with the given service account or refresh token JSON credentials.
+func WithCredentialsJSON(p []byte) ConfigOption {
+	return func(c *Config) {
+		c.clientOptions = append(c.clientOptions, option.WithCredentialsJSON(p))
+	}
+}
+
+// WithHTTPClient returns a ConfigOption that specifies the HTTP client to use as the basis of communications.
+// When used, the WithHTTPClient option takes precedent over all other supplied authentication options.
+func WithHTTPClient(client *http.Client) ConfigOption {
+	return func(c *Config) {
+		c.clientOptions = append(c.clientOptions, option.WithHTTPClient(client))
+	}
+}
+
+// WithBillingProject returns a ConfigOption that specifies the user override project.
+// This will be used to set X-Goog-User-Project on API calls.
+// This option will be ignored unless WithUserProjectOverride is also used.
+func WithBillingProject(project string) ConfigOption {
+	return func(c *Config) {
+		c.billingProject = project
+	}
+}
+
+// WithUserProjectOverride returns a ConfigOption that turns on WithUserProjectOverride.
+// This will send the X-Goog-User-Project on API calls.
+func WithUserProjectOverride() ConfigOption {
+	return func(c *Config) {
+		c.userOverrideProject = true
+	}
+}
+
+// Logger is an interface for logging requests and responses.
+type Logger interface {
+	Fatal(args ...any)
+	Fatalf(format string, args ...any)
+	Info(args ...any)
+	Infof(format string, args ...any)
+	Warningf(format string, args ...any)
+	Warning(args ...any)
+}
+
+// ContextLogger is the internal logger implementation.
+type ContextLogger struct {
+	logger Logger
+}
+
+// LoggerLevel is the most basic level that a logger should print.
+// Anything at this level or more severe will be printed by this logger.
+type LoggerLevel int32
+
+const (
+	// Fatal will print only Fatal logs.
+	Fatal LoggerLevel = iota
+	// Error will print Error and all Fatal logs.
+	Error
+	// Warning will print Warning and all Error logs.
+	Warning
+	// LoggerInfo will print Info and all Warning logs.
+	LoggerInfo
+)
+
+// DefaultLogger returns the default logger for the Declarative Client Library.
+func DefaultLogger(level LoggerLevel) Logger {
+	return glogger{level: level}
+}
+
+type glogger struct {
+	level LoggerLevel
+}
+
+// Fatal records Fatal errors.
+func (l glogger) Fatal(args ...any) {
+	if l.level >= Fatal {
+		glog.Fatal(args...)
+	}
+}
+
+// Fatalf records Fatal errors with added arguments.
+func (l glogger) Fatalf(format string, args ...any) {
+	if l.level >= Fatal {
+		glog.Fatalf(format, HandleLogArgs(args...)...)
+	}
+}
+
+// Info records Info errors.
+func (l glogger) Info(args ...any) {
+	if l.level >= LoggerInfo {
+		glog.Info(args...)
+	}
+}
+
+// Infof records Info errors with added arguments.
+func (l glogger) Infof(format string, args ...any) {
+	if l.level >= LoggerInfo {
+		glog.Infof(format, HandleLogArgs(args...)...)
+	}
+}
+
+// Warningf records Warning errors with added arguments.
+func (l glogger) Warningf(format string, args ...any) {
+	if l.level >= Warning {
+		glog.Warningf(format, HandleLogArgs(args...)...)
+	}
+}
+
+// Warning records Warning errors.
+func (l glogger) Warning(args ...any) {
+	if l.level >= Warning {
+		glog.Warning(args...)
+	}
+}
+
+// Fatal records Fatal errors.
+func (l ContextLogger) Fatal(args ...any) {
+	l.logger.Fatal(args...)
+}
+
+// Fatalf records Fatal errors with added arguments.
+func (l ContextLogger) Fatalf(format string, args ...any) {
+	l.logger.Fatalf(format, HandleLogArgs(args...)...)
+}
+
+// Info records Info errors.
+func (l ContextLogger) Info(args ...any) {
+	l.logger.Info(args...)
+}
+
+// Infof records Info errors with added arguments.
+func (l ContextLogger) Infof(format string, args ...any) {
+	l.logger.Infof(format, HandleLogArgs(args...)...)
+}
+
+// Warningf records Warning errors with added arguments.
+func (l ContextLogger) Warningf(format string, args ...any) {
+	l.logger.Warningf(format, HandleLogArgs(args...)...)
+}
+
+// Warning records Warning errors.
+func (l ContextLogger) Warning(args ...any) {
+	l.logger.Warning(args...)
+}
+
+// FatalWithContext records Fatal errors with context values.
+func (l ContextLogger) FatalWithContext(ctx context.Context, args ...any) {
+	args = append([]any{ConstructLogPrefixFromContext(ctx)}, args...)
+	l.Fatal(args...)
+}
+
+// FatalWithContextf records Fatal errors with added arguments with context values.
+func (l ContextLogger) FatalWithContextf(ctx context.Context, format string, args ...any) {
+	format = fmt.Sprintf("%s %s", ConstructLogPrefixFromContext(ctx), format)
+	l.Fatalf(format, args...)
+}
+
+// InfoWithContext records Info errors with context values.
+func (l ContextLogger) InfoWithContext(ctx context.Context, args ...any) {
+	args = append([]any{ConstructLogPrefixFromContext(ctx)}, args...)
+	l.Info(args...)
+}
+
+// InfoWithContextf records Info errors with added arguments with context values.
+func (l ContextLogger) InfoWithContextf(ctx context.Context, format string, args ...any) {
+	format = fmt.Sprintf("%s %s", ConstructLogPrefixFromContext(ctx), format)
+	l.Infof(format, args...)
+}
+
+// WarningWithContextf records Warning errors with added arguments with context values.
+func (l ContextLogger) WarningWithContextf(ctx context.Context, format string, args ...any) {
+	format = fmt.Sprintf("%s %s", ConstructLogPrefixFromContext(ctx), format)
+	// NOTE(review): HandleLogArgs is applied here and again inside Warningf;
+	// this is harmless because it only dereferences *string args (idempotent).
+	l.Warningf(format, HandleLogArgs(args...)...)
+}
+
+// WarningWithContext records Warning errors with context values.
+func (l ContextLogger) WarningWithContext(ctx context.Context, args ...any) {
+	args = append([]any{ConstructLogPrefixFromContext(ctx)}, args...)
+	l.Warning(args...)
+}
+
+// HandleLogArgs ensures that pointer arguments are dereferenced well.
+func HandleLogArgs(args ...any) []any {
+	a := make([]any, len(args))
+	for i, v := range args {
+		if s, ok := v.(*string); ok && s != nil {
+			a[i] = *s
+		} else {
+			a[i] = v
+		}
+	}
+	return a
+}
+
+// ConstructLogPrefixFromContext constructs log prefix from info in context
+func ConstructLogPrefixFromContext(ctx context.Context) string {
+	return fmt.Sprintf("[RequestID:%s] ", APIRequestID(ctx))
+}
+
+// RandomString generates a random alpha-numeric string of input length.
+func RandomString(length int) string {
+	charset := "abcdefghijklmnopqrstuvwxyz0123456789" // full [a-z0-9] alphabet (original omitted 'p')
+	var seededRand *rand.Rand = rand.New(
+		rand.NewSource(time.Now().UnixNano())) // fresh time-seeded source per call; not crypto-safe
+
+	b := make([]byte, length)
+	for i := range b {
+		b[i] = charset[seededRand.Intn(len(charset))]
+	}
+	return string(b)
+}
+
+// CreateAPIRequestID creates a random APIRequestId.
+func CreateAPIRequestID() string {
+	return RandomString(8)
+}
diff --git a/mmv1/third_party/terraform/tpgdclresource/context.go b/mmv1/third_party/terraform/tpgdclresource/context.go
new file mode 100755
index 000000000000..9a79dd9f0bf2
--- /dev/null
+++ b/mmv1/third_party/terraform/tpgdclresource/context.go
@@ -0,0 +1,56 @@
+package tpgdclresource
+
+import (
+	"context"
+	"fmt"
+
+	glog "github.com/golang/glog"
+)
+
+// ReqCtxKey is the key type for storing values in the context.
+// Context requires custom type key.
+type ReqCtxKey string
+
+// Keys used in context Value.
+const (
+	DoNotLogRequestsKey ReqCtxKey = "DoNotLogRequestsKey"
+	APIRequestIDKey     ReqCtxKey = "APIRequestIDKey"
+)
+
+// APIRequestID returns the RequestID for the API call.
+// APIRequestID is supposed to be used in log to help with debugging
+// Since we do not want explicit error handling everywhere we are logging, so not throwing error.
+// It's okay to print empty requestID in worse scenario.
+func APIRequestID(ctx context.Context) string {
+	val := ctx.Value(APIRequestIDKey)
+	if val == nil {
+		return ""
+	}
+	requestID, ok := val.(string)
+	if !ok {
+		glog.Warning("Could not convert APIRequestID val to string")
+		return ""
+	}
+	return requestID
+}
+
+// ShouldLogRequest returns true if the request should be logged.
+func ShouldLogRequest(ctx context.Context) (bool, error) {
+	val := ctx.Value(DoNotLogRequestsKey)
+	if val == nil {
+		// Absent key means logging is allowed.
+		return true, nil
+	}
+	doNotLog, ok := val.(bool)
+	if !ok {
+		return false, fmt.Errorf("could not convert DoNotLogRequests value to bool")
+	}
+	return !doNotLog, nil
+}
+
+// ContextWithRequestID adds APIRequestID to ctx if APIRequestID is not present.
+func ContextWithRequestID(ctx context.Context) context.Context {
+	if APIRequestID(ctx) != "" {
+		return ctx
+	}
+	return context.WithValue(ctx, APIRequestIDKey, CreateAPIRequestID())
+}
diff --git a/mmv1/third_party/terraform/tpgdclresource/dcl.go b/mmv1/third_party/terraform/tpgdclresource/dcl.go
index ad5c5f2ce219..d960e170b6dd 100644
--- a/mmv1/third_party/terraform/tpgdclresource/dcl.go
+++ b/mmv1/third_party/terraform/tpgdclresource/dcl.go
@@ -1,20 +1,19 @@
 package tpgdclresource
 
 import (
-	dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
 )
 
 var (
 	// CreateDirective restricts Apply to creating resources for Create
-	CreateDirective = []dcl.ApplyOption{
-		dcl.WithLifecycleParam(dcl.BlockAcquire),
-		dcl.WithLifecycleParam(dcl.BlockDestruction),
-		dcl.WithLifecycleParam(dcl.BlockModification),
+	CreateDirective = []ApplyOption{
+		WithLifecycleParam(BlockAcquire),
+		WithLifecycleParam(BlockDestruction),
+		WithLifecycleParam(BlockModification),
 	}
 
 	// UpdateDirective restricts Apply to modifying resources for Update
-	UpdateDirective = []dcl.ApplyOption{
-		dcl.WithLifecycleParam(dcl.BlockCreation),
-		dcl.WithLifecycleParam(dcl.BlockDestruction),
+	UpdateDirective = []ApplyOption{
+		WithLifecycleParam(BlockCreation),
+		WithLifecycleParam(BlockDestruction),
 	}
 )
diff --git a/mmv1/third_party/terraform/tpgdclresource/declarative.go b/mmv1/third_party/terraform/tpgdclresource/declarative.go
new file mode 100755
index 000000000000..8362540aeffa
--- /dev/null
+++ b/mmv1/third_party/terraform/tpgdclresource/declarative.go
@@ -0,0 +1,370 @@
+package tpgdclresource
+
+import (
+	"encoding/json"
+	"fmt"
+	"strconv"
+	"time"
+
+	"github.com/kylelemons/godebug/pretty"
+)
+
+// LifecycleParam is used to specify what operations are acceptable.
+type LifecycleParam int
+
+const (
+	// BlockDestruction blocks deleting the resource. This will
+	// cause some Apply operations to be impossible - some fields
+	// cannot be modified and require a destroy/recreate.
+	BlockDestruction LifecycleParam = iota
+	// BlockAcquire will error if the resource already exists.
+	BlockAcquire
+	// BlockCreation will error if the resource does not exist.
+	BlockCreation
+	// BlockModification will error if the resource is not in the desired state.
+	BlockModification
+	// IgnoreIfMissing does not create (and does not error) if the resource
+	// does not exist.
+	IgnoreIfMissing
+	// NoopOnDestroy does not destroy the resource, even if Delete() is
+	// called.
+	NoopOnDestroy
+)
+
+// HasLifecycleParam returns whether the given slice has the requested param.
+func HasLifecycleParam(lps []LifecycleParam, p LifecycleParam) bool {
+	for _, lp := range lps {
+		if lp == p {
+			return true
+		}
+	}
+	return false
+}
+
+// SprintResourceCompact prints a struct into a compact single line string.
+func SprintResourceCompact(v any) string {
+	prettyConfig := &pretty.Config{
+		Compact:           true,
+		IncludeUnexported: true,
+	}
+	return prettyConfig.Sprint(v)
+}
+
+// SprintResource prints a struct into a multiline string to display to readers.
+func SprintResource(v any) string {
+	prettyConfig := &pretty.Config{
+		Diffable:          true, // add line between braces and first/last val
+		IncludeUnexported: true,
+	}
+	return prettyConfig.Sprint(v)
+}
+
+// EmptyValue returns an empty value to exclude PARAMETER-type values from
+// being expanded
+func EmptyValue() (map[string]any, error) {
+	return nil, nil
+}
+
+/*
+ *
+ * Default Flatten functions
+ *
+ * Flatten functions are expected to return the value stored in the interface{},
+ * returning a zero value otherwise. For primitive types and arrays that's a nil,
+ * but for types like time.Time it may be an empty object.
+ *
+ * Flattens need to consider the behaviour of https://developers.google.com/discovery/v1/type-format
+ * and of https://golang.org/pkg/encoding/json/#Unmarshal. As well, values may
+ * get inserted into the JSON map with their correct types already by decoders.
+ */
+
+// FlattenInteger turns an interface pointing to an arbitrary type into *int64,
+// taking into account that it may have been represented as various types per
+// https://developers.google.com/discovery/v1/type-format.
+// string, float64, int32, int64, and int values will return a *int64.
+// nil returns nil; unrecognised types return a pointer to 0.
+func FlattenInteger(v any) *int64 {
+	if v == nil {
+		return nil
+	}
+	// int64 values are represented as strings
+	if strVal, ok := v.(string); ok {
+		if intVal, err := strconv.ParseInt(strVal, 10, 64); err == nil {
+			return &intVal
+		}
+	}
+
+	// number values are represented as float64
+	if floatVal, ok := v.(float64); ok {
+		intVal := int64(floatVal)
+		return &intVal
+	}
+
+	// ints won't appear per https://golang.org/pkg/encoding/json/#Unmarshal
+	// but may get inserted by decoders in unmarshalResource
+	if int32Val, ok := v.(int32); ok {
+		intVal := int64(int32Val)
+		return &intVal
+	}
+
+	if intVal, ok := v.(int64); ok {
+		return &intVal
+	}
+
+	if machineIntVal, ok := v.(int); ok {
+		intVal := int64(machineIntVal)
+		return &intVal
+	}
+
+	return Int64(0)
+}
+
+// FlattenDouble asserts that an interface is a float64 and returns a pointer to it,
+// or to 0.0 if the value is invalid.
+func FlattenDouble(v any) *float64 {
+	if v == nil {
+		return nil
+	}
+	p, ok := v.(float64)
+	if !ok {
+		return Float64(0.0)
+	}
+
+	return &p
+}
+
+// FlattenKeyValuePairs asserts that an interface is a map[string]string and
+// returns it, or an empty map if the value is invalid.
+func FlattenKeyValuePairs(v any) map[string]string {
+	if v == nil {
+		return nil
+	}
+	if ss, ok := v.(map[string]string); ok {
+		return ss
+	}
+	p, ok := v.(map[string]any)
+	if !ok {
+		return map[string]string{}
+	}
+
+	return assertStringMap(p)
+}
+
+// FlattenKeyValueInterface returns a pointer to an interface.
+// It can only be used for untyped maps.
+func FlattenKeyValueInterface(v any) map[string]any {
+	if v == nil {
+		return nil
+	}
+
+	if ss, ok := v.(map[string]any); ok {
+		return ss
+	}
+
+	return map[string]any{}
+}
+
+// Returns a map[string]string from a map[string]interface{}
+// Non-string values are skipped.
+func assertStringMap(mi map[string]any) map[string]string {
+	ms := make(map[string]string)
+	for k, v := range mi {
+		if v == nil {
+			continue
+		}
+
+		if s, ok := v.(string); ok {
+			ms[k] = s
+		}
+	}
+	return ms
+}
+
+// FlattenFloatSlice asserts that an interface is a []float64 and returns
+// it.
+func FlattenFloatSlice(v any) []float64 {
+	if v == nil {
+		return nil
+	}
+	p, ok := v.([]any)
+	if !ok {
+		return []float64{}
+	}
+
+	return assertFloatSlice(p)
+}
+
+// Returns a []float64 from an []interface
+// Non-float values are skipped.
+func assertFloatSlice(id []any) []float64 {
+	dd := []float64{}
+	for _, v := range id {
+		if v == nil {
+			continue
+		}
+
+		if d, ok := v.(float64); ok {
+			dd = append(dd, d)
+		}
+	}
+
+	return dd
+}
+
+// FlattenIntSlice asserts that an interface is a []int and returns
+// it.
+func FlattenIntSlice(v any) []int64 {
+	if v == nil {
+		return nil
+	}
+	p, ok := v.([]any)
+	if !ok {
+		return []int64{}
+	}
+
+	return assertIntSlice(p)
+}
+
+// Returns a []int64 from an []interface
+// Non-int values are skipped.
+func assertIntSlice(id []any) []int64 {
+	dd := []int64{}
+	for _, v := range id {
+		if v == nil {
+			continue
+		}
+		// JSON numbers decode as float64; accept those as ints too.
+		if f, ok := v.(float64); ok {
+			dd = append(dd, int64(f))
+			continue
+		}
+		if d, ok := v.(int64); ok {
+			dd = append(dd, d)
+		}
+	}
+
+	return dd
+}
+
+// FlattenStringSlice asserts that an interface is a []string and returns
+// it.
+func FlattenStringSlice(v any) []string {
+	if v == nil {
+		return nil
+	}
+	p, ok := v.([]any)
+	if !ok {
+		return []string{}
+	}
+
+	return assertStringSlice(p)
+}
+
+// Returns a []string from an []interface
+// Non-string values are skipped.
+func assertStringSlice(is []any) []string {
+	ss := []string{}
+	for _, v := range is {
+		if v == nil {
+			continue
+		}
+
+		if s, ok := v.(string); ok {
+			ss = append(ss, s)
+		}
+	}
+
+	return ss
+}
+
+// FlattenString asserts that an interface is a string and returns a pointer to
+// it, or to the empty string if the value is invalid.
+func FlattenString(v any) *string {
+	if v == nil {
+		return nil
+	}
+	p, ok := v.(string)
+	if !ok {
+		return String("")
+	}
+
+	return &p
+}
+
+// FlattenBool asserts that an interface is a bool and returns a pointer to it, or
+// a pointer to false if the value is invalid.
+func FlattenBool(v any) *bool {
+	if v == nil {
+		return nil
+	}
+	p, ok := v.(bool)
+	if !ok {
+		return Bool(false)
+	}
+
+	return &p
+}
+
+// FlattenTime asserts that an interface is a time.Time and returns it.
+// Time values transmitted in JSON will be an RFC3339 time as per
+// https://developers.google.com/discovery/v1/type-format
+// Otherwise, it returns the empty time.
+func FlattenTime(v any) time.Time {
+	if s, ok := v.(string); ok {
+		t, err := time.Parse(time.RFC3339, s)
+		if err == nil {
+			return t
+		}
+	}
+
+	// In case we inject a time.Time in custom code, convert it.
+	if p, ok := v.(time.Time); ok {
+		return p
+	}
+
+	return time.Time{}
+}
+
+// FlattenSecretValue behaves the same way as FlattenString, except that it
+// returns nil if the value is not present.
+func FlattenSecretValue(v any) *string {
+	p, ok := v.(string)
+	if !ok {
+		return nil
+	}
+
+	return &p
+
+}
+
+// ExtractElementFromList takes in bytes corresponding to a json object of the structure
+// { "listKey": [{"foo": "bar", ...}, {"foo": "baz", ...}] }
+// and returns the first element for which isElement returns true.
+// isElement operates on the serialized json representation of each element -
+// to the extent that json.Marshal(json.Unmarshal(X)) != X, this may differ from the
+// actual elements in the input bytes - but this should be exclusively
+// differences which are not semantically significant in json.
+func ExtractElementFromList(b []byte, listKey string, isElement func([]byte) bool) ([]byte, error) {
+	var m map[string]any
+	if err := json.Unmarshal(b, &m); err != nil {
+		return nil, err
+	}
+	l, ok := m[listKey]
+	if !ok {
+		return nil, NotFoundError{Cause: fmt.Errorf("could not find %q in %v, assuming list is empty and returning not found", listKey, m)}
+	}
+	list, ok := l.([]any)
+	if !ok {
+		return nil, fmt.Errorf("could not convert %v to list", l)
+	}
+	for _, v := range list {
+		if subM, ok := v.(map[string]any); ok {
+			if subB, err := json.Marshal(subM); err != nil {
+				continue
+			} else if isElement(subB) {
+				return subB, nil
+			}
+		}
+	}
+	// Return a 404-style error.
+	return nil, NotFoundError{Cause: fmt.Errorf("could not find a match in %v", list)}
+}
diff --git a/mmv1/third_party/terraform/tpgdclresource/diff.go b/mmv1/third_party/terraform/tpgdclresource/diff.go
new file mode 100755
index 000000000000..fcfe90bede51
--- /dev/null
+++ b/mmv1/third_party/terraform/tpgdclresource/diff.go
@@ -0,0 +1,572 @@
+package tpgdclresource
+
+import (
+	"encoding/json"
+	"fmt"
+	"reflect"
+)
+
+// DiffInfo is a struct that contains all information about the diff that's about to occur.
+type DiffInfo struct {
+	// Ignore + OutputOnly cause the diff checker to always return no-diff.
+	Ignore          bool
+	OutputOnly      bool
+	ServerDefault   bool
+	MergeNestedDiffs bool
+	IgnoredPrefixes []string
+	Type            string
+
+	// ObjectFunction is the function used to diff a Nested Object.
+	ObjectFunction func(desired, actual any, fn FieldName) ([]*FieldDiff, error)
+
+	// CustomDiff is used to handle diffing a field when normal diff functions will not suffice.
+	// It should return false if there is any diff between 'desired' and 'actual'.
+	CustomDiff func(desired, actual any) bool
+
+	// OperationSelector takes in the field's diff and returns the name of the operation (or Recreate) that should be triggered.
+	OperationSelector func(d *FieldDiff) []string
+
+	EmptyObject any
+}
+
+// FieldName is used to add information about a field's name for logging purposes.
+type FieldName struct {
+	FieldName string
+}
+
+// AddIndex adds an index to a FieldName and returns the same item.
+// Info is always pass-by-value, so the original field name still exists.
+func (i FieldName) AddIndex(index int) FieldName {
+	newInfo := i
+	newInfo.FieldName = newInfo.FieldName + fmt.Sprintf("[%v]", index)
+	return newInfo
+}
+
+// AddNest adds an index to a FieldName and returns the same item.
+// Info is always pass-by-value, so the original field name still exists.
+func (i FieldName) AddNest(field string) FieldName {
+	newInfo := i
+	if i.FieldName == "" {
+		newInfo.FieldName = field
+	} else {
+		newInfo.FieldName = newInfo.FieldName + fmt.Sprintf(".%s", field)
+	}
+	return newInfo
+}
+
+// FieldDiff contains all information about a diff that exists in the resource.
+type FieldDiff struct {
+	FieldName string
+	Message   string
+	Desired   any
+	Actual    any
+
+	ToAdd    []any
+	ToRemove []any
+
+	// The name of the operation that should result (may be Recreate)
+	// In the case of sets, more than one operation may be returned.
+	ResultingOperation []string
+}
+
+func (d *FieldDiff) String() string {
+	if d.Message != "" {
+		return fmt.Sprintf("Field %s diff: %s", d.FieldName, d.Message)
+	} else if len(d.ToAdd) != 0 || len(d.ToRemove) != 0 {
+		return fmt.Sprintf("Field %s: add %v, remove %v", d.FieldName, d.ToAdd, d.ToRemove)
+	}
+	return fmt.Sprintf("Field %s: got %s, want %s", d.FieldName, SprintResourceCompact(d.Actual), SprintResourceCompact(d.Desired))
+}
+
+// stringValue renders i for diff messages, dereferencing non-nil pointers;
+// a nil pointer prints as "nil".
+func stringValue(i any) string {
+	v := reflect.ValueOf(i)
+	if v.Kind() == reflect.Ptr {
+		if v.IsNil() {
+			return "nil"
+		}
+		return fmt.Sprintf("%v", reflect.Indirect(v))
+	}
+	return fmt.Sprintf("%v", i)
+}
+
+// Diff takes in two interfaces and diffs them according to Info.
+func Diff(desired, actual any, info DiffInfo, fn FieldName) ([]*FieldDiff, error) {
+	var diffs []*FieldDiff
+	// All Output-only fields should not be diffed.
+	if info.OutputOnly || info.Ignore {
+		return nil, nil
+	}
+
+	// If desired is a zero value, we do not care about the field.
+	if IsZeroValue(desired) {
+		return nil, nil
+	}
+
+	if info.OperationSelector == nil {
+		return nil, fmt.Errorf("an operation selector function must exist")
+	}
+
+	desiredType := ValueType(desired)
+
+	if desiredType == "invalid" {
+		return nil, nil
+	}
+
+	if info.CustomDiff != nil {
+		if !info.CustomDiff(desired, actual) {
+			diffs = append(diffs, &FieldDiff{FieldName: fn.FieldName, Desired: desired, Actual: actual})
+		}
+		addOperationToDiffs(diffs, info)
+		return diffs, nil
+	}
+
+	if desiredType == "slice" {
+		dSlice, iSlice, err := slices(desired, actual)
+		if err != nil {
+			return nil, err
+		}
+		var arrDiffs []*FieldDiff
+		if info.Type == "Set" {
+			arrDiffs, err = setDiff(dSlice, iSlice, info, fn)
+		} else {
+			arrDiffs, err = arrayDiff(dSlice, iSlice, info, fn)
+		}
+		if err != nil {
+			return nil, err
+		}
+		diffs = append(diffs, arrDiffs...)
+		addOperationToDiffs(diffs, info)
+		return diffs, nil
+	}
+
+	if info.Type == "EnumType" {
+		if !reflect.DeepEqual(desired, actual) {
+			diffs = append(diffs, &FieldDiff{FieldName: fn.FieldName, Desired: desired, Actual: actual})
+			addOperationToDiffs(diffs, info)
+			return diffs, nil
+		}
+		return nil, nil
+	}
+
+	switch desiredType {
+	case "string":
+		dStr, err := str(desired)
+		if err != nil {
+			return nil, err
+		}
+
+		// Protobufs cannot differentiate between empty primitive values + null.
+		// If the API returns null or does not return a value for the field and we have set the empty string, those are equivalent.
+ if IsZeroValue(actual) && *dStr == "" { + return diffs, nil + } + + aStr, err := str(actual) + if err != nil { + return nil, err + } + + if info.Type == "ReferenceType" { + if !StringEqualsWithSelfLink(dStr, aStr) { + diffs = append(diffs, &FieldDiff{FieldName: fn.FieldName, Desired: dStr, Actual: aStr}) + } + } else { + if !StringCanonicalize(dStr, aStr) { + diffs = append(diffs, &FieldDiff{FieldName: fn.FieldName, Desired: dStr, Actual: aStr}) + } + } + + case "map": + dMap, aMap, err := maps(desired, actual) + if err != nil { + return nil, err + } + mapDiffs, err := mapCompare(dMap, aMap, info.IgnoredPrefixes, info, fn) + if err != nil { + return nil, err + } + if len(mapDiffs) > 0 { + diffs = append(diffs, mapDiffs...) + } + + case "int64": + dInt, err := makeint64(desired) + if err != nil { + return nil, err + } + + // 0 is the empty value for integers. + if IsZeroValue(actual) && *dInt == 0 { + return diffs, nil + } + + if !reflect.DeepEqual(desired, actual) { + diffs = append(diffs, &FieldDiff{FieldName: fn.FieldName, Desired: desired, Actual: actual}) + } + + case "int": + dInt, err := makeint(desired) + if err != nil { + return nil, err + } + + // 0 is the empty value for integers. + if IsZeroValue(actual) && *dInt == 0 { + return diffs, nil + } + + if !reflect.DeepEqual(desired, actual) { + diffs = append(diffs, &FieldDiff{FieldName: fn.FieldName, Desired: desired, Actual: actual}) + } + + case "float64": + dFloat, err := makefloat64(desired) + if err != nil { + return nil, err + } + + // 0 is the empty value for integers. 
+ if IsZeroValue(actual) && *dFloat == 0.0 { + return diffs, nil + } + + if !reflect.DeepEqual(desired, actual) { + diffs = append(diffs, &FieldDiff{FieldName: fn.FieldName, Desired: desired, Actual: actual}) + } + + case "bool": + dBool, aBool, err := bools(desired, actual) + if err != nil { + return nil, err + } + if !BoolCanonicalize(dBool, aBool) { + diffs = append(diffs, &FieldDiff{FieldName: fn.FieldName, Desired: dBool, Actual: aBool}) + } + + case "struct": + // If API returns nil (which means field is unset) && we have the empty-struct, no diff occurs. + if IsZeroValue(actual) && IsEmptyValueIndirect(desired) { + return nil, nil + } + + // Want empty value, but non-empty value currrently exists. + // Only consider *explicitly* empty values, rather than "some combination + // of nils and falses" (as IEVI would do), because of the case comparing + // a non-explicitly empty struct with a struct containing only computed fields. + // See compute's `validate_test.go` for example. + if hasEmptyStructField(desired) && !IsEmptyValueIndirect(actual) { + if info.ServerDefault { + // The API can return values where none are in the desired state. + return nil, nil + } + diffs = append(diffs, &FieldDiff{FieldName: fn.FieldName, Desired: desired, Actual: actual}) + addOperationToDiffs(diffs, info) + return diffs, nil + } + + if info.ObjectFunction == nil { + return nil, fmt.Errorf("struct %v given without an object function", desired) + } + + if info.EmptyObject == nil { + return nil, fmt.Errorf("struct %v given without an empty object type", desired) + } + + // If the API returns nil, we can't diff against a nil. We should use the empty object instead. + // This is because the user could write out a config that is functionally equivalent to the empty object (contains all 0s and ""), + // but is not technically the empty object. 
+ if actual == nil || ValueType(actual) == "invalid" { + actual = info.EmptyObject + } + + ds, err := info.ObjectFunction(desired, actual, fn) + if err != nil { + return nil, err + } + if info.MergeNestedDiffs { + // Replace any nested diffs with a recreate operation with a diff in this field. + nonRecreateCount := 0 + for _, d := range ds { + if len(d.ResultingOperation) == 0 { + return nil, fmt.Errorf("diff found in field %q with no operation", d.FieldName) + } + if d.ResultingOperation[0] != "Recreate" { + ds[nonRecreateCount] = d + nonRecreateCount++ + } + } + if nonRecreateCount < len(ds) { + // At least one nested diff requires a recreate. + ds[nonRecreateCount] = &FieldDiff{FieldName: fn.FieldName, Desired: desired, Actual: actual} + nonRecreateCount++ + } + ds = ds[:nonRecreateCount] + } + diffs = append(diffs, ds...) + default: + return nil, fmt.Errorf("no diffing logic exists for type: %q", desiredType) + } + + addOperationToDiffs(diffs, info) + return diffs, nil +} + +func arrayDiff(desired, actual []any, info DiffInfo, fn FieldName) ([]*FieldDiff, error) { + var diffs []*FieldDiff + + // Nothing to diff against. + if actual == nil { + return diffs, nil + } + + if len(desired) != len(actual) && !IsZeroValue(desired) { + diffs = append(diffs, &FieldDiff{FieldName: fn.FieldName, Message: fmt.Sprintf("different lengths: desired %d, actual %d", len(desired), len(actual))}) + return diffs, nil + } + + for i, dItem := range desired { + aItem := actual[i] + diff, err := Diff(dItem, aItem, info, fn.AddIndex(i)) + if err != nil { + return nil, err + } + if diff != nil { + diffs = append(diffs, diff...) + } + } + return diffs, nil +} + +func setDiff(desired, actual []any, info DiffInfo, fn FieldName) ([]*FieldDiff, error) { + var diffs []*FieldDiff + + // Everything should be added. 
+ if actual == nil { + diffs = append(diffs, &FieldDiff{FieldName: fn.FieldName, ToAdd: desired}) + return diffs, nil + } + + var toAdd, toRemove []any + + for i, aItem := range actual { + found := false + for _, desItem := range desired { + if ds, _ := Diff(desItem, aItem, info, fn.AddIndex(i)); len(ds) == 0 { + found = true + break + } + } + if !found { + toRemove = append(toRemove, aItem) + } + } + + for i, dItem := range desired { + found := false + for _, actItem := range actual { + if ds, _ := Diff(dItem, actItem, info, fn.AddIndex(i)); len(ds) == 0 { + found = true + break + } + } + if !found { + toAdd = append(toAdd, dItem) + } + } + + if len(toAdd) > 0 || len(toRemove) > 0 { + return []*FieldDiff{&FieldDiff{FieldName: fn.FieldName, ToAdd: toAdd, ToRemove: toRemove}}, nil + } + return nil, nil +} + +// ValueType returns the reflect-style kind of an interface or the underlying type of a pointer. +func ValueType(i any) string { + if reflect.ValueOf(i).Kind() == reflect.Ptr { + return reflect.Indirect(reflect.ValueOf(i)).Kind().String() + } + return reflect.ValueOf(i).Kind().String() +} + +func strs(d, i any) (*string, *string, error) { + dStr, err := str(d) + if err != nil { + return nil, nil, err + } + + iStr, err := str(i) + if err != nil { + return nil, nil, err + } + + return dStr, iStr, nil +} + +func str(d any) (*string, error) { + dPtr, dOk := d.(*string) + if !dOk { + dStr, dOk2 := d.(string) + if !dOk2 { + return nil, fmt.Errorf("was given non string %v", d) + } + dPtr = String(dStr) + } + return dPtr, nil +} + +func makeint64(d any) (*int64, error) { + dPtr, dOk := d.(*int64) + if !dOk { + dInt, dOk2 := d.(int64) + if !dOk2 { + return nil, fmt.Errorf("was given non int64 %v", d) + } + dPtr = Int64(dInt) + } + return dPtr, nil +} + +func makeint(d any) (*int, error) { + dPtr, dOk := d.(*int) + if !dOk { + dInt, dOk2 := d.(int) + if !dOk2 { + return nil, fmt.Errorf("was given non int %v", d) + } + dPtr = &dInt + } + return dPtr, nil +} + +func 
makefloat64(d any) (*float64, error) { + dPtr, dOk := d.(*float64) + if !dOk { + dFloat, dOk2 := d.(float64) + if !dOk2 { + return nil, fmt.Errorf("was given non float64 %v", d) + } + dPtr = &dFloat + } + return dPtr, nil +} + +func bools(d, i any) (*bool, *bool, error) { + dBool, err := boolean(d) + if err != nil { + return nil, nil, err + } + + iBool, err := boolean(i) + if err != nil { + return nil, nil, err + } + + return dBool, iBool, nil +} + +func boolean(d any) (*bool, error) { + dPtr, dOk := d.(*bool) + if !dOk { + return nil, nil + } + return dPtr, nil +} + +func maps(d, a any) (map[string]any, map[string]any, error) { + dMap, _ := mapCast(d) + aMap, _ := mapCast(a) + return dMap, aMap, nil +} + +func mapCast(m any) (map[string]any, error) { + j, err := json.Marshal(m) + if err != nil { + return nil, err + } + + var mi map[string]any + json.Unmarshal(j, &mi) + return mi, nil +} + +func slices(d, i any) ([]any, []any, error) { + dSlice, err := slice(d) + if err != nil { + return nil, nil, err + } + + iSlice, err := slice(i) + if err != nil { + return nil, nil, err + } + + return dSlice, iSlice, nil +} + +func slice(slice any) ([]any, error) { + // Keep the distinction between nil and empty slice input + // This isn't going to be an error though. + if slice == nil { + return nil, nil + } + + s := reflect.ValueOf(slice) + + ret := make([]any, s.Len()) + + for i := 0; i < s.Len(); i++ { + ret[i] = s.Index(i).Interface() + } + + return ret, nil +} + +func addOperationToDiffs(fds []*FieldDiff, i DiffInfo) { + for _, fd := range fds { + // Do not overwrite update operations on nested fields with parent field operations. 
+ if len(fd.ResultingOperation) == 0 { + fd.ResultingOperation = i.OperationSelector(fd) + } + } +} + +func mapCompare(d, a map[string]any, ignorePrefixes []string, info DiffInfo, fn FieldName) ([]*FieldDiff, error) { + var diffs []*FieldDiff + for k, v := range d { + if isIgnored(k, ignorePrefixes) { + continue + } + + av, ok := a[k] + if !ok { + diffs = append(diffs, &FieldDiff{FieldName: fn.FieldName, Message: fmt.Sprintf("%v is missing from actual", k)}) + continue + } + + objDiffs, err := Diff(v, av, info, fn) + if err != nil { + return nil, err + } + diffs = append(diffs, objDiffs...) + } + + for k, v := range a { + if isIgnored(k, ignorePrefixes) { + continue + } + + dv, ok := d[k] + if !ok { + diffs = append(diffs, &FieldDiff{FieldName: fn.FieldName, Message: fmt.Sprintf("%v is missing from desired", k)}) + continue + } + + objDiffs, err := Diff(dv, v, info, fn) + if err != nil { + return nil, err + } + diffs = append(diffs, objDiffs...) + } + + return diffs, nil +} diff --git a/mmv1/third_party/terraform/tpgdclresource/diff_utils.go b/mmv1/third_party/terraform/tpgdclresource/diff_utils.go new file mode 100755 index 000000000000..e650b6c90252 --- /dev/null +++ b/mmv1/third_party/terraform/tpgdclresource/diff_utils.go @@ -0,0 +1,11 @@ +package tpgdclresource + +// RequiresRecreate is for Operations that require recreating. +func RequiresRecreate() func(d *FieldDiff) []string { + return func(d *FieldDiff) []string { return []string{"Recreate"} } +} + +// TriggersOperation is used to tell the diff checker to trigger an operation. 
+func TriggersOperation(op string) func(d *FieldDiff) []string { + return func(d *FieldDiff) []string { return []string{op} } +} diff --git a/mmv1/third_party/terraform/tpgdclresource/errors.go b/mmv1/third_party/terraform/tpgdclresource/errors.go new file mode 100755 index 000000000000..8eb110c0f169 --- /dev/null +++ b/mmv1/third_party/terraform/tpgdclresource/errors.go @@ -0,0 +1,142 @@ +package tpgdclresource + +import ( + "fmt" + "time" + + "google.golang.org/api/googleapi" +) + +// NotFoundError is returned when a resource does not exist. +// Some APIs will also return it if a resource may exist but +// the current user does not have permission to view it. +// It wraps an error, usually a *googleapi.Error. +// It maps to HTTP 404. +type NotFoundError struct { + Cause error +} + +func (e NotFoundError) Error() string { + return fmt.Sprintf("not found: %s", e.Cause) +} + +// HasCode returns true if the given error is an HTTP response with the given code. +func HasCode(err error, code int) bool { + if gerr, ok := err.(*googleapi.Error); ok { + if gerr.Code == code { + return true + } + } + return false +} + +// IsNotFound returns true if the given error is a NotFoundError or is an HTTP 404. +func IsNotFound(err error) bool { + if _, ok := err.(NotFoundError); ok { + return true + } + return HasCode(err, 404) +} + +// IsNotFoundOrCode returns true if the given error is a NotFoundError, an HTTP 404, +// or an HTTP response with the given code. +func IsNotFoundOrCode(err error, code int) bool { + return IsNotFound(err) || HasCode(err, code) +} + +// EnumInvalidError is returned when an enum is set (by a client) to a string +// value that is not valid for that enum. +// It maps to HTTP 400, although it is usually generated client-side before +// the enum is sent to the server. 
+type EnumInvalidError struct { + Enum string + Value string + Valid []string +} + +func (e EnumInvalidError) Error() string { + return fmt.Sprintf("%s not a valid %s (%v)", e.Value, e.Enum, e.Valid) +} + +// NotDeletedError is returned when the resource should be deleted but has not +// been. It is returned if the operation to delete the resource has apparently +// been successful, but Get() still fetches the resource successfully. +type NotDeletedError struct { + ExistingResource any +} + +func (e NotDeletedError) Error() string { + return fmt.Sprintf("resource not successfully deleted: %#v.", e.ExistingResource) +} + +// IsRetryableGoogleError returns true if the error is retryable according to the given retryability. +func IsRetryableGoogleError(gerr *googleapi.Error, retryability Retryability, start time.Time) bool { + return retryability.Retryable && retryability.regex.MatchString(gerr.Message) && time.Since(start) < retryability.Timeout +} + +// IsRetryableHTTPError returns true if the error is retryable - in GCP that's a 500, 502, 503, or 429. +func IsRetryableHTTPError(err error, retryability map[int]Retryability, start time.Time) bool { + if gerr, ok := err.(*googleapi.Error); ok { + rtblt, ok := retryability[gerr.Code] + return ok && IsRetryableGoogleError(gerr, rtblt, start) + } + return false +} + +// IsNonRetryableHTTPError returns true if we know that the error is not retryable - in GCP that's a 400, 403, 404, or 409. +func IsNonRetryableHTTPError(err error, retryability map[int]Retryability, start time.Time) bool { + if gerr, ok := err.(*googleapi.Error); ok { + rtblt, ok := retryability[gerr.Code] + return ok && !IsRetryableGoogleError(gerr, rtblt, start) + } + return false +} + +// IsConflictError returns true if the error has conflict error code 409. 
+func IsConflictError(err error) bool { + if gerr, ok := err.(*googleapi.Error); ok { + return gerr.Code == 409 + } + return false +} + +// ApplyInfeasibleError is returned when lifecycle directives prevent an Apply from proceeding. +// This error means that no imperative requests were issued. +type ApplyInfeasibleError struct { + Message string +} + +func (e ApplyInfeasibleError) Error() string { + return e.Message +} + +// DiffAfterApplyError is returned when there are differences between the desired state and the +// intended state after Apply completes. This usually indicates an error in the SDK, probably +// related to a failure to canonicalize properly. +type DiffAfterApplyError struct { + Diffs []string +} + +func (e DiffAfterApplyError) Error() string { + return fmt.Sprintf("diffs exist after apply: %v", e.Diffs) +} + +// OperationNotDone is returned when an API operation hasn't completed. +// It may wrap an error if the error means that the operation can be retried. +type OperationNotDone struct { + Err error +} + +func (e OperationNotDone) Error() string { + return "operation not done." +} + +// AttemptToIndexNilArray is returned when GetMapEntry is called with a path that includes an array +// index and that array is unset in the map. +type AttemptToIndexNilArray struct { + FieldName string +} + +func (e AttemptToIndexNilArray) Error() string { + return fmt.Sprintf("field %s was nil, could not index array", e.FieldName) +} diff --git a/mmv1/third_party/terraform/tpgdclresource/flatten.go b/mmv1/third_party/terraform/tpgdclresource/flatten.go new file mode 100755 index 000000000000..6d58d5b6863e --- /dev/null +++ b/mmv1/third_party/terraform/tpgdclresource/flatten.go @@ -0,0 +1,70 @@ +package tpgdclresource + +import ( + "strings" +) + +// SelfLinkToName returns the element of a string after the last slash. 
+func SelfLinkToName(v *string) *string { + if v == nil { + return nil + } + val := *v + comp := strings.Split(val, "/") + ret := comp[len(comp)-1] + return &ret +} + +// SelfLinkToNameExpander returns the element of a string after the last slash. +// Return value also has error since the dcl template requires the expander to return error. +func SelfLinkToNameExpander(v *string) (*string, error) { + return SelfLinkToName(v), nil +} + +// SelfLinkToNameArrayExpander returns the last element of each string in a slice after the last slash. +// Return value also has error since the dcl template requires the expander to return error. +func SelfLinkToNameArrayExpander(v []string) ([]string, error) { + r := make([]string, len(v)) + for i, w := range v { + r[i] = *SelfLinkToName(&w) + } + return r, nil +} + +// FalseToNil returns nil if the pointed-to boolean is 'false' - otherwise returns the pass-in pointer. +func FalseToNil(b *bool) (*bool, error) { + if b != nil && *b == false { + return nil, nil + } + return b, nil +} + +// SelfLinkToNameArray returns a slice of the elements of a slice of strings after the last slash. +func SelfLinkToNameArray(v []string) []string { + var a []string + for _, vv := range v { + ret := SelfLinkToName(&vv) + if ret != nil { + a = append(a, *ret) + } + } + return a +} + +// SelfLinkToNameWithPattern handles when the resource name can have `/` in it +// by matching the pattern. 
+func SelfLinkToNameWithPattern(v *string, pattern string) *string { + if v == nil { + return nil + } + regex, err := regexFromPattern(pattern) + if err != nil { + // Unable to compile regex, best guess return v + return v + } + matches := regex.FindStringSubmatch(*v) + if len(matches) == 0 { + return v + } + return &matches[len(matches)-1] +} diff --git a/mmv1/third_party/terraform/tpgdclresource/locations.go b/mmv1/third_party/terraform/tpgdclresource/locations.go new file mode 100755 index 000000000000..c842be1dffde --- /dev/null +++ b/mmv1/third_party/terraform/tpgdclresource/locations.go @@ -0,0 +1,23 @@ +package tpgdclresource + +import "regexp" + +// IsRegion returns true if this string refers to a GCP region. +func IsRegion(s *string) bool { + if s == nil { + return false + } + + r := regexp.MustCompile(`^[a-z]+-[a-z]+[0-9]+$`) + return r.MatchString(*s) +} + +// IsZone returns true if this string refers to a GCP zone. +func IsZone(s *string) bool { + if s == nil { + return false + } + + r := regexp.MustCompile(`^[a-z]+-[a-z]+[0-9]+-(ai[0-9]+)?[a-z]+$`) + return r.MatchString(*s) +} diff --git a/mmv1/third_party/terraform/tpgdclresource/marshallers.go b/mmv1/third_party/terraform/tpgdclresource/marshallers.go new file mode 100755 index 000000000000..42540ff8d22a --- /dev/null +++ b/mmv1/third_party/terraform/tpgdclresource/marshallers.go @@ -0,0 +1,333 @@ +package tpgdclresource + +import ( + "encoding/json" + "fmt" + "reflect" + re "regexp" + "strconv" + "strings" + + glog "github.com/golang/glog" +) + +// MoveMapEntry moves the entry at `from` to `to`. `from` and `to` are slices +// of string keys. Each key except the last must refer to a map[string]interface{} +// in m - we will descend into m following those keys. If the maps at the levels +// above the target are empty after the move, they will be deleted. If there +// are no maps along the path to `to`, they will be created. If a map above +// the level of the target is missing, nothing will be done. 
If the map exists
// but `target` is not present, `nil` will be inserted at `to`.
func MoveMapEntry(m map[string]any, from, to []string) error {
	// No source path means there is nothing to move.
	if len(from) == 0 {
		return nil
	}
	fetch := m
	// All elements before the last must point to a map[string]interface{} -
	// this ranges over all those elements, so at the end of this loop, we have
	// the map which contains the actual final element to move.
	for _, idx := range from[:len(from)-1] {
		f, ok := fetch[idx]
		if !ok {
			// Nothing to move, so it's not an error not to move it.
			return nil
		}
		fetch, ok = f.(map[string]any)
		if !ok {
			return fmt.Errorf("could not fetch %q from %v", idx, fetch)
		}
	}
	value, ok := fetch[from[len(from)-1]]
	if !ok {
		// Map exists but target is absent - insert nil at `to`.
		value = nil
	}
	delete(fetch, from[len(from)-1])
	if len(to) > 0 {
		fetch = m
		for _, idx := range to[:len(to)-1] {
			f, ok := fetch[idx]
			if !ok {
				// Create intermediate maps along the destination path.
				fetch[idx] = make(map[string]any)
				f = fetch[idx]
			}
			fetch, ok = f.(map[string]any)
			if !ok {
				return fmt.Errorf("%v is not map[string]interface{}", f)
			}
		}
		fetch[to[len(to)-1]] = value
	}
	// Clean up any now-empty maps left along the source path.
	return deleteIfEmpty(m, from)
}

// GetMapEntry returns the value at `path` from `m`, following the same rules as
// `MoveMapEntry` except that a missing map or value is an error.
func GetMapEntry(m map[string]any, path []string) (any, error) {
	if len(path) == 0 {
		return m, nil
	}
	fetch := m
	// All elements before the last must point to a map[string]interface{} -
	// this ranges over all those elements, so at the end of this loop, we have
	// the map which contains the element to fetch.
	for _, idx := range path[:len(path)-1] {
		f, err := mapEntry(fetch, idx)
		if err != nil {
			return nil, err
		}
		var ok bool
		fetch, ok = f.(map[string]any)
		if !ok {
			return nil, fmt.Errorf("could not fetch %q from %v", idx, fetch)
		}
	}

	value, err := mapEntry(fetch, path[len(path)-1])
	if err != nil {
		return nil, err
	}
	return value, nil
}

// arrayIndexRe matches the `[N]` suffix used for array indexing in a path
// element (e.g. "rules[0]"). Compiled once at package scope - mapEntry runs
// for every path element, and per-call MustCompile would recompile it each time.
var arrayIndexRe = re.MustCompile(`\[([0-9]*)\]`)

// mapEntry grabs item from fetch, and indexes into the array if the [num] notation is present.
func mapEntry(fetch map[string]any, item string) (any, error) {
	// Check if we're fetching from an array.
	if arrayIndexRe.MatchString(item) {
		field := strings.Split(item, "[")[0]
		items := arrayIndexRe.FindAllStringSubmatch(item, 1)
		index, err := strconv.Atoi(items[0][1])
		if err != nil {
			return nil, err
		}

		f, ok := fetch[field]
		if !ok {
			return nil, fmt.Errorf("could not find %q in %v", item, fetch)
		}

		if f == nil {
			return nil, &AttemptToIndexNilArray{FieldName: field}
		}

		arr, ok := f.([]any)
		if !ok {
			return nil, fmt.Errorf("field %s is a %T, not an array", field, f)
		}

		// Indexes are zero-based, so index == len(arr) is already out of range.
		// (The previous `len(arr) < index` check let index == len(arr) through
		// and panicked on the access below.)
		if index >= len(arr) {
			return nil, fmt.Errorf("field %s only has %v elements, needs %v", field, len(arr), index)
		}

		return arr[index], nil
	}

	f, ok := fetch[item]
	if !ok {
		return nil, fmt.Errorf("could not find %q in %v", item, fetch)
	}
	return f, nil
}

// deleteIfEmpty removes the maps along `from` that became empty after a move,
// recursing to the deepest level first so emptiness propagates upward.
func deleteIfEmpty(m map[string]any, from []string) error {
	if len(from) > 1 {
		sub, ok := m[from[0]]
		if !ok {
			return fmt.Errorf("could not fetch %q from %v", from[0], m)
		}
		smap, ok := sub.(map[string]any)
		if !ok {
			glog.Warningf("In deleting empty map while marshalling, %v not map[string]interface{}", sub)
			return nil
		}
		// Propagate failures from deeper levels (the result was previously
		// silently discarded).
		if err := deleteIfEmpty(smap, from[1:]); err != nil {
			return err
		}
	}
	if len(from) >= 1 {
		if sub, ok := m[from[0]]; ok {
			if subm, ok := sub.(map[string]any); ok && len(subm) == 0 {
				delete(m, from[0])
			}
		}
	}
	return nil
}

// PutMapEntry inserts `item` at `path` 
into `m` - the inverse of GetMapEntry.
func PutMapEntry(m map[string]any, path []string, item any) error {
	if len(path) == 0 {
		return fmt.Errorf("cannot insert value at empty path")
	}
	put := m
	// All elements before the last must point to a map[string]interface{} -
	// this ranges over all those elements, creating intermediate maps as
	// needed, so that at the end of this loop `put` receives the final element.
	for _, idx := range path[:len(path)-1] {
		f, ok := put[idx]
		if !ok {
			f = make(map[string]any)
			put[idx] = f
		}
		put, ok = f.(map[string]any)
		if !ok {
			return fmt.Errorf("could not cast %q from %v as map[string]interface{}", idx, put)
		}
	}
	put[path[len(path)-1]] = item
	return nil
}

// MapFromListOfKeyValues turns a [{"key": k, "value": v}, ...] format-map into a normal string-string map.
// This is useful for a handful of GCP APIs which have chosen to represent maps this way. We
// expect relatively few of these in newer APIs - it is explicitly against https://aip.dev/apps/2717 -
// ("such a map is represented by a normal JSON object").
// That AIP didn't exist at the time of development of, for instance, Compute v1.
func MapFromListOfKeyValues(rawFetch map[string]any, path []string, keyName, valueName string) (map[string]string, error) {
	i, err := GetMapEntry(rawFetch, path)
	if err != nil {
		// If there's nothing there, it's okay to ignore.
		glog.Warningf("In converting a map to [{\"key\": k, ...}, ...] format, no entry at %q in %v", path, rawFetch)
		return nil, nil
	}
	il, ok := i.([]any)
	if !ok {
		return nil, fmt.Errorf("could not cast %v to []interface{}", i)
	}
	var items []map[string]any
	for _, it := range il {
		cast, ok := it.(map[string]any)
		if !ok {
			return nil, fmt.Errorf("could not cast %v to map[string]interface{}", it)
		}
		items = append(items, cast)
	}

	m := make(map[string]string, len(items))
	for _, item := range items {
		// Report the configured field names in errors - the key/value field
		// names are caller-supplied, so the previous hard-coded 'key'/'value'
		// messages could name fields that do not exist in the payload.
		key, ok := item[keyName].(string)
		if !ok {
			return nil, fmt.Errorf("could not find %q in %v", keyName, item)
		}
		value, ok := item[valueName].(string)
		if !ok {
			return nil, fmt.Errorf("could not find %q in %v", valueName, item)
		}
		m[key] = value
	}
	return m, nil
}

// ListOfKeyValuesFromMap is the opposite of MapFromListOfKeyValues, used in marshalling instead of unmarshalling.
// Note that iteration over a Go map is unordered, so the resulting list order
// is nondeterministic.
func ListOfKeyValuesFromMap(m map[string]string, keyName, valueName string) ([]map[string]string, error) {
	var items []map[string]string
	for k, v := range m {
		items = append(items, map[string]string{
			keyName:   k,
			valueName: v,
		})
	}
	return items, nil
}

// ListOfKeyValuesFromMapInStruct returns the opposite of MapFromListOfKeyValues, except nested inside a struct under the subfield name.
func ListOfKeyValuesFromMapInStruct(m map[string]string, subfieldName, keyName, valueName string) (map[string][]map[string]string, error) {
	maps, err := ListOfKeyValuesFromMap(m, keyName, valueName)
	if err != nil {
		return nil, err
	}
	return map[string][]map[string]string{
		subfieldName: maps,
	}, nil
}

// ConvertToMap converts the specified object into the map[string]interface{} which can
// be serialized into the same json object as the input object.
+func ConvertToMap(obj any) (map[string]any, error) { + var m map[string]any + b, err := json.Marshal(obj) + if err != nil { + return nil, err + } + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + return m, nil +} + +// ValueOrEmptyString takes a scalar or pointer to a scalar and returns either the empty string or its value. +func ValueOrEmptyString(i any) string { + if i == nil { + return "" + } + v := reflect.ValueOf(i) + for v.Kind() == reflect.Ptr { + v = v.Elem() + } + if v.IsValid() { + switch v.Kind() { + case reflect.Bool, reflect.Int, reflect.Int64, reflect.Float64, reflect.String: + return fmt.Sprintf("%v", v.Interface()) + } + } + return "" +} + +// ValueOrEmptyInt64 returns the value or the default value if the pointer is nil. +func ValueOrEmptyInt64(s *int64) int64 { + if s == nil { + return 0 + } + return *s +} + +// ValueOrEmptyBool returns the value or the default value if the pointer is nil. +func ValueOrEmptyBool(s *bool) bool { + if s == nil { + return false + } + return *s +} + +// ValueOrEmptyDouble returns the value or the default value if the pointer is nil. +func ValueOrEmptyDouble(s *float64) float64 { + if s == nil { + return 0.0 + } + return *s +} + +// FindStringInArray returns true if value found in array of strings +func FindStringInArray(s string, items []string) bool { + for _, v := range items { + if v == s { + return true + } + } + return false +} + +// ValueFromRegexOnField assigns val to the regex value on containerVal if val is unset +func ValueFromRegexOnField(fieldName string, val *string, containerVal *string, regex string) (*string, error) { + containerGroupedVal := String("") + // Fetch value from container if the container exists. 
+ if containerVal != nil && *containerVal != "" { + r := re.MustCompile(regex) + m := r.FindStringSubmatch(*containerVal) + if m != nil && len(m) >= 2 { + containerGroupedVal = String(m[1]) + } else if val == nil || *val == "" { + // The regex didn't match and the value doesn't exist. + return nil, fmt.Errorf("%s field parent has no matching values from regex %s in value %s", fieldName, regex, *containerVal) + } + } + + // If value exists + different from what's in container, error. + if val != nil && *val != "" { + if containerGroupedVal != nil && *containerGroupedVal != "" && *containerGroupedVal != *val { + return nil, fmt.Errorf("%s field has conflicting values of %s (from parent) and %s (from self)", fieldName, *containerGroupedVal, *val) + } + } + + // If value does not exist, use the value in container. + if val == nil || *val == "" { + return containerGroupedVal, nil + } + + return val, nil +} diff --git a/mmv1/third_party/terraform/tpgdclresource/operations/compute.go b/mmv1/third_party/terraform/tpgdclresource/operations/compute.go new file mode 100755 index 000000000000..868ef74f2bfb --- /dev/null +++ b/mmv1/third_party/terraform/tpgdclresource/operations/compute.go @@ -0,0 +1,117 @@ +package operations + +import ( + "bytes" + "context" + "fmt" + "strings" + "time" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +// ComputeOperation can be parsed from the returned API operation and waited on. +// Based on https://cloud.google.com/compute/docs/reference/rest/v1/regionOperations +type ComputeOperation struct { + ID string `json:"id"` + Error *ComputeOperationError `json:"error"` + SelfLink string `json:"selfLink"` + Status string `json:"status"` + TargetLink string `json:"targetLink"` + TargetID string `json:"targetId"` + // other irrelevant fields omitted + + config *dcl.Config +} + +// ComputeOperationError is the GCE operation's Error body. 
+type ComputeOperationError struct {
+	Code    int                           `json:"code"`
+	Message string                        `json:"message"`
+	Errors  []*ComputeOperationErrorError `json:"errors"`
+}
+
+// String formats the ComputeOperationError as an error string.
+func (e *ComputeOperationError) String() string {
+	if e == nil {
+		return "nil"
+	}
+	var b strings.Builder
+	for _, err := range e.Errors {
+		fmt.Fprintf(&b, "error code %q, message: %s\n", err.Code, err.Message)
+	}
+	if e.Code != 0 || e.Message != "" {
+		fmt.Fprintf(&b, "error code %d, message: %s\n", e.Code, e.Message)
+	}
+
+	return b.String()
+}
+
+// ComputeOperationErrorError is a singular error in a GCE operation.
+type ComputeOperationErrorError struct {
+	Code    string `json:"code"`
+	Message string `json:"message"`
+}
+
+// Wait waits for a ComputeOperation to complete by fetching the operation until it completes.
+func (op *ComputeOperation) Wait(ctx context.Context, c *dcl.Config, _, _ string) error {
+	c.Logger.Infof("Waiting on operation: %v", op)
+	op.config = c
+
+	err := dcl.Do(ctx, op.operate, c.RetryProvider)
+	c.Logger.Infof("Completed operation: %v", op)
+	return err
+}
+
+func (op *ComputeOperation) handleResponse(resp *dcl.RetryDetails, err error) (*dcl.RetryDetails, error) {
+	if err != nil {
+		if dcl.IsRetryableRequestError(op.config, err, false, time.Now()) {
+			return nil, dcl.OperationNotDone{}
+		}
+		return nil, err
+	}
+
+	if err := dcl.ParseResponse(resp.Response, op); err != nil {
+		return nil, err
+	}
+
+	if op.Status != "DONE" {
+		return nil, dcl.OperationNotDone{}
+	}
+
+	if op.Error != nil {
+		return nil, fmt.Errorf("operation received error: %v", op.Error)
+	}
+
+	return resp, nil
+}
+
+// FirstResponse returns the first response that this operation receives with the resource.
+// This response may contain special information.
+func (op *ComputeOperation) FirstResponse() (map[string]any, bool) { + return make(map[string]any), false +} + +func (op *ComputeOperation) operate(ctx context.Context) (*dcl.RetryDetails, error) { + return op.handleResponse(dcl.SendRequest(ctx, op.config, "GET", op.SelfLink, &bytes.Buffer{}, nil)) +} + +// ComputeGlobalOrganizationOperation can be parsed from the returned API operation and waited on. +// Based on https://cloud.google.com/compute/docs/reference/rest/v1/globalOrganizationOperations +type ComputeGlobalOrganizationOperation struct { + BaseOperation ComputeOperation + Parent string +} + +func (op *ComputeGlobalOrganizationOperation) Wait(ctx context.Context, c *dcl.Config, parent *string) error { + c.Logger.Infof("Waiting on: %v", op) + op.BaseOperation.config = c + + op.Parent = *parent + + return dcl.Do(ctx, op.operate, c.RetryProvider) +} + +func (op *ComputeGlobalOrganizationOperation) operate(ctx context.Context) (*dcl.RetryDetails, error) { + return op.BaseOperation.handleResponse(dcl.SendRequest(ctx, op.BaseOperation.config, "GET", op.BaseOperation.SelfLink+"?parentId="+op.Parent, &bytes.Buffer{}, nil)) +} diff --git a/mmv1/third_party/terraform/tpgdclresource/operations/crm.go b/mmv1/third_party/terraform/tpgdclresource/operations/crm.go new file mode 100755 index 000000000000..3c86bde2326c --- /dev/null +++ b/mmv1/third_party/terraform/tpgdclresource/operations/crm.go @@ -0,0 +1,123 @@ +package operations + +import ( + "bytes" + "context" + "fmt" + "strings" + "time" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +// CRMOperation can be parsed from the returned API operation and waited on. +// This is the typical GCP operation. 
+type CRMOperation struct { + Name string `json:"name"` + Error *CRMOperationError `json:"error"` + Done bool `json:"done"` + Response map[string]any `json:"response"` + Metadata map[string]any `json:"metadata"` + // other irrelevant fields omitted + + config *dcl.Config + basePath string + verb string + version string + + response map[string]any +} + +// CRMOperationError is the GCP operation's Error body. +type CRMOperationError struct { + Code int `json:"code"` + Message string `json:"message"` + Errors []*CRMOperationErrorError `json:"errors"` +} + +// String formats the CRMOperationError as an error string. +func (e *CRMOperationError) String() string { + if e == nil { + return "nil" + } + var b strings.Builder + for _, err := range e.Errors { + fmt.Fprintf(&b, "error code %q, message: %s\n", err.Code, err.Message) + } + if e.Code != 0 || e.Message != "" { + fmt.Fprintf(&b, "error code %d, message: %s\n", e.Code, e.Message) + } + + return b.String() +} + +// CRMOperationErrorError is a singular error in a GCP operation. +type CRMOperationErrorError struct { + Code string `json:"code"` + Message string `json:"message"` +} + +// Wait waits for an CRMOperation to complete by fetching the operation until it completes. +func (op *CRMOperation) Wait(ctx context.Context, c *dcl.Config, basePath, verb string) error { + c.Logger.Infof("Waiting on operation: %v", op) + op.config = c + op.basePath = basePath + op.verb = verb + + if len(op.Response) > 0 { + op.response = op.Response + } + + // base CRM resources use the v1 endpoint + op.version = "v1" + + // Tags resources require the v3 endpoint, and DCL merges the two into one Operation handler. Identify + // the operation kind by the "type" returned. 
+ if t, ok := op.Metadata["@type"].(string); ok && strings.HasPrefix(t, "type.googleapis.com/google.cloud.resourcemanager.v3") { + op.version = "v3" + } + + if op.Done { + c.Logger.Infof("Completed operation: %v", op) + return nil + } + + err := dcl.Do(ctx, op.operate, c.RetryProvider) + c.Logger.Infof("Completed operation: %v", op) + return err +} + +func (op *CRMOperation) operate(ctx context.Context) (*dcl.RetryDetails, error) { + u := dcl.URL(op.version+"/"+op.Name, op.basePath, op.config.BasePath, nil) + resp, err := dcl.SendRequest(ctx, op.config, op.verb, u, &bytes.Buffer{}, nil) + if err != nil { + if dcl.IsRetryableRequestError(op.config, err, false, time.Now()) { + return nil, dcl.OperationNotDone{} + } + return nil, err + } + + if err := dcl.ParseResponse(resp.Response, op); err != nil { + return nil, err + } + + if !op.Done { + return nil, dcl.OperationNotDone{} + } + + if op.Error != nil { + return nil, fmt.Errorf("operation received error: %+v", op.Error) + } + + if len(op.response) == 0 && len(op.Response) > 0 { + op.response = op.Response + } + + return resp, nil +} + +// FirstResponse returns the first response that this operation receives with the resource. +// This response may contain special information. +func (op *CRMOperation) FirstResponse() (map[string]any, bool) { + return op.response, len(op.response) > 0 +} diff --git a/mmv1/third_party/terraform/tpgdclresource/operations/datastore.go b/mmv1/third_party/terraform/tpgdclresource/operations/datastore.go new file mode 100755 index 000000000000..4250fd8f0a63 --- /dev/null +++ b/mmv1/third_party/terraform/tpgdclresource/operations/datastore.go @@ -0,0 +1,68 @@ +package operations + +import ( + "bytes" + "context" + "fmt" + "time" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +// DatastoreOperation can be parsed from the returned API operation and waited on. 
+type DatastoreOperation struct { + Name string `json:"name"` + Done bool `json:"done"` + Metadata *DatastoreOperationMetadata `json:"metadata"` + Error *DatastoreOperationError `json:"error"` + config *dcl.Config +} + +// DatastoreOperationMetadata is an error in a datastore operation. +type DatastoreOperationMetadata struct { + IndexID string `json:"indexId"` +} + +// DatastoreOperationError is an error in a datastore operation. +type DatastoreOperationError struct { + Code int64 `json:"code"` + Message string `json:"message"` +} + +// Wait waits for an DatastoreOperation to complete by fetching the operation until it completes. +func (op *DatastoreOperation) Wait(ctx context.Context, c *dcl.Config, _, _ string) error { + c.Logger.Infof("Waiting on operation: %v", op) + op.config = c + err := dcl.Do(ctx, op.operate, c.RetryProvider) + c.Logger.Infof("Completed operation: %v", op) + return err +} + +func (op *DatastoreOperation) operate(ctx context.Context) (*dcl.RetryDetails, error) { + u := dcl.URL(op.Name, "https://datastore.googleapis.com/v1/", op.config.BasePath, nil) + resp, err := dcl.SendRequest(ctx, op.config, "GET", u, &bytes.Buffer{}, nil) + if err != nil { + if dcl.IsRetryableRequestError(op.config, err, true, time.Now()) { + return nil, dcl.OperationNotDone{} + } + return nil, err + } + if err := dcl.ParseResponse(resp.Response, op); err != nil { + return nil, err + } + if !op.Done { + return nil, dcl.OperationNotDone{} + } + if op.Error != nil { + return nil, fmt.Errorf("operation received error: %+v", op.Error) + } + return resp, nil +} + +// FirstResponse returns the first response that this operation receives with the resource. +// This response may contain special information. 
+func (op *DatastoreOperation) FirstResponse() (map[string]any, bool) { + return map[string]any{ + "indexId": op.Metadata.IndexID, + }, false +} diff --git a/mmv1/third_party/terraform/tpgdclresource/operations/dns.go b/mmv1/third_party/terraform/tpgdclresource/operations/dns.go new file mode 100755 index 000000000000..4d82b0be04eb --- /dev/null +++ b/mmv1/third_party/terraform/tpgdclresource/operations/dns.go @@ -0,0 +1,59 @@ +package operations + +import ( + "bytes" + "context" + "fmt" + "time" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +// DNSOperation can be parsed from the returned API operation and waited on. +// This is used for Changes only. +// Project and ManagedZone must be set ahead of time. +type DNSOperation struct { + Status string `json:"status"` + ID string `json:"id"` + Project string + ManagedZone string + // other irrelevant fields omitted + + config *dcl.Config +} + +// Wait waits for an DNSOperation to complete by fetching the operation until it completes. 
+func (op *DNSOperation) Wait(ctx context.Context, c *dcl.Config, project, managedZone string) error { + c.Logger.Infof("Waiting on operation: %v", op) + op.config = c + op.ManagedZone = managedZone + op.Project = project + + err := dcl.Do(ctx, op.operate, c.RetryProvider) + c.Logger.Infof("Completed operation: %v", op) + return err +} + +func (op *DNSOperation) operate(ctx context.Context) (*dcl.RetryDetails, error) { + u := fmt.Sprintf("https://dns.googleapis.com/dns/v1/projects/%s/managedZones/%s/changes/%s", op.Project, op.ManagedZone, op.ID) + resp, err := dcl.SendRequest(ctx, op.config, "GET", u, &bytes.Buffer{}, nil) + if err != nil { + if dcl.IsRetryableRequestError(op.config, err, false, time.Now()) { + return nil, dcl.OperationNotDone{} + } + return nil, err + } + if err := dcl.ParseResponse(resp.Response, op); err != nil { + return nil, err + } + if op.Status != "done" { + return nil, dcl.OperationNotDone{} + } + return resp, nil +} + +// FirstResponse returns the first response that this operation receives with the resource. +// This response may contain special information. +func (op *DNSOperation) FirstResponse() (map[string]any, bool) { + return make(map[string]any), false +} diff --git a/mmv1/third_party/terraform/tpgdclresource/operations/knative.go b/mmv1/third_party/terraform/tpgdclresource/operations/knative.go new file mode 100755 index 000000000000..b0bfe061d4ac --- /dev/null +++ b/mmv1/third_party/terraform/tpgdclresource/operations/knative.go @@ -0,0 +1,84 @@ +package operations + +import ( + "bytes" + "context" + "fmt" + "time" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +// KNativeOperation can be parsed from the returned Service. 
+type KNativeOperation struct {
+	Status   KNativeOperationStatus   `json:"status"`
+	Metadata KNativeOperationMetadata `json:"metadata"`
+	// other irrelevant fields omitted
+
+	config   *dcl.Config
+	basePath string
+	verb     string
+	location string
+}
+
+// KNativeOperationMetadata contains the Labels block.
+type KNativeOperationMetadata struct {
+	SelfLink string            `json:"selfLink"`
+	Labels   map[string]string `json:"labels"`
+}
+
+// KNativeOperationStatus contains the Conditions block.
+type KNativeOperationStatus struct {
+	Conditions []KNativeOperationCondition `json:"conditions"`
+}
+
+// KNativeOperationCondition contains the type and status of a single service condition.
+type KNativeOperationCondition struct {
+	Type   string `json:"type"`
+	Status string `json:"status"`
+}
+
+// Wait waits for a KNativeOperation to complete by fetching the service until its "Ready" condition is "True".
+func (op *KNativeOperation) Wait(ctx context.Context, c *dcl.Config, basePath, verb string) error {
+	c.Logger.Infof("Waiting on operation: %v", op)
+	op.config = c
+	op.basePath = basePath
+	op.verb = verb
+
+	location, ok := op.Metadata.Labels["cloud.googleapis.com/location"]
+	if !ok {
+		return fmt.Errorf("no location found")
+	}
+	op.location = location
+
+	err := dcl.Do(ctx, op.operate, c.RetryProvider)
+	c.Logger.Infof("Completed operation: %v", op)
+	return err
+}
+
+func (op *KNativeOperation) operate(ctx context.Context) (*dcl.RetryDetails, error) {
+	u := fmt.Sprintf("https://%s-run.googleapis.com/%s", op.location, op.Metadata.SelfLink)
+	resp, err := dcl.SendRequest(ctx, op.config, "GET", u, &bytes.Buffer{}, nil)
+	if err != nil {
+		if dcl.IsRetryableRequestError(op.config, err, false, time.Now()) {
+			return nil, dcl.OperationNotDone{}
+		}
+		return nil, err
+	}
+	if err := dcl.ParseResponse(resp.Response, op); err != nil {
+		return nil, err
+	}
+
+	for _, condition := range op.Status.Conditions {
+		if condition.Type == "Ready" && condition.Status == "True" {
+			return resp, nil
+		}
+	}
+	return nil, dcl.OperationNotDone{}
+}
+
+// 
FirstResponse returns the first response that this operation receives with the resource.
+// This response may contain special information.
+func (op *KNativeOperation) FirstResponse() (map[string]any, bool) {
+	return make(map[string]any), false
+}
diff --git a/mmv1/third_party/terraform/tpgdclresource/operations/monitoring.go b/mmv1/third_party/terraform/tpgdclresource/operations/monitoring.go
new file mode 100755
index 000000000000..e823252ef354
--- /dev/null
+++ b/mmv1/third_party/terraform/tpgdclresource/operations/monitoring.go
@@ -0,0 +1,34 @@
+package operations
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource"
+)
+
+// MonitoringOperation can be parsed from the returned API operation and waited on.
+type MonitoringOperation struct {
+	Name string `json:"name"`
+}
+
+// Wait waits for a MonitoringOperation to complete by fetching the operation until it completes.
+func (op *MonitoringOperation) Wait(ctx context.Context, c *dcl.Config, _, _ string) error {
+	if op.Name != "" {
+		// Names come in the form "accessPolicies/{{name}}"
+		parts := strings.Split(op.Name, "/")
+		op.Name = parts[len(parts)-1]
+	}
+	return nil
+}
+
+// FetchName will fetch the operation and return the name of the resource created.
+// Monitoring creates resources with machine generated names.
+// It must be called after the resource has been created.
+func (op *MonitoringOperation) FetchName() (*string, error) { + if op.Name == "" { + return nil, fmt.Errorf("this operation (%s) has no name and probably hasn't been run before", op.Name) + } + return &op.Name, nil +} diff --git a/mmv1/third_party/terraform/tpgdclresource/operations/operations.go b/mmv1/third_party/terraform/tpgdclresource/operations/operations.go new file mode 100755 index 000000000000..2f3483329523 --- /dev/null +++ b/mmv1/third_party/terraform/tpgdclresource/operations/operations.go @@ -0,0 +1,116 @@ +package operations + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "strings" + "time" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +// StandardGCPOperation can be parsed from the returned API operation and waited on. +// This is the typical GCP operation. +type StandardGCPOperation struct { + Name string `json:"name"` + Error *StandardGCPOperationError `json:"error"` + Done bool `json:"done"` + Response map[string]any `json:"response"` + // other irrelevant fields omitted + + config *dcl.Config + basePath string + verb string + + response map[string]any +} + +// StandardGCPOperationError is the GCP operation's Error body. +type StandardGCPOperationError struct { + Errors []*StandardGCPOperationErrorError `json:"errors"` + + StandardGCPOperationErrorError +} + +// String formats the StandardGCPOperationError as an error string. +func (e *StandardGCPOperationError) String() string { + if e == nil { + return "nil" + } + var b strings.Builder + for _, err := range e.Errors { + fmt.Fprintf(&b, "error code %q, message: %s, details: %+v\n", err.Code, err.Message, err.Details) + } + + if e.Code != "" { + fmt.Fprintf(&b, "error code %q, message: %s, details: %+v\n", e.Code, e.Message, e.Details) + } + + return b.String() +} + +// StandardGCPOperationErrorError is a singular error in a GCP operation. 
+type StandardGCPOperationErrorError struct { + Code json.Number `json:"code"` + Message string `json:"message"` + Details []map[string]any `json:"details"` +} + +// Wait waits for an StandardGCPOperation to complete by fetching the operation until it completes. +func (op *StandardGCPOperation) Wait(ctx context.Context, c *dcl.Config, basePath, verb string) error { + c.Logger.Infof("Waiting on operation: %v", op) + op.config = c + op.basePath = basePath + op.verb = verb + + if len(op.Response) != 0 { + op.response = op.Response + } + if op.Done { + c.Logger.Infof("Completed operation: %v", op) + return nil + } + + err := dcl.Do(ctx, op.operate, c.RetryProvider) + c.Logger.Infof("Completed operation: %v", op) + return err +} + +func (op *StandardGCPOperation) operate(ctx context.Context) (*dcl.RetryDetails, error) { + u := dcl.URL(op.Name, op.basePath, op.config.BasePath, nil) + resp, err := dcl.SendRequest(ctx, op.config, op.verb, u, &bytes.Buffer{}, nil) + if err != nil { + // Since we don't know when this operation started, we will assume the + // context's timeout applies to all request errors. + if dcl.IsRetryableRequestError(op.config, err, false, time.Now()) { + return nil, dcl.OperationNotDone{} + } + return nil, err + } + + if err := dcl.ParseResponse(resp.Response, op); err != nil { + return nil, err + } + + if !op.Done { + return nil, dcl.OperationNotDone{} + } + + if op.Error != nil { + return nil, fmt.Errorf("operation received error: %+v details: %v", op.Error, op.Response) + } + + if len(op.response) == 0 && len(op.Response) != 0 { + op.response = op.Response + } + + return resp, nil +} + +// FirstResponse returns the first response that this operation receives with the resource. +// This response may contain special information. 
+func (op *StandardGCPOperation) FirstResponse() (map[string]any, bool) { + return op.response, len(op.response) > 0 +} diff --git a/mmv1/third_party/terraform/tpgdclresource/operations/osconfig.go b/mmv1/third_party/terraform/tpgdclresource/operations/osconfig.go new file mode 100755 index 000000000000..7a61b4f1c371 --- /dev/null +++ b/mmv1/third_party/terraform/tpgdclresource/operations/osconfig.go @@ -0,0 +1,32 @@ +package operations + +import ( + "bytes" + "context" + + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +// OSPolicyAssignmentDeleteOperation can be parsed from the returned API operation and waited on. +type OSPolicyAssignmentDeleteOperation struct { + Name string `json:"name"` + + config *dcl.Config +} + +// Wait waits for an OSPolicyAssignmentDeleteOperation to complete by waiting until the operation returns a 404. +func (op *OSPolicyAssignmentDeleteOperation) Wait(ctx context.Context, c *dcl.Config, _, _ string) error { + c.Logger.Infof("Waiting on: %q", op.Name) + op.config = c + + return dcl.Do(ctx, op.operate, c.RetryProvider) +} + +func (op *OSPolicyAssignmentDeleteOperation) operate(ctx context.Context) (*dcl.RetryDetails, error) { + u := dcl.URL(op.Name, "https://osconfig.googleapis.com/v1alpha", op.config.BasePath, nil) + resp, err := dcl.SendRequest(ctx, op.config, "GET", u, &bytes.Buffer{}, nil) + if dcl.IsNotFound(err) { + return nil, nil + } + return resp, dcl.OperationNotDone{} +} diff --git a/mmv1/third_party/terraform/tpgdclresource/operations/sql.go b/mmv1/third_party/terraform/tpgdclresource/operations/sql.go new file mode 100755 index 000000000000..7cb22f7474e6 --- /dev/null +++ b/mmv1/third_party/terraform/tpgdclresource/operations/sql.go @@ -0,0 +1,78 @@ +package operations + +import ( + "bytes" + "context" + "time" + + glog "github.com/golang/glog" + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" +) + +// SQLOperation can be parsed from the returned API operation and 
waited on. +type SQLOperation struct { + ID string `json:"id"` + SelfLink string `json:"selfLink"` + Status string `json:"status"` + TargetLink string `json:"targetLink"` + // other irrelevant fields omitted + + config *dcl.Config +} + +// Wait waits for an Operation to complete by fetching the operation until it completes. +func (op *SQLOperation) Wait(ctx context.Context, c *dcl.Config, _, _ string) error { + glog.Infof("Waiting on operation: %v", op) + op.config = c + + err := dcl.Do(ctx, op.operate, c.RetryProvider) + c.Logger.Infof("Completed operation: %v", op) + return err +} + +func (op *SQLOperation) operate(ctx context.Context) (*dcl.RetryDetails, error) { + resp, err := dcl.SendRequest(ctx, op.config, "GET", op.SelfLink, &bytes.Buffer{}, nil) + if err != nil { + if dcl.IsRetryableRequestError(op.config, err, true, time.Now()) { + return nil, dcl.OperationNotDone{} + } + return nil, err + } + if err := dcl.ParseResponse(resp.Response, op); err != nil { + return nil, err + } + if op.Status != "DONE" { + return nil, dcl.OperationNotDone{} + } + return resp, nil +} + +// FirstResponse returns the first response that this operation receives with the resource. +// This response may contain special information. +func (op *SQLOperation) FirstResponse() (map[string]any, bool) { + return make(map[string]any), false +} + +// SQLCreateCertOperation is the operation used for creating SSL certs. +// They have a different format from other resources and other methods. +type SQLCreateCertOperation struct { + Operation SQLOperation `json:"operation"` + ClientCert struct { + CertInfo map[string]any `json:"certInfo"` + } `json:"clientCert"` + response map[string]any +} + +// Wait waits for an SQLOperation to complete by fetching the operation until it completes. 
+func (op *SQLCreateCertOperation) Wait(ctx context.Context, c *dcl.Config, _, _ string) error { + return op.Operation.Wait(ctx, c, "", "") +} + +// FirstResponse returns the first response that this operation receives with the resource. +// This response may contain special information. +func (op *SQLCreateCertOperation) FirstResponse() (map[string]any, bool) { + if len(op.ClientCert.CertInfo) > 0 { + return op.ClientCert.CertInfo, true + } + return make(map[string]any), false +} diff --git a/mmv1/third_party/terraform/tpgdclresource/project_id.go b/mmv1/third_party/terraform/tpgdclresource/project_id.go new file mode 100755 index 000000000000..eed01719ba8a --- /dev/null +++ b/mmv1/third_party/terraform/tpgdclresource/project_id.go @@ -0,0 +1,94 @@ +package tpgdclresource + +import ( + "context" + "fmt" + "regexp" + "strings" +) + +// This matches either the entire string if it contains no forward slashes or just projects/{project_number}/ if it does. +var projectNumberRegex = regexp.MustCompile(`(^\d+$|projects/\d+|metricsScopes/\d+)`) + +// This matches either the entire string if it contains no forward slashes or just projects/{project_id}/ if it does. +var projectIDRegex = regexp.MustCompile(`(^[^/]+$|projects/[^/]+|metricsScopes/[^/]+)`) + +// ProjectResponse is the response from Cloud Resource Manager. +type ProjectResponse struct { + ProjectID string `json:"projectId"` + ProjectNumber string `json:"projectNumber"` +} + +// FlattenProjectNumbersToIDs converts a project number to project ID. +func FlattenProjectNumbersToIDs(config *Config, fromServer *string) *string { + if fromServer == nil { + return nil + } + // Look for a number somewhere in here. 
+ editedServer := projectNumberRegex.ReplaceAllStringFunc(*fromServer, func(number string) string { + config.Logger.Infof("Preparing to use Cloud Resource Manager to convert %s to project id", number) + + p, err := fetchProjectInfo(config, number) + if err != nil { + config.Logger.Warning(err) + return number + } + + if strings.HasPrefix(number, "projects/") { + p.ProjectID = "projects/" + p.ProjectID + } + if strings.HasPrefix(number, "metricsScopes/") { + p.ProjectID = "metricsScopes/" + p.ProjectID + } + + return p.ProjectID + }) + return &editedServer +} + +var fetchProjectInfo = FetchProjectInfo + +// ExpandProjectIDsToNumbers converts a project ID to a project number. +func ExpandProjectIDsToNumbers(config *Config, fromConfig *string) (*string, error) { + if fromConfig == nil { + return nil, nil + } + + // Look for a project id somewhere in here. + editedConfig := projectIDRegex.ReplaceAllStringFunc(*fromConfig, func(id string) string { + config.Logger.Infof("Preparing to convert %s to project number", id) + + p, err := fetchProjectInfo(config, id) + if err != nil { + config.Logger.Warning(err) + return id + } + + if strings.HasPrefix(id, "projects/") { + p.ProjectNumber = "projects/" + p.ProjectNumber + } + if strings.HasPrefix(id, "metricsScopes/") { + p.ProjectNumber = "metricsScopes/" + p.ProjectNumber + } + + return p.ProjectNumber + }) + return &editedConfig, nil +} + +// FetchProjectInfo returns a ProjectResponse from CloudResourceManager. 
+func FetchProjectInfo(config *Config, projectIdentifier string) (ProjectResponse, error) { + var p ProjectResponse + trimmedIdentifier := strings.TrimPrefix(projectIdentifier, "projects/") + trimmedIdentifier = strings.TrimPrefix(trimmedIdentifier, "metricsScopes/") + trimmedIdentifier = strings.TrimSuffix(trimmedIdentifier, "/") + retryDetails, err := SendRequest(context.TODO(), config, "GET", "https://cloudresourcemanager.googleapis.com/v1/projects/"+trimmedIdentifier, nil, nil) + if err != nil { + return p, fmt.Errorf("failed to send request for project info using identifier %q: %s", projectIdentifier, err) + } + if err := ParseResponse(retryDetails.Response, &p); err != nil { + return p, fmt.Errorf("failed to parse response %v for project with identifier %q: %s", retryDetails.Response, projectIdentifier, err) + } + + return p, nil +} diff --git a/mmv1/third_party/terraform/tpgdclresource/resource.go b/mmv1/third_party/terraform/tpgdclresource/resource.go new file mode 100755 index 000000000000..064fc27b5e39 --- /dev/null +++ b/mmv1/third_party/terraform/tpgdclresource/resource.go @@ -0,0 +1,22 @@ +package tpgdclresource + +type Resource interface { + Describe() ServiceTypeVersion +} + +// ServiceTypeVersion is a tuple that can uniquely identify a +// DCL resource type. +type ServiceTypeVersion struct { + // Service indicates the service to which this resource + // belongs, e.g., "compute". It is roughly analogous to the + // K8S "Group" identifier. + Service string + + // Type identifies the particular type of this resource, + // e.g., "ComputeInstance". It maps to the K8S "Kind". + Type string + + // Version is the DCL version of the resource, e.g., + // "beta" or "ga". 
+ Version string +} diff --git a/mmv1/third_party/terraform/tpgdclresource/retry.go b/mmv1/third_party/terraform/tpgdclresource/retry.go new file mode 100755 index 000000000000..a543804b5916 --- /dev/null +++ b/mmv1/third_party/terraform/tpgdclresource/retry.go @@ -0,0 +1,139 @@ +package tpgdclresource + +import ( + "context" + "net/http" + "time" + + "github.com/cenkalti/backoff" + glog "github.com/golang/glog" +) + +// Stop is a value that indicates that no more retries should be attempted. +const Stop time.Duration = -1 + +// BackoffInitialInterval is the default InitialInterval value for Backoff. +const BackoffInitialInterval = 500 * time.Millisecond + +// BackoffMaxInterval is the default MaxInterval value for Backoff. +const BackoffMaxInterval = 30 * time.Second + +// RetryDetails provides information about an operation that a Retry implementation +// can use to make decisions about when or if to perform further requests. +type RetryDetails struct { + Request *http.Request + Response *http.Response +} + +// Operation is a retryable function. Implementations should return nil to indicate +// that the operation has concluded successfully, OperationNotDone to indicate +// that the operation should be retried, and any other error to indicate that a +// non-retryable error has occurred. +type Operation func(ctx context.Context) (*RetryDetails, error) + +// Retry provides an interface for handling retryable operations in a flexible manner. +type Retry interface { + // RetryAfter returns the amount of time that should elapse before an operation is re-run. Returning + // Stop indicates that no more retries should occur, and returning zero indicates that the operation + // should be immediately retried. + RetryAfter(details *RetryDetails) time.Duration +} + +// RetryProvider allows callers to provide custom retry behavior. +type RetryProvider interface { + // New returns an initialized Retry. 
+ New() Retry +} + +// NoRetry is a Retry implementation that will never retry. +type NoRetry struct{} + +// RetryAfter implementation that never retries. +func (n *NoRetry) RetryAfter(_ *RetryDetails) time.Duration { + return Stop +} + +// Reset is a no-op. +func (n *NoRetry) Reset() {} + +// Backoff is a Retry implementation that uses exponential backoff with jitter. +type Backoff struct { + // InitialInterval sets the time interval for the first retry delay. + InitialInterval time.Duration + // MaxInterval is the largest amount of time that should elapse between retries. + MaxInterval time.Duration + + bo *backoff.ExponentialBackOff +} + +// NewBackoff returns a Backoff with sensible defaults set. +func NewBackoff() *Backoff { + bo := backoff.NewExponentialBackOff() + bo.MaxInterval = BackoffMaxInterval + bo.InitialInterval = BackoffInitialInterval + bo.MaxElapsedTime = 0 + return &Backoff{ + bo: bo, + } +} + +// NewBackoffWithOptions returns a Backoff with caller-supplied parameters. +func NewBackoffWithOptions(initialInterval, maxInterval time.Duration) *Backoff { + bo := backoff.NewExponentialBackOff() + bo.MaxInterval = maxInterval + bo.InitialInterval = initialInterval + bo.MaxElapsedTime = 0 + return &Backoff{ + bo: bo, + } +} + +// RetryAfter implementation that uses exponential backoff. +func (n *Backoff) RetryAfter(_ *RetryDetails) time.Duration { + if next := n.bo.NextBackOff(); next != backoff.Stop { + return next + } + return Stop +} + +// BackoffRetryProvider is a default RetryProvider that returns a Backoff. +type BackoffRetryProvider struct{} + +// New returns an initialized Retry. +func (r *BackoffRetryProvider) New() Retry { + return NewBackoff() +} + +// Do performs op as a retryable operation, using retry to determine when and if to retry. +// Do will only continue if a OperationNotDone{} is returned. If op() returns another error +// or no error, Do will finish. 
+// OperationNotDone{} may have an error inside of it, indicating that it's a retryable error. +func Do(ctx context.Context, op Operation, retryProvider RetryProvider) error { + retry := retryProvider.New() + for { + details, err := op(ctx) + // Responsible for returning nil error too. + if _, ok := err.(OperationNotDone); !ok { + return err + } + + w := retry.RetryAfter(details) + if w == Stop { + if e, ok := err.(OperationNotDone); ok { + if e.Err != nil { + return e.Err + } + } + return OperationNotDone{} + } + + t := time.NewTimer(w) + select { + case <-ctx.Done(): + t.Stop() + glog.Info("retryable operation canceled by context") + return OperationNotDone{} + case <-t.C: + } + } +} diff --git a/mmv1/third_party/terraform/tpgdclresource/schema.go b/mmv1/third_party/terraform/tpgdclresource/schema.go new file mode 100755 index 000000000000..17fe5d167de0 --- /dev/null +++ b/mmv1/third_party/terraform/tpgdclresource/schema.go @@ -0,0 +1,211 @@ +package tpgdclresource + +import ( + "fmt" + "strings" +) + +// FieldType is an enum of all the types a field can be. +type FieldType int + +const ( + // UnknownType refers to a Field that does not have a proper type defined. + UnknownType FieldType = iota + // MapType refers to a Field that is a Map (typically from string to string). + MapType + // EnumType refers to a Field that is an Enum. + EnumType + // ArrayType refers to a Field that is an Array of any kind. + ArrayType + // ObjectType refers to a Field that is a dictionary with set subfields. + ObjectType + // ReferenceType refers to a Field that is referencing another GCP resource. + ReferenceType + // DoubleType refers to a Field that is a Double. + DoubleType + // StringType refers to a Field that is a String. + StringType + // TimeType refers to a Field that is a Timestamp. + TimeType + // IntegerType refers to a Field that is an Integer. + IntegerType + // BooleanType refers to a Field that is a Boolean. 
+ BooleanType
+ // StatusType refers to a Field that is a Status.
+ StatusType
+ // ReusedType refers to a Field that does not require additional generation because it
+ // is the same type as another field already being generated.
+ ReusedType
+ // UntypedType refers to a type that has no type (in Go-speak, that's an interface{}).
+ // This can only be used for untyped maps (in proto-speak, google.protobuf.Struct) and cannot be used anywhere else.
+ // This will not work properly if used outside of a map.
+ UntypedType
+)
+
+// Schema is the entire OpenAPI schema.
+type Schema struct {
+ Info *Info `yaml:"info"`
+ Paths *Paths `yaml:"paths"`
+ Components *Components `yaml:"components"`
+}
+
+// ResolveDefinition returns the schema component being referenced.
+func (s *Schema) ResolveDefinition(ref string) (*Component, error) {
+ if after, ok := strings.CutPrefix(ref, "#/components/schemas/"); ok {
+ if props, ok := s.Components.Schemas[after]; ok {
+ return props, nil
+ }
+ }
+ return nil, fmt.Errorf("could not resolve reference %q", ref)
+}
+
+// Link is a URL plus text that should be displayed to an end user, usually in docs.
+type Link struct {
+ Text string `yaml:"text"`
+ URL string `yaml:"url"`
+}
+
+// Info is the Info block for the OpenAPI schema.
+type Info struct {
+ Title string `yaml:"title"`
+ Description string `yaml:"description"`
+ StructName string `yaml:"x-dcl-struct-name,omitempty"`
+ HasIAM bool `yaml:"x-dcl-has-iam"`
+ Mutex string `yaml:"x-dcl-mutex,omitempty"`
+ Note string `yaml:"x-dcl-note,omitempty"`
+ Warning string `yaml:"x-dcl-warning,omitempty"`
+ Reference *Link `yaml:"x-dcl-ref,omitempty"`
+ Guides []*Link `yaml:"x-dcl-guides,omitempty"`
+}
+
+// ResourceTitle returns the title of this resource.
+func (i *Info) ResourceTitle() string {
+ return strings.Split(i.Title, "/")[1]
+}
+
+// Paths is the Paths block for the OpenAPI schema.
+type Paths struct { + Get *Path `yaml:"get"` + Apply *Path `yaml:"apply"` + Delete *Path `yaml:"delete,omitempty"` + DeleteAll *Path `yaml:"deleteAll,omitempty"` + List *Path `yaml:"list,omitempty"` +} + +// Path is the Path used for a method. +type Path struct { + Description string `yaml:"description"` + Parameters []PathParameters `yaml:"parameters"` +} + +// PathParameters is the Parameters for a given Path. +type PathParameters struct { + Name string `yaml:"name"` + Required bool `yaml:"required"` + Description string `yaml:"description,omitempty"` + Schema *PathParametersSchema `yaml:"schema,omitempty"` +} + +// PathParametersSchema is used to store the type. It is typically set to "string" +type PathParametersSchema struct { + Type string `yaml:"type"` +} + +// Components maps a Component name to the Component. +type Components struct { + Schemas map[string]*Component +} + +// Component contains all the information for a component (resource or reused type) +type Component struct { + Title string `yaml:"title,omitempty"` + ID string `yaml:"x-dcl-id,omitempty"` + Locations []string `yaml:"x-dcl-locations,omitempty"` + UsesStateHint bool `yaml:"x-dcl-uses-state-hint,omitempty"` + ParentContainer string `yaml:"x-dcl-parent-container,omitempty"` + LabelsField string `yaml:"x-dcl-labels,omitempty"` + HasCreate bool `yaml:"x-dcl-has-create"` + HasIAM bool `yaml:"x-dcl-has-iam"` + ReadTimeout int `yaml:"x-dcl-read-timeout"` + ApplyTimeout int `yaml:"x-dcl-apply-timeout"` + DeleteTimeout int `yaml:"x-dcl-delete-timeout"` + + // TODO: It appears that reused types are not fully conforming to the same spec as the rest of the components. + // Reused Types seem to follow the property spec, but not the component spec. + // This means that we need to have component "inline" all of the schema property fields to avoid having to override YAML parsing logic. + SchemaProperty Property `yaml:",inline"` +} + +// Property contains all information for a field (i.e. 
property) +type Property struct { + Type string `yaml:"type,omitempty"` + Format string `yaml:"format,omitempty"` + AdditionalProperties *Property `yaml:"additionalProperties,omitempty"` + Ref string `yaml:"$ref,omitempty"` + GoName string `yaml:"x-dcl-go-name,omitempty"` + GoType string `yaml:"x-dcl-go-type,omitempty"` + ReadOnly bool `yaml:"readOnly,omitempty"` + Description string `yaml:"description,omitempty"` + Immutable bool `yaml:"x-kubernetes-immutable,omitempty"` + Conflicts []string `yaml:"x-dcl-conflicts,omitempty"` + Default any `yaml:"default,omitempty"` + ServerDefault bool `yaml:"x-dcl-server-default,omitempty"` + ServerGeneratedParameter bool `yaml:"x-dcl-server-generated-parameter,omitempty"` + Sensitive bool `yaml:"x-dcl-sensitive,omitempty"` + ForwardSlashAllowed bool `yaml:"x-dcl-forward-slash-allowed,omitempty"` + SendEmpty bool `yaml:"x-dcl-send-empty,omitempty"` + ResourceReferences []*PropertyResourceReference `yaml:"x-dcl-references,omitempty"` + Enum []string `yaml:"enum,omitempty"` + ListType string `yaml:"x-dcl-list-type,omitempty"` + Items *Property `yaml:"items,omitempty"` + Unreadable bool `yaml:"x-dcl-mutable-unreadable,omitempty"` + ExtractIfEmpty bool `yaml:"x-dcl-extract-if-empty,omitempty"` + Required []string `yaml:"required,omitempty"` + Properties map[string]*Property `yaml:"properties,omitempty"` + Deprecated bool `yaml:"x-dcl-deprecated,omitempty"` + OptionalType bool `yaml:"x-dcl-optional-type,omitempty"` + Parameter bool `yaml:"x-dcl-parameter,omitempty"` + HasLongForm bool `yaml:"x-dcl-has-long-form,omitempty"` +} + +// IsOptional returns if the type is an optional type. +func (p *Property) IsOptional() bool { + return p.OptionalType +} + +// TypeEnum returns an enum referring to the type. 
+func (p *Property) TypeEnum() FieldType { + switch p.Type { + case "string": + if p.GoType != "" && p.GoType != "string" { + return EnumType + } else if len(p.ResourceReferences) > 0 { + return ReferenceType + } + return StringType + case "OptionalString": + return StringType + case "number", "OptionalFloat": + return DoubleType + case "integer", "OptionalInt": + return IntegerType + case "boolean", "OptionalBool": + return BooleanType + case "object": + if p.AdditionalProperties != nil && p.AdditionalProperties.GoType != "" && len(p.AdditionalProperties.Properties) != 0 { + return MapType + } + return ObjectType + case "array": + return ArrayType + } + return UnknownType +} + +// PropertyResourceReference contains all resource reference information. +type PropertyResourceReference struct { + Resource string `yaml:"resource"` + Field string `yaml:"field"` + Format string `yaml:"format,omitempty"` + Parent bool `yaml:"parent,omitempty"` +} diff --git a/mmv1/third_party/terraform/tpgdclresource/strings.go b/mmv1/third_party/terraform/tpgdclresource/strings.go new file mode 100755 index 000000000000..1e1fd51d3830 --- /dev/null +++ b/mmv1/third_party/terraform/tpgdclresource/strings.go @@ -0,0 +1,149 @@ +package tpgdclresource + +import ( + "regexp" + "strings" +) + +// Map from initialism -> TitleCase variant +// We can assume camelCase is the same as TitleCase except that we downcase the +// first segment +var initialisms = map[string]string{ + "ai": "AI", + "ip": "IP", + "os": "OS", + "ipv4": "IPv4", + "ipv6": "IPv6", + "oauth": "OAuth", + "oauth2": "OAuth2", + "tpu": "TPU", + "vpc": "VPC", + "v1beta1": "V1Beta1", +} + +// SnakeToTitleCase converts a snake_case string to TitleCase / Go struct case. +func SnakeToTitleCase(s string) string { + return strings.Join(SnakeToTitleParts(s), "") +} + +// SnakeToJSONCase converts a snake_case string to jsonCase / camelCase, for use +// in JSON serialization. 
+func SnakeToJSONCase(s string) string { + parts := SnakeToTitleParts(s) + if len(parts) > 0 { + parts[0] = strings.ToLower(parts[0]) + } + + return strings.Join(parts, "") +} + +// SnakeToTitleParts returns the parts of a snake_case string titlecased as an +// array, taking into account common initialisms. +func SnakeToTitleParts(s string) []string { + parts := []string{} + segments := strings.Split(s, "_") + for _, seg := range segments { + if v, ok := initialisms[seg]; ok { + parts = append(parts, v) + } else { + if len(seg) < 1 { + continue + } + parts = append(parts, strings.ToUpper(seg[0:1])+seg[1:]) + } + } + + return parts +} + +// SnakeToTitleCasePath converts a resource path from snake to title case. For +// example: foo_bar.baz.qux -> FooBar.Baz.Qux +func SnakeToTitleCasePath(s, sep string) string { + str := []string{} + for _, p := range strings.Split(s, sep) { + str = append(str, SnakeToTitleCase(p)) + } + return strings.Join(str, sep) +} + +// TitleToCamelCasePath converts a resource path from title case to lower title case. +// For example: FooBar.Baz.Qux -> fooBar.baz.qux +func TitleToCamelCasePath(s string) string { + // Lowercase the first character and every character following a . + parts := strings.Split(s, ".") + for i, part := range parts { + parts[i] = strings.ToLower(part[:1]) + part[1:] + } + return strings.Join(parts, ".") +} + +// ProtoCamelCase converts a snake case name to a upper camel case name using the +// go protoc special rules: convert to camel case, except when +// the character following the underscore is a digit; e.g., +// foo_bar_2 -> FooBar_2. +// From: http://google3/net/goa/codegen/names.go;l=14;rcl=294425921 +func ProtoCamelCase(s string) string { + // Invariant: if the next letter is lower case, it must be converted + // to upper case. + // That is, we process a word at a time, where words are marked by _ or + // upper case letter. Digits are treated as words. 
+ var b []byte + for i := 0; i < len(s); i++ { + c := s[i] + switch { + case c == '.' && i+1 < len(s) && isASCIILower(s[i+1]): + // Skip over '.' in ".{{lowercase}}". + case c == '.': + b = append(b, '_') // convert '.' to '_' + case c == '_' && (i == 0 || s[i-1] == '.'): + // Convert initial '_' to ensure we start with a capital letter. + // Do the same for '_' after '.' to match historic behavior. + b = append(b, 'X') // convert '_' to 'X' + case c == '_' && i+1 < len(s) && isASCIILower(s[i+1]): + // Skip over '_' in "_{{lowercase}}". + case isASCIIDigit(c): + b = append(b, c) + default: + // Assume we have a letter now - if not, it's a bogus identifier. + // The next word is a sequence of characters that must start upper case. + if isASCIILower(c) { + c -= 'a' - 'A' // convert lowercase to uppercase + } + b = append(b, c) + + // Accept lower case sequence that follows. + for ; i+1 < len(s) && isASCIILower(s[i+1]); i++ { + b = append(b, s[i+1]) + } + } + } + return string(b) +} + +func isASCIILower(c byte) bool { + return 'a' <= c && c <= 'z' +} + +func isASCIIDigit(c byte) bool { + return '0' <= c && c <= '9' +} + +// TitleToSnakeCase takes in a TitleCase string and returns a snake_case string. +func TitleToSnakeCase(s string) string { + for k, v := range initialisms { + kCap := strings.ToUpper(k[0:1]) + k[1:] + s = strings.Replace(s, v, kCap, -1) + } + str := regexp.MustCompile("(.)([A-Z][a-z]+)").ReplaceAllString(s, "${1}_${2}") + return strings.ToLower(regexp.MustCompile("([a-z0-9])([A-Z])").ReplaceAllString(str, "${1}_${2}")) +} + +// StringSliceContains returns true if the slice ss contains string s. 
+func StringSliceContains(s string, ss []string) bool { + for _, st := range ss { + if st == s { + return true + } + } + return false +} diff --git a/mmv1/third_party/terraform/tpgdclresource/timestamp.go b/mmv1/third_party/terraform/tpgdclresource/timestamp.go new file mode 100755 index 000000000000..5c268b4b7cf5 --- /dev/null +++ b/mmv1/third_party/terraform/tpgdclresource/timestamp.go @@ -0,0 +1,17 @@ +package tpgdclresource + +import ( + "time" +) + +// ProtoToTime converts a string from a DCL proto time string to a time.Time. +func ProtoToTime(s string) time.Time { + // Invalid time values will be picked up downstream. + t, _ := time.Parse(time.RFC3339, s) + return t +} + +// TimeToProto converts a time.Time to a proto time string. +func TimeToProto(t time.Time) string { + return t.Format(time.RFC3339) +} diff --git a/mmv1/third_party/terraform/tpgdclresource/tpgtools_utils.go b/mmv1/third_party/terraform/tpgdclresource/tpgtools_utils.go index 2e60238e3daa..13f79ecfabdb 100644 --- a/mmv1/third_party/terraform/tpgdclresource/tpgtools_utils.go +++ b/mmv1/third_party/terraform/tpgdclresource/tpgtools_utils.go @@ -5,7 +5,6 @@ import ( "fmt" "log" - dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" "github.com/hashicorp/errwrap" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) @@ -15,7 +14,7 @@ func OldValue(old, new interface{}) interface{} { } func HandleNotFoundDCLError(err error, d *schema.ResourceData, resourceName string) error { - if dcl.IsNotFound(err) { + if IsNotFound(err) { log.Printf("[WARN] Removing %s because it's gone", resourceName) // The resource doesn't exist anymore d.SetId("") @@ -46,3 +45,35 @@ func ResourceContainerAwsNodePoolCustomizeDiffFunc(_ context.Context, diff *sche return nil } + +type DCLLogger struct{} + +// Fatal records Fatal errors. +func (l DCLLogger) Fatal(args ...interface{}) { + log.Fatal(args...) +} + +// Fatalf records Fatal errors with added arguments. 
+func (l DCLLogger) Fatalf(format string, args ...interface{}) { + log.Fatalf(fmt.Sprintf("[DEBUG][DCL FATAL] %s", format), args...) +} + +// Info records Info errors. +func (l DCLLogger) Info(args ...interface{}) { + log.Print(args...) +} + +// Infof records Info errors with added arguments. +func (l DCLLogger) Infof(format string, args ...interface{}) { + log.Printf(fmt.Sprintf("[DEBUG][DCL INFO] %s", format), args...) +} + +// Warningf records Warning errors with added arguments. +func (l DCLLogger) Warningf(format string, args ...interface{}) { + log.Printf(fmt.Sprintf("[DEBUG][DCL WARNING] %s", format), args...) +} + +// Warning records Warning errors. +func (l DCLLogger) Warning(args ...interface{}) { + log.Print(args...) +} diff --git a/mmv1/third_party/terraform/tpgdclresource/transport.go b/mmv1/third_party/terraform/tpgdclresource/transport.go new file mode 100755 index 000000000000..ec10f5545e98 --- /dev/null +++ b/mmv1/third_party/terraform/tpgdclresource/transport.go @@ -0,0 +1,274 @@ +package tpgdclresource + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "regexp" + "strings" + "time" + + "google.golang.org/api/googleapi" + "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" + apihttp "google.golang.org/api/transport/http" +) + +// SendRequest applies the credentials in the provided Config to a request with the specified +// verb, url, and body. It returns the Response from the server if the request returns a +// 2XX success code, or the *googleapi.Error if it returns any other code. The retry is +// optional; if supplied HTTP errors that are deemed temporary will be retried according +// to the policy implemented by the retry. 
+func SendRequest(ctx context.Context, c *Config, verb, url string, body *bytes.Buffer, retryProvider RetryProvider) (*RetryDetails, error) { + hdrs := http.Header{} + for h, v := range c.header { + for _, s := range v { + hdrs.Add(h, s) + } + } + hdrs.Set("User-Agent", c.UserAgent()) + hdrs.Set("Content-Type", c.contentType) + + u, err := AddQueryParams(url, c.queryParams) + if err != nil { + return nil, err + } + + hasUserProjectOverride, billingProject := UserProjectOverride(c, u) + if hasUserProjectOverride { + hdrs.Set("X-Goog-User-Project", billingProject) + } + + mtls, err := GetMTLSEndpoint(u) + if err != nil { + return nil, err + } + + options := []option.ClientOption{ + option.WithScopes(Scopes...), + internaloption.WithDefaultEndpoint(u), + internaloption.WithDefaultMTLSEndpoint(mtls), + } + for _, o := range c.clientOptions { + options = append(options, o) + } + + httpClient, endpoint, err := apihttp.NewClient(ctx, options...) + if err != nil { + return nil, err + } + if endpoint != "" { + u = endpoint + } + + if _, ok := httpClient.Transport.(loggingTransport); !ok { + // In cases where the config has been created using WithHTTPClient() we want to + // replace the default transport with our logging transport only once. + httpClient = &http.Client{ + Transport: loggingTransport{ + underlyingTransport: httpClient.Transport, + logger: c.Logger, + }, + CheckRedirect: httpClient.CheckRedirect, + Jar: httpClient.Jar, + Timeout: httpClient.Timeout, + } + } + + if body == nil { + // A nil value indicates an empty request body. 
+ body = &bytes.Buffer{} + } + bodyBytes := body.Bytes() + req, err := http.NewRequestWithContext(ctx, verb, u, bytes.NewReader(bodyBytes)) + if err != nil { + return nil, err + } + req.Header = hdrs + + var res *http.Response + if retryProvider == nil { + res, err = httpClient.Do(req) + if err != nil { + return nil, err + } + err = googleapi.CheckResponse(res) + if err != nil { + // If this is an error, we will not be returning the + // body, so we should close it. + googleapi.CloseBody(res) + return nil, err + } + return &RetryDetails{Request: req, Response: res}, nil + } + + // The start time of request retries is used to determine if an HTTP error is still retryable. + start := time.Now() + err = Do(ctx, func(ctx context.Context) (*RetryDetails, error) { + // Reset req body before http call. + req.Body = io.NopCloser(bytes.NewReader(bodyBytes)) + res, err = httpClient.Do(req) + if err != nil { + return nil, err + } + if err := googleapi.CheckResponse(res); err != nil { + // If this is an error, we will not be returning the + // body, so we should close it. + googleapi.CloseBody(res) + if IsRetryableRequestError(c, err, false, start) { + return nil, OperationNotDone{Err: err} + } + return nil, err + } + req.Body = io.NopCloser(bytes.NewReader(bodyBytes)) + return &RetryDetails{Request: req.Clone(ctx), Response: res}, err + }, retryProvider) + if err != nil { + return nil, err + } + req.Body = io.NopCloser(bytes.NewReader(bodyBytes)) + return &RetryDetails{Request: req, Response: res}, nil +} + +// AddQueryParams adds the specified query parameters to the specified url. 
+func AddQueryParams(rawurl string, params map[string]string) (string, error) { + u, err := url.Parse(rawurl) + if err != nil { + return "", err + } + q := u.Query() + for k, v := range params { + q.Set(k, v) + } + u.RawQuery = q.Encode() + return u.String(), nil +} + +// ParseResponse reads a JSON response into a Go struct +func ParseResponse(resp *http.Response, ptr any) error { + defer resp.Body.Close() + return json.NewDecoder(resp.Body).Decode(ptr) +} + +// IsRetryableRequestError returns true if an error is determined to be +// a common retryable error based on heuristics about GCP API behaviours. +// The start time is used to determine if errors with custom timeouts should be retried. +func IsRetryableRequestError(c *Config, err error, retryNotFound bool, start time.Time) bool { + // Return transient errors that should be retried. + if IsRetryableHTTPError(err, c.codeRetryability, start) || (retryNotFound && IsNotFound(err)) { + c.Logger.Infof("Error appears retryable: %s", err) + return true + } + + if IsNonRetryableHTTPError(err, c.codeRetryability, start) { + c.Logger.Infof("Error appears not to be retryable: %s", err) + return false + } + + // Assume other errors are retryable. + c.Logger.Warningf("Unexpected HTTP error, assuming retryable: %s", err) + return true +} + +// Nprintf takes in a format string (with format {{key}} instead of %s) and a params map. +// Returns filled string. +func Nprintf(format string, params map[string]any) string { + pq := strings.Split(format, "?") + path := pq[0] + query := "" + if len(pq) == 2 { + query = pq[1] + } else if len(pq) > 2 { + return "error: too many path separators." 
+ } + for key, val := range params { + r := regexp.MustCompile(`{{\s?` + regexp.QuoteMeta(key) + `\s?}}`) + path = r.ReplaceAllString(path, fmt.Sprintf("%v", val)) + } + for key, val := range params { + r := regexp.MustCompile(`{{\s?` + regexp.QuoteMeta(key) + `\s?}}`) + query = r.ReplaceAllString(query, url.QueryEscape(fmt.Sprintf("%v", val))) + } + if query != "" { + return path + "?" + query + } + return path +} + +// URL takes in a partial URL, default base path, optional user-specified base-path and a params map. +func URL(urlpath, basePath, userPath string, params map[string]any) string { + if userPath != "" { + if strings.HasSuffix(userPath, "/") { + userPath = userPath[:len(userPath)-1] + } + return Nprintf(strings.Join([]string{userPath, urlpath}, "/"), params) + } + if before, ok := strings.CutSuffix(basePath, "/"); ok { + basePath = before + } + return Nprintf(strings.Join([]string{basePath, urlpath}, "/"), params) +} + +// ResponseBodyAsJSON reads the response body from a *RetryDetails and returns +// it as unstructured JSON in a map[string]interface{}. +func ResponseBodyAsJSON(retry *RetryDetails) (map[string]any, error) { + defer retry.Response.Body.Close() + b, err := io.ReadAll(retry.Response.Body) + if err != nil { + return nil, err + } + + var m map[string]any + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + + return m, nil +} + +// GetMTLSEndpoint returns the API endpoint used for mTLS authentication. 
+func GetMTLSEndpoint(baseEndpoint string) (string, error) { + u, err := url.Parse(baseEndpoint) + if err != nil { + return "", err + } + portParts := strings.Split(u.Host, ":") + if len(portParts) == 0 || portParts[0] == "" { + return "", fmt.Errorf("api endpoint %q is missing host", u.String()) + } + domainParts := strings.Split(portParts[0], ".") + if len(domainParts) > 1 { + u.Host = fmt.Sprintf("%s.mtls.%s", domainParts[0], strings.Join(domainParts[1:], ".")) + } else { + u.Host = fmt.Sprintf("%s.mtls", domainParts[0]) + } + if len(portParts) > 1 { + u.Host = fmt.Sprintf("%s:%s", u.Host, portParts[1]) + } + return u.String(), nil +} + +// UserProjectOverride returns true if user project override should be used and the project that should be set. +func UserProjectOverride(c *Config, url string) (bool, string) { + if !c.userOverrideProject { + return false, "" + } + + if c.billingProject != "" { + return true, c.billingProject + } + + r := regexp.MustCompile(`projects/([a-z0-9A-Z-:_]*)/`) + g := r.FindStringSubmatch(url) + if g != nil && len(g) > 1 { + return true, g[1] + } + + // This URL does not contain a project and no project was found in the URL. + // This most likely means a non-project resource was used accidentally. + return false, "" +} diff --git a/mmv1/third_party/terraform/tpgdclresource/type.go b/mmv1/third_party/terraform/tpgdclresource/type.go new file mode 100755 index 000000000000..c7de8454f3fa --- /dev/null +++ b/mmv1/third_party/terraform/tpgdclresource/type.go @@ -0,0 +1,51 @@ +package tpgdclresource + +// Bool converts a bool to a *bool +func Bool(b bool) *bool { + return &b +} + +// Float64 converts a float64 to *float64 +func Float64(f float64) *float64 { + return &f +} + +// Float64OrNil converts a float64 to *float64, returning nil if it's empty (0.0). 
+func Float64OrNil(f float64) *float64 { + if f == 0.0 { + return nil + } + return &f +} + +// Int64 converts an int64 to *int64 +func Int64(i int64) *int64 { + return &i +} + +// Int64OrNil converts an int64 to *int64, returning nil if it's empty (0). +func Int64OrNil(i int64) *int64 { + if i == 0 { + return nil + } + return &i +} + +// String converts a string to a *string +func String(s string) *string { + return &s +} + +// StringWithError converts a string to a *string, returning a nil error to +// satisfy type signatures that expect one. +func StringWithError(s string) (*string, error) { + return &s, nil +} + +// StringOrNil converts a string to a *string, returning nil if it's empty (""). +func StringOrNil(s string) *string { + if s == "" { + return nil + } + return &s +} diff --git a/mmv1/third_party/terraform/tpgdclresource/update.go b/mmv1/third_party/terraform/tpgdclresource/update.go new file mode 100755 index 000000000000..06e4d0f0673c --- /dev/null +++ b/mmv1/third_party/terraform/tpgdclresource/update.go @@ -0,0 +1,106 @@ +package tpgdclresource + +import ( + "fmt" + "regexp" + "sort" + "strings" + "unicode" + "unicode/utf8" + + "bitbucket.org/creachadair/stringset" +) + +// UpdateMask creates a Update Mask string according to https://google.aip.dev/161 +func UpdateMask(ds []*FieldDiff) string { + var ss []string + for _, v := range ds { + ss = append(ss, convertUpdateMaskVal(v.FieldName)) + } + + dupesRemoved := stringset.New(ss...).Elements() + + // Sorting the entries is optional, but makes it easier to read + test. 
+ sort.Strings(dupesRemoved) + return strings.Join(dupesRemoved, ",") +} + +func titleCaseToCamelCase(s string) string { + r, n := utf8.DecodeRuneInString(s) + p := string(unicode.ToLower(r)) + p = p + s[n:] + return p +} + +// Diffs come in the form Http.AuthInfo.Password +// Needs to be in the form http.authInfo.password +func convertUpdateMaskVal(s string) string { + r := regexp.MustCompile(`\[\d\]`) + + // camelCase string (right now, it's in TitleCase). + parts := strings.Split(s, ".") + var p []string + for _, q := range parts { + if r.MatchString(q) { + // Indexing into a repeated field. + bareFieldName := r.ReplaceAllString(q, "") + p = append(p, titleCaseToCamelCase(bareFieldName)) + + // Repeated fields cannot be intermediary in a field mask, so we + // must terminate the field mask here. + break + } else { + p = append(p, titleCaseToCamelCase(q)) + } + } + + // * notation should only be used if this is not the last field. + // Example: res.array.* should be res.array, but res.array.*.bar means "update only bar in all my array fields" + if p[len(p)-1] == "*" { + p = p[0 : len(p)-1] + } + + return strings.Join(p, ".") +} + +// TopLevelUpdateMask returns only the top-level fields. +func TopLevelUpdateMask(ds []*FieldDiff) string { + var ss []string + for _, v := range ds { + part := strings.Split(v.FieldName, ".")[0] + ss = append(ss, convertUpdateMaskVal(part)) + } + + dupesRemoved := stringset.New(ss...).Elements() + + // Sorting the entries is optional, but makes it easier to read + test. + sort.Strings(dupesRemoved) + return strings.Join(dupesRemoved, ",") +} + +// SnakeCaseUpdateMask returns the update mask, but all fields are snake case. +func SnakeCaseUpdateMask(ds []*FieldDiff) string { + var ss []string + for _, v := range ds { + ss = append(ss, TitleToSnakeCase(convertUpdateMaskVal(v.FieldName))) + } + dupesRemoved := stringset.New(ss...).Elements() + + // Sorting the entries is optional, but makes it easier to read + test. 
+ sort.Strings(dupesRemoved) + return strings.Join(dupesRemoved, ",") +} + +// UpdateMaskWithPrefix returns a Standard Update Mask with a prefix attached. +func UpdateMaskWithPrefix(ds []*FieldDiff, prefix string) string { + um := UpdateMask(ds) + parts := strings.Split(um, ",") + + var ss []string + + for _, part := range parts { + ss = append(ss, fmt.Sprintf("%s.%s", prefix, part)) + } + + return strings.Join(ss, ",") +} diff --git a/mmv1/third_party/terraform/tpgdclresource/utils.go b/mmv1/third_party/terraform/tpgdclresource/utils.go new file mode 100755 index 000000000000..f20709050e4d --- /dev/null +++ b/mmv1/third_party/terraform/tpgdclresource/utils.go @@ -0,0 +1,16 @@ +package tpgdclresource + +import ( + "time" + + "github.com/google/go-cpy/cpy" +) + +// Copy makes a deep copy of an interface. +func Copy(src any) any { + copier := cpy.New( + cpy.Shallow(time.Time{}), + cpy.IgnoreAllUnexported(), + ) + return copier.Copy(src) +} diff --git a/mmv1/third_party/terraform/tpgdclresource/validate.go b/mmv1/third_party/terraform/tpgdclresource/validate.go new file mode 100755 index 000000000000..6ff0fbbdffc4 --- /dev/null +++ b/mmv1/third_party/terraform/tpgdclresource/validate.go @@ -0,0 +1,69 @@ +package tpgdclresource + +import ( + "fmt" + "strings" +) + +// Required takes in a DCL resource (represented as an interface) and a dot-notated path (with JSON names). If the path is not set, an error will be returned. +func Required(r any, path string) error { + var m map[string]any + m, err := ConvertToMap(r) + if err != nil { + return err + } + val, err := GetMapEntry(m, strings.Split(path, ".")) + if err != nil { + return err + } else if val == nil { + return fmt.Errorf("required value %q could not be found", path) + } + return nil +} + +// RequiredParameter takes in a value (typically one that's not exported in JSON) and returns an error if it is not set. 
+func RequiredParameter(v any, name string) error { + if IsZeroValue(v) { + return fmt.Errorf("%q must be set", name) + } + return nil +} + +func countOfNonempty(v ...any) int { + i := 0 + for _, val := range v { + if !IsEmptyValueIndirect(val) { + i++ + } + } + return i +} + +// ValidateAtMostOneOfFieldsSet returns an error if more than one of the provided values is nonempty. +func ValidateAtMostOneOfFieldsSet(fieldNames []string, v ...any) error { + if countOfNonempty(v...) > 1 { + return fmt.Errorf("more than one value set: %v", fieldNames) + } + return nil +} + +// ValidateAtLeastOneOfFieldsSet returns an error if none of the provided values is nonempty. +func ValidateAtLeastOneOfFieldsSet(fieldNames []string, v ...any) error { + if countOfNonempty(v...) == 0 { + return fmt.Errorf("zero values set: %v", fieldNames) + } + return nil +} + +// ValidateExactlyOneOfFieldsSet returns an error if 0 or 2+ of the provided values is nonempty. +func ValidateExactlyOneOfFieldsSet(fieldNames []string, v ...any) error { + if countOfNonempty(v...) != 1 { + return fmt.Errorf("not exactly one value set: %v", fieldNames) + } + return nil +} + +// AnySet returns true if any of the values provided is nonempty. +func AnySet(v ...any) bool { + return countOfNonempty(v...) 
> 0 +} diff --git a/mmv1/third_party/terraform/tpgresource/tpgtools_custom_flattens.go.tmpl b/mmv1/third_party/terraform/tpgresource/tpgtools_custom_flattens.go.tmpl deleted file mode 100644 index 8c32d4eca8e3..000000000000 --- a/mmv1/third_party/terraform/tpgresource/tpgtools_custom_flattens.go.tmpl +++ /dev/null @@ -1,39 +0,0 @@ -package tpgresource - -import ( - containeraws "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws{{ $.DCLVersion }}" - containerazure "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containerazure{{ $.DCLVersion }}" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" -) - -func FlattenContainerAwsNodePoolManagement(obj *containeraws.NodePoolManagement, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - if obj == nil { - return nil - } - transformed := make(map[string]interface{}) - - if obj.AutoRepair == nil || obj.Empty() { - transformed["auto_repair"] = false - } else { - transformed["auto_repair"] = obj.AutoRepair - } - - return []interface{}{transformed} -} - -func FlattenContainerAzureNodePoolManagement(obj *containerazure.NodePoolManagement, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - if obj == nil { - return nil - } - transformed := make(map[string]interface{}) - - if obj.AutoRepair == nil || obj.Empty() { - transformed["auto_repair"] = false - } else { - transformed["auto_repair"] = obj.AutoRepair - } - - return []interface{}{transformed} -} From 7ddcef157edc2da3bb8a846ddf789ee39662eca4 Mon Sep 17 00:00:00 2001 From: Chris Hawk Date: Tue, 17 Mar 2026 16:44:39 -0700 Subject: [PATCH 02/13] Clean up templates; get GA building and unit tests passing --- GNUmakefile | 2 +- ...esource_apikeys_key_generated_test.go.tmpl | 6 +- ...esource_assured_workloads_workload.go.tmpl | 8 +- 
..._workloads_workload_generated_test.go.tmpl | 14 +- ...y_delivery_pipeline_generated_test.go.tmpl | 16 +- ..._clouddeploy_target_generated_test.go.tmpl | 16 +- .../resource_container_aws_cluster.go.tmpl | 220 +- ...ntainer_aws_cluster_generated_test.go.tmpl | 15 +- .../resource_container_aws_node_pool.go.tmpl | 184 +- ...ainer_aws_node_pool_generated_test.go.tmpl | 15 +- ...tainer_azure_client_generated_test.go.tmpl | 17 +- .../resource_container_azure_cluster.go.tmpl | 204 +- ...ainer_azure_cluster_generated_test.go.tmpl | 17 +- ...resource_container_azure_node_pool.go.tmpl | 96 +- ...ner_azure_node_pool_generated_test.go.tmpl | 17 +- ...urce_dataplex_asset_generated_test.go.tmpl | 17 +- ...ource_dataplex_lake_generated_test.go.tmpl | 17 +- ...ource_dataplex_zone_generated_test.go.tmpl | 17 +- .../dataproc/autoscaling_policy.go.tmpl | 571 - .../autoscaling_policy_internal.go.tmpl | 2102 -- .../autoscaling_policy_schema.go.tmpl | 250 - .../services/dataproc/cluster.go.tmpl | 3457 --- .../dataproc/cluster_internal.go.tmpl | 18408 ---------------- .../services/dataproc/cluster_schema.go.tmpl | 1941 -- .../services/dataproc/dataproc_utils.go.tmpl | 32 - ...esource_dataproc_workflow_template.go.tmpl | 6 +- ...rebaserules_release_generated_test.go.tmpl | 17 +- ...rebaserules_ruleset_generated_test.go.tmpl | 17 +- ...esource_gke_hub_feature_membership.go.tmpl | 294 +- ...ce_gke_hub_feature_membership_test.go.tmpl | 11 +- ...tcha_enterprise_key_generated_test.go.tmpl | 17 +- 31 files changed, 579 insertions(+), 27442 deletions(-) delete mode 100644 mmv1/third_party/terraform/services/dataproc/autoscaling_policy.go.tmpl delete mode 100644 mmv1/third_party/terraform/services/dataproc/autoscaling_policy_internal.go.tmpl delete mode 100644 mmv1/third_party/terraform/services/dataproc/autoscaling_policy_schema.go.tmpl delete mode 100644 mmv1/third_party/terraform/services/dataproc/cluster.go.tmpl delete mode 100644 
mmv1/third_party/terraform/services/dataproc/cluster_internal.go.tmpl delete mode 100644 mmv1/third_party/terraform/services/dataproc/cluster_schema.go.tmpl delete mode 100644 mmv1/third_party/terraform/services/dataproc/dataproc_utils.go.tmpl diff --git a/GNUmakefile b/GNUmakefile index 0052f3af639a..1439a2bcd41d 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -61,7 +61,7 @@ ifneq ($(SKIP_CLEAN),) endif endif -terraform build provider: validate_environment clean-provider mmv1 tpgtools +terraform build provider: validate_environment clean-provider mmv1 @echo "Provider generation process finished for $(VERSION) in $(OUTPUT_PATH)" diff --git a/mmv1/third_party/terraform/services/apikeys/resource_apikeys_key_generated_test.go.tmpl b/mmv1/third_party/terraform/services/apikeys/resource_apikeys_key_generated_test.go.tmpl index 1368e684c5ce..1159bbf09a3a 100644 --- a/mmv1/third_party/terraform/services/apikeys/resource_apikeys_key_generated_test.go.tmpl +++ b/mmv1/third_party/terraform/services/apikeys/resource_apikeys_key_generated_test.go.tmpl @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-google/google/acctest" "github.com/hashicorp/terraform-provider-google/google/envvar" - transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/services/apikeys" ) func TestAccApikeysKey_AndroidKey(t *testing.T) { @@ -432,7 +432,7 @@ func testAccCheckApikeysKeyDestroyProducer(t *testing.T) func(s *terraform.State billingProject = config.BillingProject } - obj := &Key{ + obj := &apikeys.Key{ Name: dcl.String(rs.Primary.Attributes["name"]), DisplayName: dcl.String(rs.Primary.Attributes["display_name"]), Project: dcl.StringOrNil(rs.Primary.Attributes["project"]), @@ -441,7 +441,7 @@ func testAccCheckApikeysKeyDestroyProducer(t *testing.T) func(s *terraform.State Uid: dcl.StringOrNil(rs.Primary.Attributes["uid"]), } - client := 
transport_tpg.NewDCLApikeysClient(config, config.UserAgent, billingProject, 0) + client := apikeys.NewDCLApikeysClient(config, config.UserAgent, billingProject, 0) _, err := client.GetKey(context.Background(), obj) if err == nil { return fmt.Errorf("google_apikeys_key still exists %v", obj) diff --git a/mmv1/third_party/terraform/services/assuredworkloads/resource_assured_workloads_workload.go.tmpl b/mmv1/third_party/terraform/services/assuredworkloads/resource_assured_workloads_workload.go.tmpl index 629256b2857b..ba06bc56c6d5 100644 --- a/mmv1/third_party/terraform/services/assuredworkloads/resource_assured_workloads_workload.go.tmpl +++ b/mmv1/third_party/terraform/services/assuredworkloads/resource_assured_workloads_workload.go.tmpl @@ -423,7 +423,7 @@ func resourceAssuredWorkloadsWorkloadCreate(d *schema.ResourceData, meta interfa if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := transport_tpg.NewDCLAssuredWorkloadsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + client := NewDCLAssuredWorkloadsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) @@ -487,7 +487,7 @@ func resourceAssuredWorkloadsWorkloadRead(d *schema.ResourceData, meta interface if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := transport_tpg.NewDCLAssuredWorkloadsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + client := NewDCLAssuredWorkloadsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) @@ -630,7 +630,7 @@ func 
resourceAssuredWorkloadsWorkloadUpdate(d *schema.ResourceData, meta interfa if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := transport_tpg.NewDCLAssuredWorkloadsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + client := NewDCLAssuredWorkloadsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) @@ -684,7 +684,7 @@ func resourceAssuredWorkloadsWorkloadDelete(d *schema.ResourceData, meta interfa if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := transport_tpg.NewDCLAssuredWorkloadsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + client := NewDCLAssuredWorkloadsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) diff --git a/mmv1/third_party/terraform/services/assuredworkloads/resource_assured_workloads_workload_generated_test.go.tmpl b/mmv1/third_party/terraform/services/assuredworkloads/resource_assured_workloads_workload_generated_test.go.tmpl index 793ec01a19c2..5bc2a6dcf258 100644 --- a/mmv1/third_party/terraform/services/assuredworkloads/resource_assured_workloads_workload_generated_test.go.tmpl +++ b/mmv1/third_party/terraform/services/assuredworkloads/resource_assured_workloads_workload_generated_test.go.tmpl @@ -6,12 +6,12 @@ import ( "strings" "testing" - dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-google/google/acctest" 
"github.com/hashicorp/terraform-provider-google/google/envvar" - transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/services/assuredworkloads" + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" ) {{- if ne $.TargetVersionName "ga" }} @@ -365,23 +365,23 @@ func testAccCheckAssuredWorkloadsWorkloadDestroyProducer(t *testing.T) func(s *t billingProject = config.BillingProject } - obj := &Workload{ - ComplianceRegime: WorkloadComplianceRegimeEnumRef(rs.Primary.Attributes["compliance_regime"]), + obj := &assuredworkloads.Workload{ + ComplianceRegime: assuredworkloads.WorkloadComplianceRegimeEnumRef(rs.Primary.Attributes["compliance_regime"]), DisplayName: dcl.String(rs.Primary.Attributes["display_name"]), Location: dcl.String(rs.Primary.Attributes["location"]), Organization: dcl.String(rs.Primary.Attributes["organization"]), BillingAccount: dcl.String(rs.Primary.Attributes["billing_account"]), EnableSovereignControls: dcl.Bool(rs.Primary.Attributes["enable_sovereign_controls"] == "true"), - Partner: WorkloadPartnerEnumRef(rs.Primary.Attributes["partner"]), + Partner: assuredworkloads.WorkloadPartnerEnumRef(rs.Primary.Attributes["partner"]), PartnerServicesBillingAccount: dcl.String(rs.Primary.Attributes["partner_services_billing_account"]), ProvisionedResourcesParent: dcl.String(rs.Primary.Attributes["provisioned_resources_parent"]), ViolationNotificationsEnabled: dcl.Bool(rs.Primary.Attributes["violation_notifications_enabled"] == "true"), CreateTime: dcl.StringOrNil(rs.Primary.Attributes["create_time"]), - KajEnrollmentState: WorkloadKajEnrollmentStateEnumRef(rs.Primary.Attributes["kaj_enrollment_state"]), + KajEnrollmentState: assuredworkloads.WorkloadKajEnrollmentStateEnumRef(rs.Primary.Attributes["kaj_enrollment_state"]), Name: dcl.StringOrNil(rs.Primary.Attributes["name"]), } - client := transport_tpg.NewDCLAssuredWorkloadsClient(config, 
config.UserAgent, billingProject, 0) + client := assuredworkloads.NewDCLAssuredWorkloadsClient(config, config.UserAgent, billingProject, 0) _, err := client.GetWorkload(context.Background(), obj) if err == nil { return fmt.Errorf("google_assured_workloads_workload still exists %v", obj) diff --git a/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_delivery_pipeline_generated_test.go.tmpl b/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_delivery_pipeline_generated_test.go.tmpl index ba1ec23e9dd0..ddb0a49b441b 100644 --- a/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_delivery_pipeline_generated_test.go.tmpl +++ b/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_delivery_pipeline_generated_test.go.tmpl @@ -3,20 +3,16 @@ package clouddeploy_test import ( "context" "fmt" - dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -{{- if eq $.TargetVersionName "ga" }} - clouddeploy "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy" -{{- else }} - clouddeploy "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/beta" -{{- end }} - "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/terraform" "strings" "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-google/google/acctest" "github.com/hashicorp/terraform-provider-google/google/envvar" - transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/services/clouddeploy" + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" ) {{- if eq $.TargetVersionName "ga" }} @@ -744,7 +740,7 @@ func testAccCheckClouddeployDeliveryPipelineDestroyProducer(t 
*testing.T) func(s UpdateTime: dcl.StringOrNil(rs.Primary.Attributes["update_time"]), } - client := transport_tpg.NewDCLClouddeployClient(config, config.UserAgent, billingProject, 0) + client := clouddeploy.NewDCLClouddeployClient(config, config.UserAgent, billingProject, 0) _, err := client.GetDeliveryPipeline(context.Background(), obj) if err == nil { return fmt.Errorf("google_clouddeploy_delivery_pipeline still exists %v", obj) diff --git a/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_target_generated_test.go.tmpl b/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_target_generated_test.go.tmpl index ffbc1f8e1e0c..89bf127f12d2 100644 --- a/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_target_generated_test.go.tmpl +++ b/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_target_generated_test.go.tmpl @@ -3,20 +3,16 @@ package clouddeploy_test import ( "context" "fmt" - dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -{{- if eq $.TargetVersionName "ga" }} - clouddeploy "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy" -{{- else }} - clouddeploy "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/beta" -{{- end }} - "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/terraform" "strings" "testing" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" "github.com/hashicorp/terraform-provider-google/google/envvar" - transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/services/clouddeploy" + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" ) {{- if eq 
$.TargetVersionName "ga" }} @@ -533,7 +529,7 @@ func testAccCheckClouddeployTargetDestroyProducer(t *testing.T) func(s *terrafor UpdateTime: dcl.StringOrNil(rs.Primary.Attributes["update_time"]), } - client := transport_tpg.NewDCLClouddeployClient(config, config.UserAgent, billingProject, 0) + client := clouddeploy.NewDCLClouddeployClient(config, config.UserAgent, billingProject, 0) _, err := client.GetTarget(context.Background(), obj) if err == nil { return fmt.Errorf("google_clouddeploy_target still exists %v", obj) diff --git a/mmv1/third_party/terraform/services/containeraws/resource_container_aws_cluster.go.tmpl b/mmv1/third_party/terraform/services/containeraws/resource_container_aws_cluster.go.tmpl index 0541d20bc1af..fe0732db0d9d 100644 --- a/mmv1/third_party/terraform/services/containeraws/resource_container_aws_cluster.go.tmpl +++ b/mmv1/third_party/terraform/services/containeraws/resource_container_aws_cluster.go.tmpl @@ -8,15 +8,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -{{- if eq $.TargetVersionName "ga" }} - containeraws "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws" -{{- else }} - containeraws "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/beta" -{{- end }} - - "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" "github.com/hashicorp/terraform-provider-google/google/tpgresource" transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) @@ -670,7 +662,7 @@ func resourceContainerAwsClusterCreate(d *schema.ResourceData, meta interface{}) return err } - obj := &containeraws.Cluster{ + obj := &Cluster{ Authorization: 
expandContainerAwsClusterAuthorization(d.Get("authorization")), AwsRegion: dcl.String(d.Get("aws_region").(string)), ControlPlane: expandContainerAwsClusterControlPlane(d.Get("control_plane")), @@ -692,7 +684,7 @@ func resourceContainerAwsClusterCreate(d *schema.ResourceData, meta interface{}) return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - directive := tpgdclresource.CreateDirective + directive := dcl.CreateDirective userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err @@ -702,7 +694,7 @@ func resourceContainerAwsClusterCreate(d *schema.ResourceData, meta interface{}) if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := transport_tpg.NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + client := NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) @@ -731,7 +723,7 @@ func resourceContainerAwsClusterRead(d *schema.ResourceData, meta interface{}) e return err } - obj := &containeraws.Cluster{ + obj := &Cluster{ Authorization: expandContainerAwsClusterAuthorization(d.Get("authorization")), AwsRegion: dcl.String(d.Get("aws_region").(string)), ControlPlane: expandContainerAwsClusterControlPlane(d.Get("control_plane")), @@ -757,7 +749,7 @@ func resourceContainerAwsClusterRead(d *schema.ResourceData, meta interface{}) e if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := transport_tpg.NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + client := NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != 
nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) @@ -767,7 +759,7 @@ func resourceContainerAwsClusterRead(d *schema.ResourceData, meta interface{}) e res, err := client.GetCluster(context.Background(), obj) if err != nil { resourceName := fmt.Sprintf("ContainerAwsCluster %q", d.Id()) - return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) + return dcl.HandleNotFoundDCLError(err, d, resourceName) } if err = d.Set("authorization", flattenContainerAwsClusterAuthorization(res.Authorization)); err != nil { @@ -845,7 +837,7 @@ func resourceContainerAwsClusterUpdate(d *schema.ResourceData, meta interface{}) return err } - obj := &containeraws.Cluster{ + obj := &Cluster{ Authorization: expandContainerAwsClusterAuthorization(d.Get("authorization")), AwsRegion: dcl.String(d.Get("aws_region").(string)), ControlPlane: expandContainerAwsClusterControlPlane(d.Get("control_plane")), @@ -861,7 +853,7 @@ func resourceContainerAwsClusterUpdate(d *schema.ResourceData, meta interface{}) {{- end }} Project: dcl.String(project), } - directive := tpgdclresource.UpdateDirective + directive := dcl.UpdateDirective userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err @@ -872,7 +864,7 @@ func resourceContainerAwsClusterUpdate(d *schema.ResourceData, meta interface{}) if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := transport_tpg.NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + client := NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) @@ -901,7 +893,7 @@ func resourceContainerAwsClusterDelete(d *schema.ResourceData, meta interface{}) return err } - obj := &containeraws.Cluster{ 
+ obj := &Cluster{ Authorization: expandContainerAwsClusterAuthorization(d.Get("authorization")), AwsRegion: dcl.String(d.Get("aws_region").(string)), ControlPlane: expandContainerAwsClusterControlPlane(d.Get("control_plane")), @@ -928,7 +920,7 @@ func resourceContainerAwsClusterDelete(d *schema.ResourceData, meta interface{}) if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := transport_tpg.NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + client := NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) @@ -964,22 +956,22 @@ func resourceContainerAwsClusterImport(d *schema.ResourceData, meta interface{}) return []*schema.ResourceData{d}, nil } -func expandContainerAwsClusterAuthorization(o interface{}) *containeraws.ClusterAuthorization { +func expandContainerAwsClusterAuthorization(o interface{}) *ClusterAuthorization { if o == nil { - return containeraws.EmptyClusterAuthorization + return EmptyClusterAuthorization } objArr := o.([]interface{}) if len(objArr) == 0 || objArr[0] == nil { - return containeraws.EmptyClusterAuthorization + return EmptyClusterAuthorization } obj := objArr[0].(map[string]interface{}) - return &containeraws.ClusterAuthorization{ + return &ClusterAuthorization{ AdminUsers: expandContainerAwsClusterAuthorizationAdminUsersArray(obj["admin_users"]), AdminGroups: expandContainerAwsClusterAuthorizationAdminGroupsArray(obj["admin_groups"]), } } -func flattenContainerAwsClusterAuthorization(obj *containeraws.ClusterAuthorization) interface{} { +func flattenContainerAwsClusterAuthorization(obj *ClusterAuthorization) interface{} { if obj == nil || obj.Empty() { return nil } @@ -991,17 +983,17 @@ func flattenContainerAwsClusterAuthorization(obj 
*containeraws.ClusterAuthorizat return []interface{}{transformed} } -func expandContainerAwsClusterAuthorizationAdminUsersArray(o interface{}) []containeraws.ClusterAuthorizationAdminUsers { +func expandContainerAwsClusterAuthorizationAdminUsersArray(o interface{}) []ClusterAuthorizationAdminUsers { if o == nil { - return make([]containeraws.ClusterAuthorizationAdminUsers, 0) + return make([]ClusterAuthorizationAdminUsers, 0) } objs := o.([]interface{}) if len(objs) == 0 || objs[0] == nil { - return make([]containeraws.ClusterAuthorizationAdminUsers, 0) + return make([]ClusterAuthorizationAdminUsers, 0) } - items := make([]containeraws.ClusterAuthorizationAdminUsers, 0, len(objs)) + items := make([]ClusterAuthorizationAdminUsers, 0, len(objs)) for _, item := range objs { i := expandContainerAwsClusterAuthorizationAdminUsers(item) items = append(items, *i) @@ -1010,18 +1002,18 @@ func expandContainerAwsClusterAuthorizationAdminUsersArray(o interface{}) []cont return items } -func expandContainerAwsClusterAuthorizationAdminUsers(o interface{}) *containeraws.ClusterAuthorizationAdminUsers { +func expandContainerAwsClusterAuthorizationAdminUsers(o interface{}) *ClusterAuthorizationAdminUsers { if o == nil { - return containeraws.EmptyClusterAuthorizationAdminUsers + return EmptyClusterAuthorizationAdminUsers } obj := o.(map[string]interface{}) - return &containeraws.ClusterAuthorizationAdminUsers{ + return &ClusterAuthorizationAdminUsers{ Username: dcl.String(obj["username"].(string)), } } -func flattenContainerAwsClusterAuthorizationAdminUsersArray(objs []containeraws.ClusterAuthorizationAdminUsers) []interface{} { +func flattenContainerAwsClusterAuthorizationAdminUsersArray(objs []ClusterAuthorizationAdminUsers) []interface{} { if objs == nil { return nil } @@ -1035,7 +1027,7 @@ func flattenContainerAwsClusterAuthorizationAdminUsersArray(objs []containeraws. 
return items } -func flattenContainerAwsClusterAuthorizationAdminUsers(obj *containeraws.ClusterAuthorizationAdminUsers) interface{} { +func flattenContainerAwsClusterAuthorizationAdminUsers(obj *ClusterAuthorizationAdminUsers) interface{} { if obj == nil || obj.Empty() { return nil } @@ -1046,17 +1038,17 @@ func flattenContainerAwsClusterAuthorizationAdminUsers(obj *containeraws.Cluster return transformed } -func expandContainerAwsClusterAuthorizationAdminGroupsArray(o interface{}) []containeraws.ClusterAuthorizationAdminGroups { +func expandContainerAwsClusterAuthorizationAdminGroupsArray(o interface{}) []ClusterAuthorizationAdminGroups { if o == nil { - return make([]containeraws.ClusterAuthorizationAdminGroups, 0) + return make([]ClusterAuthorizationAdminGroups, 0) } objs := o.([]interface{}) if len(objs) == 0 || objs[0] == nil { - return make([]containeraws.ClusterAuthorizationAdminGroups, 0) + return make([]ClusterAuthorizationAdminGroups, 0) } - items := make([]containeraws.ClusterAuthorizationAdminGroups, 0, len(objs)) + items := make([]ClusterAuthorizationAdminGroups, 0, len(objs)) for _, item := range objs { i := expandContainerAwsClusterAuthorizationAdminGroups(item) items = append(items, *i) @@ -1065,18 +1057,18 @@ func expandContainerAwsClusterAuthorizationAdminGroupsArray(o interface{}) []con return items } -func expandContainerAwsClusterAuthorizationAdminGroups(o interface{}) *containeraws.ClusterAuthorizationAdminGroups { +func expandContainerAwsClusterAuthorizationAdminGroups(o interface{}) *ClusterAuthorizationAdminGroups { if o == nil { - return containeraws.EmptyClusterAuthorizationAdminGroups + return EmptyClusterAuthorizationAdminGroups } obj := o.(map[string]interface{}) - return &containeraws.ClusterAuthorizationAdminGroups{ + return &ClusterAuthorizationAdminGroups{ Group: dcl.String(obj["group"].(string)), } } -func flattenContainerAwsClusterAuthorizationAdminGroupsArray(objs []containeraws.ClusterAuthorizationAdminGroups) []interface{} { 
+func flattenContainerAwsClusterAuthorizationAdminGroupsArray(objs []ClusterAuthorizationAdminGroups) []interface{} { if objs == nil { return nil } @@ -1090,7 +1082,7 @@ func flattenContainerAwsClusterAuthorizationAdminGroupsArray(objs []containeraws return items } -func flattenContainerAwsClusterAuthorizationAdminGroups(obj *containeraws.ClusterAuthorizationAdminGroups) interface{} { +func flattenContainerAwsClusterAuthorizationAdminGroups(obj *ClusterAuthorizationAdminGroups) interface{} { if obj == nil || obj.Empty() { return nil } @@ -1102,21 +1094,21 @@ func flattenContainerAwsClusterAuthorizationAdminGroups(obj *containeraws.Cluste } -func expandContainerAwsClusterControlPlane(o interface{}) *containeraws.ClusterControlPlane { +func expandContainerAwsClusterControlPlane(o interface{}) *ClusterControlPlane { if o == nil { - return containeraws.EmptyClusterControlPlane + return EmptyClusterControlPlane } objArr := o.([]interface{}) if len(objArr) == 0 || objArr[0] == nil { - return containeraws.EmptyClusterControlPlane + return EmptyClusterControlPlane } obj := objArr[0].(map[string]interface{}) - return &containeraws.ClusterControlPlane{ + return &ClusterControlPlane{ AwsServicesAuthentication: expandContainerAwsClusterControlPlaneAwsServicesAuthentication(obj["aws_services_authentication"]), ConfigEncryption: expandContainerAwsClusterControlPlaneConfigEncryption(obj["config_encryption"]), DatabaseEncryption: expandContainerAwsClusterControlPlaneDatabaseEncryption(obj["database_encryption"]), IamInstanceProfile: dcl.String(obj["iam_instance_profile"].(string)), - SubnetIds: tpgdclresource.ExpandStringArray(obj["subnet_ids"]), + SubnetIds: dcl.ExpandStringArray(obj["subnet_ids"]), Version: dcl.String(obj["version"].(string)), {{- if ne $.TargetVersionName "ga" }} InstancePlacement: expandContainerAwsClusterControlPlaneInstancePlacement(obj["instance_placement"]), @@ -1125,13 +1117,13 @@ func expandContainerAwsClusterControlPlane(o interface{}) 
*containeraws.ClusterC MainVolume: expandContainerAwsClusterControlPlaneMainVolume(obj["main_volume"]), ProxyConfig: expandContainerAwsClusterControlPlaneProxyConfig(obj["proxy_config"]), RootVolume: expandContainerAwsClusterControlPlaneRootVolume(obj["root_volume"]), - SecurityGroupIds: tpgdclresource.ExpandStringArray(obj["security_group_ids"]), + SecurityGroupIds: dcl.ExpandStringArray(obj["security_group_ids"]), SshConfig: expandContainerAwsClusterControlPlaneSshConfig(obj["ssh_config"]), Tags: tpgresource.CheckStringMap(obj["tags"]), } } -func flattenContainerAwsClusterControlPlane(obj *containeraws.ClusterControlPlane) interface{} { +func flattenContainerAwsClusterControlPlane(obj *ClusterControlPlane) interface{} { if obj == nil || obj.Empty() { return nil } @@ -1158,22 +1150,22 @@ func flattenContainerAwsClusterControlPlane(obj *containeraws.ClusterControlPlan } -func expandContainerAwsClusterControlPlaneAwsServicesAuthentication(o interface{}) *containeraws.ClusterControlPlaneAwsServicesAuthentication { +func expandContainerAwsClusterControlPlaneAwsServicesAuthentication(o interface{}) *ClusterControlPlaneAwsServicesAuthentication { if o == nil { - return containeraws.EmptyClusterControlPlaneAwsServicesAuthentication + return EmptyClusterControlPlaneAwsServicesAuthentication } objArr := o.([]interface{}) if len(objArr) == 0 || objArr[0] == nil { - return containeraws.EmptyClusterControlPlaneAwsServicesAuthentication + return EmptyClusterControlPlaneAwsServicesAuthentication } obj := objArr[0].(map[string]interface{}) - return &containeraws.ClusterControlPlaneAwsServicesAuthentication{ + return &ClusterControlPlaneAwsServicesAuthentication{ RoleArn: dcl.String(obj["role_arn"].(string)), RoleSessionName: dcl.StringOrNil(obj["role_session_name"].(string)), } } -func flattenContainerAwsClusterControlPlaneAwsServicesAuthentication(obj *containeraws.ClusterControlPlaneAwsServicesAuthentication) interface{} { +func 
flattenContainerAwsClusterControlPlaneAwsServicesAuthentication(obj *ClusterControlPlaneAwsServicesAuthentication) interface{} { if obj == nil || obj.Empty() { return nil } @@ -1186,21 +1178,21 @@ func flattenContainerAwsClusterControlPlaneAwsServicesAuthentication(obj *contai } -func expandContainerAwsClusterControlPlaneConfigEncryption(o interface{}) *containeraws.ClusterControlPlaneConfigEncryption { +func expandContainerAwsClusterControlPlaneConfigEncryption(o interface{}) *ClusterControlPlaneConfigEncryption { if o == nil { - return containeraws.EmptyClusterControlPlaneConfigEncryption + return EmptyClusterControlPlaneConfigEncryption } objArr := o.([]interface{}) if len(objArr) == 0 || objArr[0] == nil { - return containeraws.EmptyClusterControlPlaneConfigEncryption + return EmptyClusterControlPlaneConfigEncryption } obj := objArr[0].(map[string]interface{}) - return &containeraws.ClusterControlPlaneConfigEncryption{ + return &ClusterControlPlaneConfigEncryption{ KmsKeyArn: dcl.String(obj["kms_key_arn"].(string)), } } -func flattenContainerAwsClusterControlPlaneConfigEncryption(obj *containeraws.ClusterControlPlaneConfigEncryption) interface{} { +func flattenContainerAwsClusterControlPlaneConfigEncryption(obj *ClusterControlPlaneConfigEncryption) interface{} { if obj == nil || obj.Empty() { return nil } @@ -1212,21 +1204,21 @@ func flattenContainerAwsClusterControlPlaneConfigEncryption(obj *containeraws.Cl } -func expandContainerAwsClusterControlPlaneDatabaseEncryption(o interface{}) *containeraws.ClusterControlPlaneDatabaseEncryption { +func expandContainerAwsClusterControlPlaneDatabaseEncryption(o interface{}) *ClusterControlPlaneDatabaseEncryption { if o == nil { - return containeraws.EmptyClusterControlPlaneDatabaseEncryption + return EmptyClusterControlPlaneDatabaseEncryption } objArr := o.([]interface{}) if len(objArr) == 0 || objArr[0] == nil { - return containeraws.EmptyClusterControlPlaneDatabaseEncryption + return 
EmptyClusterControlPlaneDatabaseEncryption } obj := objArr[0].(map[string]interface{}) - return &containeraws.ClusterControlPlaneDatabaseEncryption{ + return &ClusterControlPlaneDatabaseEncryption{ KmsKeyArn: dcl.String(obj["kms_key_arn"].(string)), } } -func flattenContainerAwsClusterControlPlaneDatabaseEncryption(obj *containeraws.ClusterControlPlaneDatabaseEncryption) interface{} { +func flattenContainerAwsClusterControlPlaneDatabaseEncryption(obj *ClusterControlPlaneDatabaseEncryption) interface{} { if obj == nil || obj.Empty() { return nil } @@ -1239,7 +1231,7 @@ func flattenContainerAwsClusterControlPlaneDatabaseEncryption(obj *containeraws. } {{- if ne $.TargetVersionName "ga" }} -func expandContainerAwsClusterControlPlaneInstancePlacement(o interface{}) *containeraws.ClusterControlPlaneInstancePlacement { +func expandContainerAwsClusterControlPlaneInstancePlacement(o interface{}) *ClusterControlPlaneInstancePlacement { if o == nil { return nil } @@ -1248,12 +1240,12 @@ func expandContainerAwsClusterControlPlaneInstancePlacement(o interface{}) *cont return nil } obj := objArr[0].(map[string]interface{}) - return &containeraws.ClusterControlPlaneInstancePlacement{ - Tenancy: containeraws.ClusterControlPlaneInstancePlacementTenancyEnumRef(obj["tenancy"].(string)), + return &ClusterControlPlaneInstancePlacement{ + Tenancy: ClusterControlPlaneInstancePlacementTenancyEnumRef(obj["tenancy"].(string)), } } -func flattenContainerAwsClusterControlPlaneInstancePlacement(obj *containeraws.ClusterControlPlaneInstancePlacement) interface{} { +func flattenContainerAwsClusterControlPlaneInstancePlacement(obj *ClusterControlPlaneInstancePlacement) interface{} { if obj == nil || obj.Empty() { return nil } @@ -1266,7 +1258,7 @@ func flattenContainerAwsClusterControlPlaneInstancePlacement(obj *containeraws.C } {{- end }} -func expandContainerAwsClusterControlPlaneMainVolume(o interface{}) *containeraws.ClusterControlPlaneMainVolume { +func 
expandContainerAwsClusterControlPlaneMainVolume(o interface{}) *ClusterControlPlaneMainVolume { if o == nil { return nil } @@ -1275,16 +1267,16 @@ func expandContainerAwsClusterControlPlaneMainVolume(o interface{}) *containeraw return nil } obj := objArr[0].(map[string]interface{}) - return &containeraws.ClusterControlPlaneMainVolume{ + return &ClusterControlPlaneMainVolume{ Iops: dcl.Int64OrNil(int64(obj["iops"].(int))), KmsKeyArn: dcl.String(obj["kms_key_arn"].(string)), SizeGib: dcl.Int64OrNil(int64(obj["size_gib"].(int))), Throughput: dcl.Int64OrNil(int64(obj["throughput"].(int))), - VolumeType: containeraws.ClusterControlPlaneMainVolumeVolumeTypeEnumRef(obj["volume_type"].(string)), + VolumeType: ClusterControlPlaneMainVolumeVolumeTypeEnumRef(obj["volume_type"].(string)), } } -func flattenContainerAwsClusterControlPlaneMainVolume(obj *containeraws.ClusterControlPlaneMainVolume) interface{} { +func flattenContainerAwsClusterControlPlaneMainVolume(obj *ClusterControlPlaneMainVolume) interface{} { if obj == nil || obj.Empty() { return nil } @@ -1300,22 +1292,22 @@ func flattenContainerAwsClusterControlPlaneMainVolume(obj *containeraws.ClusterC } -func expandContainerAwsClusterControlPlaneProxyConfig(o interface{}) *containeraws.ClusterControlPlaneProxyConfig { +func expandContainerAwsClusterControlPlaneProxyConfig(o interface{}) *ClusterControlPlaneProxyConfig { if o == nil { - return containeraws.EmptyClusterControlPlaneProxyConfig + return EmptyClusterControlPlaneProxyConfig } objArr := o.([]interface{}) if len(objArr) == 0 || objArr[0] == nil { - return containeraws.EmptyClusterControlPlaneProxyConfig + return EmptyClusterControlPlaneProxyConfig } obj := objArr[0].(map[string]interface{}) - return &containeraws.ClusterControlPlaneProxyConfig{ + return &ClusterControlPlaneProxyConfig{ SecretArn: dcl.String(obj["secret_arn"].(string)), SecretVersion: dcl.String(obj["secret_version"].(string)), } } -func flattenContainerAwsClusterControlPlaneProxyConfig(obj 
*containeraws.ClusterControlPlaneProxyConfig) interface{} { +func flattenContainerAwsClusterControlPlaneProxyConfig(obj *ClusterControlPlaneProxyConfig) interface{} { if obj == nil || obj.Empty() { return nil } @@ -1328,7 +1320,7 @@ func flattenContainerAwsClusterControlPlaneProxyConfig(obj *containeraws.Cluster } -func expandContainerAwsClusterControlPlaneRootVolume(o interface{}) *containeraws.ClusterControlPlaneRootVolume { +func expandContainerAwsClusterControlPlaneRootVolume(o interface{}) *ClusterControlPlaneRootVolume { if o == nil { return nil } @@ -1337,16 +1329,16 @@ func expandContainerAwsClusterControlPlaneRootVolume(o interface{}) *containeraw return nil } obj := objArr[0].(map[string]interface{}) - return &containeraws.ClusterControlPlaneRootVolume{ + return &ClusterControlPlaneRootVolume{ Iops: dcl.Int64OrNil(int64(obj["iops"].(int))), KmsKeyArn: dcl.String(obj["kms_key_arn"].(string)), SizeGib: dcl.Int64OrNil(int64(obj["size_gib"].(int))), Throughput: dcl.Int64OrNil(int64(obj["throughput"].(int))), - VolumeType: containeraws.ClusterControlPlaneRootVolumeVolumeTypeEnumRef(obj["volume_type"].(string)), + VolumeType: ClusterControlPlaneRootVolumeVolumeTypeEnumRef(obj["volume_type"].(string)), } } -func flattenContainerAwsClusterControlPlaneRootVolume(obj *containeraws.ClusterControlPlaneRootVolume) interface{} { +func flattenContainerAwsClusterControlPlaneRootVolume(obj *ClusterControlPlaneRootVolume) interface{} { if obj == nil || obj.Empty() { return nil } @@ -1362,21 +1354,21 @@ func flattenContainerAwsClusterControlPlaneRootVolume(obj *containeraws.ClusterC } -func expandContainerAwsClusterControlPlaneSshConfig(o interface{}) *containeraws.ClusterControlPlaneSshConfig { +func expandContainerAwsClusterControlPlaneSshConfig(o interface{}) *ClusterControlPlaneSshConfig { if o == nil { - return containeraws.EmptyClusterControlPlaneSshConfig + return EmptyClusterControlPlaneSshConfig } objArr := o.([]interface{}) if len(objArr) == 0 || objArr[0] == nil 
{ - return containeraws.EmptyClusterControlPlaneSshConfig + return EmptyClusterControlPlaneSshConfig } obj := objArr[0].(map[string]interface{}) - return &containeraws.ClusterControlPlaneSshConfig{ + return &ClusterControlPlaneSshConfig{ Ec2KeyPair: dcl.String(obj["ec2_key_pair"].(string)), } } -func flattenContainerAwsClusterControlPlaneSshConfig(obj *containeraws.ClusterControlPlaneSshConfig) interface{} { +func flattenContainerAwsClusterControlPlaneSshConfig(obj *ClusterControlPlaneSshConfig) interface{} { if obj == nil || obj.Empty() { return nil } @@ -1388,21 +1380,21 @@ func flattenContainerAwsClusterControlPlaneSshConfig(obj *containeraws.ClusterCo } -func expandContainerAwsClusterFleet(o interface{}) *containeraws.ClusterFleet { +func expandContainerAwsClusterFleet(o interface{}) *ClusterFleet { if o == nil { - return containeraws.EmptyClusterFleet + return EmptyClusterFleet } objArr := o.([]interface{}) if len(objArr) == 0 || objArr[0] == nil { - return containeraws.EmptyClusterFleet + return EmptyClusterFleet } obj := objArr[0].(map[string]interface{}) - return &containeraws.ClusterFleet{ + return &ClusterFleet{ Project: dcl.StringOrNil(obj["project"].(string)), } } -func flattenContainerAwsClusterFleet(obj *containeraws.ClusterFleet) interface{} { +func flattenContainerAwsClusterFleet(obj *ClusterFleet) interface{} { if obj == nil || obj.Empty() { return nil } @@ -1415,24 +1407,24 @@ func flattenContainerAwsClusterFleet(obj *containeraws.ClusterFleet) interface{} } -func expandContainerAwsClusterNetworking(o interface{}) *containeraws.ClusterNetworking { +func expandContainerAwsClusterNetworking(o interface{}) *ClusterNetworking { if o == nil { - return containeraws.EmptyClusterNetworking + return EmptyClusterNetworking } objArr := o.([]interface{}) if len(objArr) == 0 || objArr[0] == nil { - return containeraws.EmptyClusterNetworking + return EmptyClusterNetworking } obj := objArr[0].(map[string]interface{}) - return &containeraws.ClusterNetworking{ - 
PodAddressCidrBlocks: tpgdclresource.ExpandStringArray(obj["pod_address_cidr_blocks"]), - ServiceAddressCidrBlocks: tpgdclresource.ExpandStringArray(obj["service_address_cidr_blocks"]), + return &ClusterNetworking{ + PodAddressCidrBlocks: dcl.ExpandStringArray(obj["pod_address_cidr_blocks"]), + ServiceAddressCidrBlocks: dcl.ExpandStringArray(obj["service_address_cidr_blocks"]), VPCId: dcl.String(obj["vpc_id"].(string)), PerNodePoolSgRulesDisabled: dcl.Bool(obj["per_node_pool_sg_rules_disabled"].(bool)), } } -func flattenContainerAwsClusterNetworking(obj *containeraws.ClusterNetworking) interface{} { +func flattenContainerAwsClusterNetworking(obj *ClusterNetworking) interface{} { if obj == nil || obj.Empty() { return nil } @@ -1447,7 +1439,7 @@ func flattenContainerAwsClusterNetworking(obj *containeraws.ClusterNetworking) i } -func expandContainerAwsClusterBinaryAuthorization(o interface{}) *containeraws.ClusterBinaryAuthorization { +func expandContainerAwsClusterBinaryAuthorization(o interface{}) *ClusterBinaryAuthorization { if o == nil { return nil } @@ -1456,12 +1448,12 @@ func expandContainerAwsClusterBinaryAuthorization(o interface{}) *containeraws.C return nil } obj := objArr[0].(map[string]interface{}) - return &containeraws.ClusterBinaryAuthorization{ - EvaluationMode: containeraws.ClusterBinaryAuthorizationEvaluationModeEnumRef(obj["evaluation_mode"].(string)), + return &ClusterBinaryAuthorization{ + EvaluationMode: ClusterBinaryAuthorizationEvaluationModeEnumRef(obj["evaluation_mode"].(string)), } } -func flattenContainerAwsClusterBinaryAuthorization(obj *containeraws.ClusterBinaryAuthorization) interface{} { +func flattenContainerAwsClusterBinaryAuthorization(obj *ClusterBinaryAuthorization) interface{} { if obj == nil || obj.Empty() { return nil } @@ -1474,7 +1466,7 @@ func flattenContainerAwsClusterBinaryAuthorization(obj *containeraws.ClusterBina } {{- if ne $.TargetVersionName "ga" }} -func expandContainerAwsClusterLoggingConfig(o interface{}) 
*containeraws.ClusterLoggingConfig { +func expandContainerAwsClusterLoggingConfig(o interface{}) *ClusterLoggingConfig { if o == nil { return nil } @@ -1483,12 +1475,12 @@ func expandContainerAwsClusterLoggingConfig(o interface{}) *containeraws.Cluster return nil } obj := objArr[0].(map[string]interface{}) - return &containeraws.ClusterLoggingConfig{ + return &ClusterLoggingConfig{ ComponentConfig: expandContainerAwsClusterLoggingConfigComponentConfig(obj["component_config"]), } } -func flattenContainerAwsClusterLoggingConfig(obj *containeraws.ClusterLoggingConfig) interface{} { +func flattenContainerAwsClusterLoggingConfig(obj *ClusterLoggingConfig) interface{} { if obj == nil || obj.Empty() { return nil } @@ -1500,7 +1492,7 @@ func flattenContainerAwsClusterLoggingConfig(obj *containeraws.ClusterLoggingCon } -func expandContainerAwsClusterLoggingConfigComponentConfig(o interface{}) *containeraws.ClusterLoggingConfigComponentConfig { +func expandContainerAwsClusterLoggingConfigComponentConfig(o interface{}) *ClusterLoggingConfigComponentConfig { if o == nil { return nil } @@ -1509,12 +1501,12 @@ func expandContainerAwsClusterLoggingConfigComponentConfig(o interface{}) *conta return nil } obj := objArr[0].(map[string]interface{}) - return &containeraws.ClusterLoggingConfigComponentConfig{ + return &ClusterLoggingConfigComponentConfig{ EnableComponents: expandContainerAwsClusterLoggingConfigComponentConfigEnableComponentsArray(obj["enable_components"]), } } -func flattenContainerAwsClusterLoggingConfigComponentConfig(obj *containeraws.ClusterLoggingConfigComponentConfig) interface{} { +func flattenContainerAwsClusterLoggingConfigComponentConfig(obj *ClusterLoggingConfigComponentConfig) interface{} { if obj == nil || obj.Empty() { return nil } @@ -1527,7 +1519,7 @@ func flattenContainerAwsClusterLoggingConfigComponentConfig(obj *containeraws.Cl } {{- end }} -func flattenContainerAwsClusterWorkloadIdentityConfig(obj *containeraws.ClusterWorkloadIdentityConfig) 
interface{} { +func flattenContainerAwsClusterWorkloadIdentityConfig(obj *ClusterWorkloadIdentityConfig) interface{} { if obj == nil || obj.Empty() { return nil } @@ -1557,7 +1549,7 @@ func flattenContainerAwsClusterAnnotations(v map[string]string, d *schema.Resour } {{- if ne $.TargetVersionName "ga" }} -func flattenContainerAwsClusterLoggingConfigComponentConfigEnableComponentsArray(obj []containeraws.ClusterLoggingConfigComponentConfigEnableComponentsEnum) interface{} { +func flattenContainerAwsClusterLoggingConfigComponentConfigEnableComponentsArray(obj []ClusterLoggingConfigComponentConfigEnableComponentsEnum) interface{} { if obj == nil { return nil } @@ -1567,11 +1559,11 @@ func flattenContainerAwsClusterLoggingConfigComponentConfigEnableComponentsArray } return items } -func expandContainerAwsClusterLoggingConfigComponentConfigEnableComponentsArray(o interface{}) []containeraws.ClusterLoggingConfigComponentConfigEnableComponentsEnum { +func expandContainerAwsClusterLoggingConfigComponentConfigEnableComponentsArray(o interface{}) []ClusterLoggingConfigComponentConfigEnableComponentsEnum { objs := o.([]interface{}) - items := make([]containeraws.ClusterLoggingConfigComponentConfigEnableComponentsEnum, 0, len(objs)) + items := make([]ClusterLoggingConfigComponentConfigEnableComponentsEnum, 0, len(objs)) for _, item := range objs { - i := containeraws.ClusterLoggingConfigComponentConfigEnableComponentsEnumRef(item.(string)) + i := ClusterLoggingConfigComponentConfigEnableComponentsEnumRef(item.(string)) items = append(items, *i) } return items diff --git a/mmv1/third_party/terraform/services/containeraws/resource_container_aws_cluster_generated_test.go.tmpl b/mmv1/third_party/terraform/services/containeraws/resource_container_aws_cluster_generated_test.go.tmpl index 6a75936b71e5..d084a72d39a0 100644 --- a/mmv1/third_party/terraform/services/containeraws/resource_container_aws_cluster_generated_test.go.tmpl +++ 
b/mmv1/third_party/terraform/services/containeraws/resource_container_aws_cluster_generated_test.go.tmpl @@ -3,20 +3,15 @@ package containeraws_test import ( "context" "fmt" - dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -{{- if eq $.TargetVersionName "ga" }} - containeraws "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws" -{{- else }} - containeraws "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/beta" -{{- end }} - "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/terraform" "strings" "testing" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-google/google/acctest" "github.com/hashicorp/terraform-provider-google/google/envvar" - transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/services/containeraws" + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" ) func TestAccContainerAwsCluster_BasicHandWritten(t *testing.T) { @@ -1015,7 +1010,7 @@ func testAccCheckContainerAwsClusterDestroyProducer(t *testing.T) func(s *terraf UpdateTime: dcl.StringOrNil(rs.Primary.Attributes["update_time"]), } - client := transport_tpg.NewDCLContainerAwsClient(config, config.UserAgent, billingProject, 0) + client := containeraws.NewDCLContainerAwsClient(config, config.UserAgent, billingProject, 0) _, err := client.GetCluster(context.Background(), obj) if err == nil { return fmt.Errorf("google_container_aws_cluster still exists %v", obj) diff --git a/mmv1/third_party/terraform/services/containeraws/resource_container_aws_node_pool.go.tmpl b/mmv1/third_party/terraform/services/containeraws/resource_container_aws_node_pool.go.tmpl index 25d433702171..d34cb8486735 100644 --- 
a/mmv1/third_party/terraform/services/containeraws/resource_container_aws_node_pool.go.tmpl +++ b/mmv1/third_party/terraform/services/containeraws/resource_container_aws_node_pool.go.tmpl @@ -8,15 +8,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -{{- if eq $.TargetVersionName "ga" }} - containeraws "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws" -{{- else }} - containeraws "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/beta" -{{- end }} - - "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" "github.com/hashicorp/terraform-provider-google/google/tpgresource" transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) @@ -40,7 +32,7 @@ func ResourceContainerAwsNodePool() *schema.Resource { CustomizeDiff: customdiff.All( tpgresource.DefaultProviderProject, {{- if ne $.TargetVersionName "ga" }} - tpgdclresource.ResourceContainerAwsNodePoolCustomizeDiffFunc, + dcl.ResourceContainerAwsNodePoolCustomizeDiffFunc, {{- end }} tpgresource.SetAnnotationsDiff, ), @@ -600,7 +592,7 @@ func resourceContainerAwsNodePoolCreate(d *schema.ResourceData, meta interface{} return err } - obj := &containeraws.NodePool{ + obj := &NodePool{ Autoscaling: expandContainerAwsNodePoolAutoscaling(d.Get("autoscaling")), Cluster: dcl.String(d.Get("cluster").(string)), Config: expandContainerAwsNodePoolConfig(d.Get("config")), @@ -621,7 +613,7 @@ func resourceContainerAwsNodePoolCreate(d *schema.ResourceData, meta interface{} return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - directive := tpgdclresource.CreateDirective + directive := dcl.CreateDirective userAgent, err := 
tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err @@ -631,7 +623,7 @@ func resourceContainerAwsNodePoolCreate(d *schema.ResourceData, meta interface{} if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := transport_tpg.NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + client := NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) @@ -660,7 +652,7 @@ func resourceContainerAwsNodePoolRead(d *schema.ResourceData, meta interface{}) return err } - obj := &containeraws.NodePool{ + obj := &NodePool{ Autoscaling: expandContainerAwsNodePoolAutoscaling(d.Get("autoscaling")), Cluster: dcl.String(d.Get("cluster").(string)), Config: expandContainerAwsNodePoolConfig(d.Get("config")), @@ -685,7 +677,7 @@ func resourceContainerAwsNodePoolRead(d *schema.ResourceData, meta interface{}) if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := transport_tpg.NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + client := NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) @@ -695,7 +687,7 @@ func resourceContainerAwsNodePoolRead(d *schema.ResourceData, meta interface{}) res, err := client.GetNodePool(context.Background(), obj) if err != nil { resourceName := fmt.Sprintf("ContainerAwsNodePool %q", d.Id()) - return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) + return dcl.HandleNotFoundDCLError(err, d, resourceName) } if err = d.Set("autoscaling", 
flattenContainerAwsNodePoolAutoscaling(res.Autoscaling)); err != nil { @@ -728,7 +720,7 @@ func resourceContainerAwsNodePoolRead(d *schema.ResourceData, meta interface{}) if err = d.Set("kubelet_config", flattenContainerAwsNodePoolKubeletConfig(res.KubeletConfig)); err != nil { return fmt.Errorf("error setting kubelet_config in state: %s", err) } - if err = d.Set("management", tpgresource.FlattenContainerAwsNodePoolManagement(res.Management, d, config)); err != nil { + if err = d.Set("management", flattenContainerAwsNodePoolManagement(res.Management)); err != nil { return fmt.Errorf("error setting management in state: %s", err) } if err = d.Set("project", res.Project); err != nil { @@ -768,7 +760,7 @@ func resourceContainerAwsNodePoolUpdate(d *schema.ResourceData, meta interface{} return err } - obj := &containeraws.NodePool{ + obj := &NodePool{ Autoscaling: expandContainerAwsNodePoolAutoscaling(d.Get("autoscaling")), Cluster: dcl.String(d.Get("cluster").(string)), Config: expandContainerAwsNodePoolConfig(d.Get("config")), @@ -783,7 +775,7 @@ func resourceContainerAwsNodePoolUpdate(d *schema.ResourceData, meta interface{} Project: dcl.String(project), UpdateSettings: expandContainerAwsNodePoolUpdateSettings(d.Get("update_settings")), } - directive := tpgdclresource.UpdateDirective + directive := dcl.UpdateDirective userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err @@ -794,7 +786,7 @@ func resourceContainerAwsNodePoolUpdate(d *schema.ResourceData, meta interface{} if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := transport_tpg.NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + client := NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", 
client.Config.BasePath, err) @@ -823,7 +815,7 @@ func resourceContainerAwsNodePoolDelete(d *schema.ResourceData, meta interface{} return err } - obj := &containeraws.NodePool{ + obj := &NodePool{ Autoscaling: expandContainerAwsNodePoolAutoscaling(d.Get("autoscaling")), Cluster: dcl.String(d.Get("cluster").(string)), Config: expandContainerAwsNodePoolConfig(d.Get("config")), @@ -849,7 +841,7 @@ func resourceContainerAwsNodePoolDelete(d *schema.ResourceData, meta interface{} if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := transport_tpg.NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + client := NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) @@ -885,22 +877,22 @@ func resourceContainerAwsNodePoolImport(d *schema.ResourceData, meta interface{} return []*schema.ResourceData{d}, nil } -func expandContainerAwsNodePoolAutoscaling(o interface{}) *containeraws.NodePoolAutoscaling { +func expandContainerAwsNodePoolAutoscaling(o interface{}) *NodePoolAutoscaling { if o == nil { - return containeraws.EmptyNodePoolAutoscaling + return EmptyNodePoolAutoscaling } objArr := o.([]interface{}) if len(objArr) == 0 || objArr[0] == nil { - return containeraws.EmptyNodePoolAutoscaling + return EmptyNodePoolAutoscaling } obj := objArr[0].(map[string]interface{}) - return &containeraws.NodePoolAutoscaling{ + return &NodePoolAutoscaling{ MaxNodeCount: dcl.Int64(int64(obj["max_node_count"].(int))), MinNodeCount: dcl.Int64(int64(obj["min_node_count"].(int))), } } -func flattenContainerAwsNodePoolAutoscaling(obj *containeraws.NodePoolAutoscaling) interface{} { +func flattenContainerAwsNodePoolAutoscaling(obj *NodePoolAutoscaling) interface{} { if obj == nil || obj.Empty() { return 
nil } @@ -913,16 +905,16 @@ func flattenContainerAwsNodePoolAutoscaling(obj *containeraws.NodePoolAutoscalin } -func expandContainerAwsNodePoolConfig(o interface{}) *containeraws.NodePoolConfig { +func expandContainerAwsNodePoolConfig(o interface{}) *NodePoolConfig { if o == nil { - return containeraws.EmptyNodePoolConfig + return EmptyNodePoolConfig } objArr := o.([]interface{}) if len(objArr) == 0 || objArr[0] == nil { - return containeraws.EmptyNodePoolConfig + return EmptyNodePoolConfig } obj := objArr[0].(map[string]interface{}) - return &containeraws.NodePoolConfig{ + return &NodePoolConfig{ ConfigEncryption: expandContainerAwsNodePoolConfigConfigEncryption(obj["config_encryption"]), IamInstanceProfile: dcl.String(obj["iam_instance_profile"].(string)), AutoscalingMetricsCollection: expandContainerAwsNodePoolConfigAutoscalingMetricsCollection(obj["autoscaling_metrics_collection"]), @@ -934,7 +926,7 @@ func expandContainerAwsNodePoolConfig(o interface{}) *containeraws.NodePoolConfi Labels: tpgresource.CheckStringMap(obj["labels"]), ProxyConfig: expandContainerAwsNodePoolConfigProxyConfig(obj["proxy_config"]), RootVolume: expandContainerAwsNodePoolConfigRootVolume(obj["root_volume"]), - SecurityGroupIds: tpgdclresource.ExpandStringArray(obj["security_group_ids"]), + SecurityGroupIds: dcl.ExpandStringArray(obj["security_group_ids"]), {{- if ne $.TargetVersionName "ga" }} SpotConfig: expandContainerAwsNodePoolConfigSpotConfig(obj["spot_config"]), {{- end }} @@ -944,7 +936,7 @@ func expandContainerAwsNodePoolConfig(o interface{}) *containeraws.NodePoolConfi } } -func flattenContainerAwsNodePoolConfig(obj *containeraws.NodePoolConfig) interface{} { +func flattenContainerAwsNodePoolConfig(obj *NodePoolConfig) interface{} { if obj == nil || obj.Empty() { return nil } @@ -973,21 +965,21 @@ func flattenContainerAwsNodePoolConfig(obj *containeraws.NodePoolConfig) interfa } -func expandContainerAwsNodePoolConfigConfigEncryption(o interface{}) 
*containeraws.NodePoolConfigConfigEncryption { +func expandContainerAwsNodePoolConfigConfigEncryption(o interface{}) *NodePoolConfigConfigEncryption { if o == nil { - return containeraws.EmptyNodePoolConfigConfigEncryption + return EmptyNodePoolConfigConfigEncryption } objArr := o.([]interface{}) if len(objArr) == 0 || objArr[0] == nil { - return containeraws.EmptyNodePoolConfigConfigEncryption + return EmptyNodePoolConfigConfigEncryption } obj := objArr[0].(map[string]interface{}) - return &containeraws.NodePoolConfigConfigEncryption{ + return &NodePoolConfigConfigEncryption{ KmsKeyArn: dcl.String(obj["kms_key_arn"].(string)), } } -func flattenContainerAwsNodePoolConfigConfigEncryption(obj *containeraws.NodePoolConfigConfigEncryption) interface{} { +func flattenContainerAwsNodePoolConfigConfigEncryption(obj *NodePoolConfigConfigEncryption) interface{} { if obj == nil || obj.Empty() { return nil } @@ -999,22 +991,22 @@ func flattenContainerAwsNodePoolConfigConfigEncryption(obj *containeraws.NodePoo } -func expandContainerAwsNodePoolConfigAutoscalingMetricsCollection(o interface{}) *containeraws.NodePoolConfigAutoscalingMetricsCollection { +func expandContainerAwsNodePoolConfigAutoscalingMetricsCollection(o interface{}) *NodePoolConfigAutoscalingMetricsCollection { if o == nil { - return containeraws.EmptyNodePoolConfigAutoscalingMetricsCollection + return EmptyNodePoolConfigAutoscalingMetricsCollection } objArr := o.([]interface{}) if len(objArr) == 0 || objArr[0] == nil { - return containeraws.EmptyNodePoolConfigAutoscalingMetricsCollection + return EmptyNodePoolConfigAutoscalingMetricsCollection } obj := objArr[0].(map[string]interface{}) - return &containeraws.NodePoolConfigAutoscalingMetricsCollection{ + return &NodePoolConfigAutoscalingMetricsCollection{ Granularity: dcl.String(obj["granularity"].(string)), - Metrics: tpgdclresource.ExpandStringArray(obj["metrics"]), + Metrics: dcl.ExpandStringArray(obj["metrics"]), } } -func 
flattenContainerAwsNodePoolConfigAutoscalingMetricsCollection(obj *containeraws.NodePoolConfigAutoscalingMetricsCollection) interface{} { +func flattenContainerAwsNodePoolConfigAutoscalingMetricsCollection(obj *NodePoolConfigAutoscalingMetricsCollection) interface{} { if obj == nil || obj.Empty() { return nil } @@ -1028,7 +1020,7 @@ func flattenContainerAwsNodePoolConfigAutoscalingMetricsCollection(obj *containe } -func expandContainerAwsNodePoolConfigInstancePlacement(o interface{}) *containeraws.NodePoolConfigInstancePlacement { +func expandContainerAwsNodePoolConfigInstancePlacement(o interface{}) *NodePoolConfigInstancePlacement { if o == nil { return nil } @@ -1037,12 +1029,12 @@ func expandContainerAwsNodePoolConfigInstancePlacement(o interface{}) *container return nil } obj := objArr[0].(map[string]interface{}) - return &containeraws.NodePoolConfigInstancePlacement{ - Tenancy: containeraws.NodePoolConfigInstancePlacementTenancyEnumRef(obj["tenancy"].(string)), + return &NodePoolConfigInstancePlacement{ + Tenancy: NodePoolConfigInstancePlacementTenancyEnumRef(obj["tenancy"].(string)), } } -func flattenContainerAwsNodePoolConfigInstancePlacement(obj *containeraws.NodePoolConfigInstancePlacement) interface{} { +func flattenContainerAwsNodePoolConfigInstancePlacement(obj *NodePoolConfigInstancePlacement) interface{} { if obj == nil || obj.Empty() { return nil } @@ -1055,22 +1047,22 @@ func flattenContainerAwsNodePoolConfigInstancePlacement(obj *containeraws.NodePo } -func expandContainerAwsNodePoolConfigProxyConfig(o interface{}) *containeraws.NodePoolConfigProxyConfig { +func expandContainerAwsNodePoolConfigProxyConfig(o interface{}) *NodePoolConfigProxyConfig { if o == nil { - return containeraws.EmptyNodePoolConfigProxyConfig + return EmptyNodePoolConfigProxyConfig } objArr := o.([]interface{}) if len(objArr) == 0 || objArr[0] == nil { - return containeraws.EmptyNodePoolConfigProxyConfig + return EmptyNodePoolConfigProxyConfig } obj := 
objArr[0].(map[string]interface{}) - return &containeraws.NodePoolConfigProxyConfig{ + return &NodePoolConfigProxyConfig{ SecretArn: dcl.String(obj["secret_arn"].(string)), SecretVersion: dcl.String(obj["secret_version"].(string)), } } -func flattenContainerAwsNodePoolConfigProxyConfig(obj *containeraws.NodePoolConfigProxyConfig) interface{} { +func flattenContainerAwsNodePoolConfigProxyConfig(obj *NodePoolConfigProxyConfig) interface{} { if obj == nil || obj.Empty() { return nil } @@ -1083,7 +1075,7 @@ func flattenContainerAwsNodePoolConfigProxyConfig(obj *containeraws.NodePoolConf } -func expandContainerAwsNodePoolConfigRootVolume(o interface{}) *containeraws.NodePoolConfigRootVolume { +func expandContainerAwsNodePoolConfigRootVolume(o interface{}) *NodePoolConfigRootVolume { if o == nil { return nil } @@ -1092,16 +1084,16 @@ func expandContainerAwsNodePoolConfigRootVolume(o interface{}) *containeraws.Nod return nil } obj := objArr[0].(map[string]interface{}) - return &containeraws.NodePoolConfigRootVolume{ + return &NodePoolConfigRootVolume{ Iops: dcl.Int64OrNil(int64(obj["iops"].(int))), KmsKeyArn: dcl.String(obj["kms_key_arn"].(string)), SizeGib: dcl.Int64OrNil(int64(obj["size_gib"].(int))), Throughput: dcl.Int64OrNil(int64(obj["throughput"].(int))), - VolumeType: containeraws.NodePoolConfigRootVolumeVolumeTypeEnumRef(obj["volume_type"].(string)), + VolumeType: NodePoolConfigRootVolumeVolumeTypeEnumRef(obj["volume_type"].(string)), } } -func flattenContainerAwsNodePoolConfigRootVolume(obj *containeraws.NodePoolConfigRootVolume) interface{} { +func flattenContainerAwsNodePoolConfigRootVolume(obj *NodePoolConfigRootVolume) interface{} { if obj == nil || obj.Empty() { return nil } @@ -1118,21 +1110,21 @@ func flattenContainerAwsNodePoolConfigRootVolume(obj *containeraws.NodePoolConfi } {{- if ne $.TargetVersionName "ga" }} -func expandContainerAwsNodePoolConfigSpotConfig(o interface{}) *containeraws.NodePoolConfigSpotConfig { +func 
expandContainerAwsNodePoolConfigSpotConfig(o interface{}) *NodePoolConfigSpotConfig { if o == nil { - return containeraws.EmptyNodePoolConfigSpotConfig + return EmptyNodePoolConfigSpotConfig } objArr := o.([]interface{}) if len(objArr) == 0 || objArr[0] == nil { - return containeraws.EmptyNodePoolConfigSpotConfig + return EmptyNodePoolConfigSpotConfig } obj := objArr[0].(map[string]interface{}) - return &containeraws.NodePoolConfigSpotConfig{ - InstanceTypes: tpgdclresource.ExpandStringArray(obj["instance_types"]), + return &NodePoolConfigSpotConfig{ + InstanceTypes: dcl.ExpandStringArray(obj["instance_types"]), } } -func flattenContainerAwsNodePoolConfigSpotConfig(obj *containeraws.NodePoolConfigSpotConfig) interface{} { +func flattenContainerAwsNodePoolConfigSpotConfig(obj *NodePoolConfigSpotConfig) interface{} { if obj == nil || obj.Empty() { return nil } @@ -1145,21 +1137,21 @@ func flattenContainerAwsNodePoolConfigSpotConfig(obj *containeraws.NodePoolConfi } {{- end }} -func expandContainerAwsNodePoolConfigSshConfig(o interface{}) *containeraws.NodePoolConfigSshConfig { +func expandContainerAwsNodePoolConfigSshConfig(o interface{}) *NodePoolConfigSshConfig { if o == nil { - return containeraws.EmptyNodePoolConfigSshConfig + return EmptyNodePoolConfigSshConfig } objArr := o.([]interface{}) if len(objArr) == 0 || objArr[0] == nil { - return containeraws.EmptyNodePoolConfigSshConfig + return EmptyNodePoolConfigSshConfig } obj := objArr[0].(map[string]interface{}) - return &containeraws.NodePoolConfigSshConfig{ + return &NodePoolConfigSshConfig{ Ec2KeyPair: dcl.String(obj["ec2_key_pair"].(string)), } } -func flattenContainerAwsNodePoolConfigSshConfig(obj *containeraws.NodePoolConfigSshConfig) interface{} { +func flattenContainerAwsNodePoolConfigSshConfig(obj *NodePoolConfigSshConfig) interface{} { if obj == nil || obj.Empty() { return nil } @@ -1170,17 +1162,17 @@ func flattenContainerAwsNodePoolConfigSshConfig(obj *containeraws.NodePoolConfig return 
[]interface{}{transformed} } -func expandContainerAwsNodePoolConfigTaintsArray(o interface{}) []containeraws.NodePoolConfigTaints { +func expandContainerAwsNodePoolConfigTaintsArray(o interface{}) []NodePoolConfigTaints { if o == nil { - return make([]containeraws.NodePoolConfigTaints, 0) + return make([]NodePoolConfigTaints, 0) } objs := o.([]interface{}) if len(objs) == 0 || objs[0] == nil { - return make([]containeraws.NodePoolConfigTaints, 0) + return make([]NodePoolConfigTaints, 0) } - items := make([]containeraws.NodePoolConfigTaints, 0, len(objs)) + items := make([]NodePoolConfigTaints, 0, len(objs)) for _, item := range objs { i := expandContainerAwsNodePoolConfigTaints(item) items = append(items, *i) @@ -1189,20 +1181,20 @@ func expandContainerAwsNodePoolConfigTaintsArray(o interface{}) []containeraws.N return items } -func expandContainerAwsNodePoolConfigTaints(o interface{}) *containeraws.NodePoolConfigTaints { +func expandContainerAwsNodePoolConfigTaints(o interface{}) *NodePoolConfigTaints { if o == nil { - return containeraws.EmptyNodePoolConfigTaints + return EmptyNodePoolConfigTaints } obj := o.(map[string]interface{}) - return &containeraws.NodePoolConfigTaints{ - Effect: containeraws.NodePoolConfigTaintsEffectEnumRef(obj["effect"].(string)), + return &NodePoolConfigTaints{ + Effect: NodePoolConfigTaintsEffectEnumRef(obj["effect"].(string)), Key: dcl.String(obj["key"].(string)), Value: dcl.String(obj["value"].(string)), } } -func flattenContainerAwsNodePoolConfigTaintsArray(objs []containeraws.NodePoolConfigTaints) []interface{} { +func flattenContainerAwsNodePoolConfigTaintsArray(objs []NodePoolConfigTaints) []interface{} { if objs == nil { return nil } @@ -1216,7 +1208,7 @@ func flattenContainerAwsNodePoolConfigTaintsArray(objs []containeraws.NodePoolCo return items } -func flattenContainerAwsNodePoolConfigTaints(obj *containeraws.NodePoolConfigTaints) interface{} { +func flattenContainerAwsNodePoolConfigTaints(obj *NodePoolConfigTaints) 
interface{} { if obj == nil || obj.Empty() { return nil } @@ -1230,21 +1222,21 @@ func flattenContainerAwsNodePoolConfigTaints(obj *containeraws.NodePoolConfigTai } -func expandContainerAwsNodePoolMaxPodsConstraint(o interface{}) *containeraws.NodePoolMaxPodsConstraint { +func expandContainerAwsNodePoolMaxPodsConstraint(o interface{}) *NodePoolMaxPodsConstraint { if o == nil { - return containeraws.EmptyNodePoolMaxPodsConstraint + return EmptyNodePoolMaxPodsConstraint } objArr := o.([]interface{}) if len(objArr) == 0 || objArr[0] == nil { - return containeraws.EmptyNodePoolMaxPodsConstraint + return EmptyNodePoolMaxPodsConstraint } obj := objArr[0].(map[string]interface{}) - return &containeraws.NodePoolMaxPodsConstraint{ + return &NodePoolMaxPodsConstraint{ MaxPodsPerNode: dcl.Int64(int64(obj["max_pods_per_node"].(int))), } } -func flattenContainerAwsNodePoolMaxPodsConstraint(obj *containeraws.NodePoolMaxPodsConstraint) interface{} { +func flattenContainerAwsNodePoolMaxPodsConstraint(obj *NodePoolMaxPodsConstraint) interface{} { if obj == nil || obj.Empty() { return nil } @@ -1256,7 +1248,7 @@ func flattenContainerAwsNodePoolMaxPodsConstraint(obj *containeraws.NodePoolMaxP } -func expandContainerAwsNodePoolKubeletConfig(o interface{}) *containeraws.NodePoolKubeletConfig { +func expandContainerAwsNodePoolKubeletConfig(o interface{}) *NodePoolKubeletConfig { if o == nil { return nil } @@ -1265,15 +1257,15 @@ func expandContainerAwsNodePoolKubeletConfig(o interface{}) *containeraws.NodePo return nil } obj := objArr[0].(map[string]interface{}) - return &containeraws.NodePoolKubeletConfig{ + return &NodePoolKubeletConfig{ CpuCfsQuota: dcl.Bool(obj["cpu_cfs_quota"].(bool)), CpuCfsQuotaPeriod: dcl.String(obj["cpu_cfs_quota_period"].(string)), - CpuManagerPolicy: containeraws.NodePoolKubeletConfigCpuManagerPolicyEnumRef(obj["cpu_manager_policy"].(string)), + CpuManagerPolicy: NodePoolKubeletConfigCpuManagerPolicyEnumRef(obj["cpu_manager_policy"].(string)), PodPidsLimit: 
dcl.Int64(int64(obj["pod_pids_limit"].(int))), } } -func flattenContainerAwsNodePoolKubeletConfig(obj *containeraws.NodePoolKubeletConfig) interface{} { +func flattenContainerAwsNodePoolKubeletConfig(obj *NodePoolKubeletConfig) interface{} { if obj == nil || obj.Empty() { return nil } @@ -1288,7 +1280,7 @@ func flattenContainerAwsNodePoolKubeletConfig(obj *containeraws.NodePoolKubeletC } -func expandContainerAwsNodePoolManagement(o interface{}) *containeraws.NodePoolManagement { +func expandContainerAwsNodePoolManagement(o interface{}) *NodePoolManagement { if o == nil { return nil } @@ -1297,12 +1289,12 @@ func expandContainerAwsNodePoolManagement(o interface{}) *containeraws.NodePoolM return nil } obj := objArr[0].(map[string]interface{}) - return &containeraws.NodePoolManagement{ + return &NodePoolManagement{ AutoRepair: dcl.Bool(obj["auto_repair"].(bool)), } } -func flattenContainerAwsNodePoolManagement(obj *containeraws.NodePoolManagement) interface{} { +func flattenContainerAwsNodePoolManagement(obj *NodePoolManagement) interface{} { if obj == nil || obj.Empty() { return nil } @@ -1314,7 +1306,7 @@ func flattenContainerAwsNodePoolManagement(obj *containeraws.NodePoolManagement) } -func expandContainerAwsNodePoolUpdateSettings(o interface{}) *containeraws.NodePoolUpdateSettings { +func expandContainerAwsNodePoolUpdateSettings(o interface{}) *NodePoolUpdateSettings { if o == nil { return nil } @@ -1323,12 +1315,12 @@ func expandContainerAwsNodePoolUpdateSettings(o interface{}) *containeraws.NodeP return nil } obj := objArr[0].(map[string]interface{}) - return &containeraws.NodePoolUpdateSettings{ + return &NodePoolUpdateSettings{ SurgeSettings: expandContainerAwsNodePoolUpdateSettingsSurgeSettings(obj["surge_settings"]), } } -func flattenContainerAwsNodePoolUpdateSettings(obj *containeraws.NodePoolUpdateSettings) interface{} { +func flattenContainerAwsNodePoolUpdateSettings(obj *NodePoolUpdateSettings) interface{} { if obj == nil || obj.Empty() { return nil } 
@@ -1340,7 +1332,7 @@ func flattenContainerAwsNodePoolUpdateSettings(obj *containeraws.NodePoolUpdateS } -func expandContainerAwsNodePoolUpdateSettingsSurgeSettings(o interface{}) *containeraws.NodePoolUpdateSettingsSurgeSettings { +func expandContainerAwsNodePoolUpdateSettingsSurgeSettings(o interface{}) *NodePoolUpdateSettingsSurgeSettings { if o == nil { return nil } @@ -1349,13 +1341,13 @@ func expandContainerAwsNodePoolUpdateSettingsSurgeSettings(o interface{}) *conta return nil } obj := objArr[0].(map[string]interface{}) - return &containeraws.NodePoolUpdateSettingsSurgeSettings{ + return &NodePoolUpdateSettingsSurgeSettings{ MaxSurge: dcl.Int64OrNil(int64(obj["max_surge"].(int))), MaxUnavailable: dcl.Int64OrNil(int64(obj["max_unavailable"].(int))), } } -func flattenContainerAwsNodePoolUpdateSettingsSurgeSettings(obj *containeraws.NodePoolUpdateSettingsSurgeSettings) interface{} { +func flattenContainerAwsNodePoolUpdateSettingsSurgeSettings(obj *NodePoolUpdateSettingsSurgeSettings) interface{} { if obj == nil || obj.Empty() { return nil } diff --git a/mmv1/third_party/terraform/services/containeraws/resource_container_aws_node_pool_generated_test.go.tmpl b/mmv1/third_party/terraform/services/containeraws/resource_container_aws_node_pool_generated_test.go.tmpl index 8c06b68622d3..29ff7e1aa9ec 100644 --- a/mmv1/third_party/terraform/services/containeraws/resource_container_aws_node_pool_generated_test.go.tmpl +++ b/mmv1/third_party/terraform/services/containeraws/resource_container_aws_node_pool_generated_test.go.tmpl @@ -3,20 +3,15 @@ package containeraws_test import ( "context" "fmt" - dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -{{- if eq $.TargetVersionName "ga" }} - containeraws "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws" -{{- else }} - containeraws "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/beta" -{{- end }} - 
"github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/terraform" "strings" "testing" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-google/google/acctest" "github.com/hashicorp/terraform-provider-google/google/envvar" - transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/services/containeraws" + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" ) func TestAccContainerAwsNodePool_BasicHandWritten(t *testing.T) { @@ -1576,7 +1571,7 @@ func testAccCheckContainerAwsNodePoolDestroyProducer(t *testing.T) func(s *terra UpdateTime: dcl.StringOrNil(rs.Primary.Attributes["update_time"]), } - client := transport_tpg.NewDCLContainerAwsClient(config, config.UserAgent, billingProject, 0) + client := containeraws.NewDCLContainerAwsClient(config, config.UserAgent, billingProject, 0) _, err := client.GetNodePool(context.Background(), obj) if err == nil { return fmt.Errorf("google_container_aws_node_pool still exists %v", obj) diff --git a/mmv1/third_party/terraform/services/containerazure/resource_container_azure_client_generated_test.go.tmpl b/mmv1/third_party/terraform/services/containerazure/resource_container_azure_client_generated_test.go.tmpl index 55a1e33739d4..aca3cf0b360a 100644 --- a/mmv1/third_party/terraform/services/containerazure/resource_container_azure_client_generated_test.go.tmpl +++ b/mmv1/third_party/terraform/services/containerazure/resource_container_azure_client_generated_test.go.tmpl @@ -3,20 +3,15 @@ package containerazure_test import ( "context" "fmt" - dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -{{- if eq $.TargetVersionName "ga" }} - containerazure 
"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containerazure" -{{- else }} - containerazure "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containerazure/beta" -{{- end }} - "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/terraform" "strings" "testing" - + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-google/google/acctest" "github.com/hashicorp/terraform-provider-google/google/envvar" - transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/services/containerazure" + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" ) func TestAccContainerAzureClient_BasicHandWritten(t *testing.T) { @@ -87,7 +82,7 @@ func testAccCheckContainerAzureClientDestroyProducer(t *testing.T) func(s *terra Uid: dcl.StringOrNil(rs.Primary.Attributes["uid"]), } - client := transport_tpg.NewDCLContainerAzureClient(config, config.UserAgent, billingProject, 0) + client := containerazure.NewDCLContainerAzureClient(config, config.UserAgent, billingProject, 0) _, err := client.GetClient(context.Background(), obj) if err == nil { return fmt.Errorf("google_container_azure_client still exists %v", obj) diff --git a/mmv1/third_party/terraform/services/containerazure/resource_container_azure_cluster.go.tmpl b/mmv1/third_party/terraform/services/containerazure/resource_container_azure_cluster.go.tmpl index 0e40dc78cefd..4a14dfde99ef 100644 --- a/mmv1/third_party/terraform/services/containerazure/resource_container_azure_cluster.go.tmpl +++ b/mmv1/third_party/terraform/services/containerazure/resource_container_azure_cluster.go.tmpl @@ -8,15 +8,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -{{- if eq $.TargetVersionName "ga" }} - containerazure "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containerazure" -{{- else }} - containerazure "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containerazure/beta" -{{- end }} - - "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" "github.com/hashicorp/terraform-provider-google/google/tpgresource" transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) @@ -571,7 +563,7 @@ func resourceContainerAzureClusterCreate(d *schema.ResourceData, meta interface{ return err } - obj := &containerazure.Cluster{ + obj := &Cluster{ Authorization: expandContainerAzureClusterAuthorization(d.Get("authorization")), AzureRegion: dcl.String(d.Get("azure_region").(string)), ControlPlane: expandContainerAzureClusterControlPlane(d.Get("control_plane")), @@ -595,7 +587,7 @@ func resourceContainerAzureClusterCreate(d *schema.ResourceData, meta interface{ return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - directive := tpgdclresource.CreateDirective + directive := dcl.CreateDirective userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err @@ -605,7 +597,7 @@ func resourceContainerAzureClusterCreate(d *schema.ResourceData, meta interface{ if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := transport_tpg.NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + client := NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") 
return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) @@ -634,7 +626,7 @@ func resourceContainerAzureClusterRead(d *schema.ResourceData, meta interface{}) return err } - obj := &containerazure.Cluster{ + obj := &Cluster{ Authorization: expandContainerAzureClusterAuthorization(d.Get("authorization")), AzureRegion: dcl.String(d.Get("azure_region").(string)), ControlPlane: expandContainerAzureClusterControlPlane(d.Get("control_plane")), @@ -662,7 +654,7 @@ func resourceContainerAzureClusterRead(d *schema.ResourceData, meta interface{}) if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := transport_tpg.NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + client := NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) @@ -672,7 +664,7 @@ func resourceContainerAzureClusterRead(d *schema.ResourceData, meta interface{}) res, err := client.GetCluster(context.Background(), obj) if err != nil { resourceName := fmt.Sprintf("ContainerAzureCluster %q", d.Id()) - return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) + return dcl.HandleNotFoundDCLError(err, d, resourceName) } if err = d.Set("authorization", flattenContainerAzureClusterAuthorization(res.Authorization)); err != nil { @@ -756,7 +748,7 @@ func resourceContainerAzureClusterUpdate(d *schema.ResourceData, meta interface{ return err } - obj := &containerazure.Cluster{ + obj := &Cluster{ Authorization: expandContainerAzureClusterAuthorization(d.Get("authorization")), AzureRegion: dcl.String(d.Get("azure_region").(string)), ControlPlane: expandContainerAzureClusterControlPlane(d.Get("control_plane")), @@ -774,7 +766,7 @@ func resourceContainerAzureClusterUpdate(d *schema.ResourceData, meta 
interface{ {{- end }} Project: dcl.String(project), } - directive := tpgdclresource.UpdateDirective + directive := dcl.UpdateDirective userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err @@ -785,7 +777,7 @@ func resourceContainerAzureClusterUpdate(d *schema.ResourceData, meta interface{ if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := transport_tpg.NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + client := NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) @@ -814,7 +806,7 @@ func resourceContainerAzureClusterDelete(d *schema.ResourceData, meta interface{ return err } - obj := &containerazure.Cluster{ + obj := &Cluster{ Authorization: expandContainerAzureClusterAuthorization(d.Get("authorization")), AzureRegion: dcl.String(d.Get("azure_region").(string)), ControlPlane: expandContainerAzureClusterControlPlane(d.Get("control_plane")), @@ -843,7 +835,7 @@ func resourceContainerAzureClusterDelete(d *schema.ResourceData, meta interface{ if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := transport_tpg.NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + client := NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) @@ -879,22 +871,22 @@ func resourceContainerAzureClusterImport(d *schema.ResourceData, meta interface{ return []*schema.ResourceData{d}, nil } -func expandContainerAzureClusterAuthorization(o 
interface{}) *containerazure.ClusterAuthorization { +func expandContainerAzureClusterAuthorization(o interface{}) *ClusterAuthorization { if o == nil { - return containerazure.EmptyClusterAuthorization + return EmptyClusterAuthorization } objArr := o.([]interface{}) if len(objArr) == 0 || objArr[0] == nil { - return containerazure.EmptyClusterAuthorization + return EmptyClusterAuthorization } obj := objArr[0].(map[string]interface{}) - return &containerazure.ClusterAuthorization{ + return &ClusterAuthorization{ AdminUsers: expandContainerAzureClusterAuthorizationAdminUsersArray(obj["admin_users"]), AdminGroups: expandContainerAzureClusterAuthorizationAdminGroupsArray(obj["admin_groups"]), } } -func flattenContainerAzureClusterAuthorization(obj *containerazure.ClusterAuthorization) interface{} { +func flattenContainerAzureClusterAuthorization(obj *ClusterAuthorization) interface{} { if obj == nil || obj.Empty() { return nil } @@ -906,17 +898,17 @@ func flattenContainerAzureClusterAuthorization(obj *containerazure.ClusterAuthor return []interface{}{transformed} } -func expandContainerAzureClusterAuthorizationAdminUsersArray(o interface{}) []containerazure.ClusterAuthorizationAdminUsers { +func expandContainerAzureClusterAuthorizationAdminUsersArray(o interface{}) []ClusterAuthorizationAdminUsers { if o == nil { - return make([]containerazure.ClusterAuthorizationAdminUsers, 0) + return make([]ClusterAuthorizationAdminUsers, 0) } objs := o.([]interface{}) if len(objs) == 0 || objs[0] == nil { - return make([]containerazure.ClusterAuthorizationAdminUsers, 0) + return make([]ClusterAuthorizationAdminUsers, 0) } - items := make([]containerazure.ClusterAuthorizationAdminUsers, 0, len(objs)) + items := make([]ClusterAuthorizationAdminUsers, 0, len(objs)) for _, item := range objs { i := expandContainerAzureClusterAuthorizationAdminUsers(item) items = append(items, *i) @@ -925,18 +917,18 @@ func expandContainerAzureClusterAuthorizationAdminUsersArray(o interface{}) []co 
return items } -func expandContainerAzureClusterAuthorizationAdminUsers(o interface{}) *containerazure.ClusterAuthorizationAdminUsers { +func expandContainerAzureClusterAuthorizationAdminUsers(o interface{}) *ClusterAuthorizationAdminUsers { if o == nil { - return containerazure.EmptyClusterAuthorizationAdminUsers + return EmptyClusterAuthorizationAdminUsers } obj := o.(map[string]interface{}) - return &containerazure.ClusterAuthorizationAdminUsers{ + return &ClusterAuthorizationAdminUsers{ Username: dcl.String(obj["username"].(string)), } } -func flattenContainerAzureClusterAuthorizationAdminUsersArray(objs []containerazure.ClusterAuthorizationAdminUsers) []interface{} { +func flattenContainerAzureClusterAuthorizationAdminUsersArray(objs []ClusterAuthorizationAdminUsers) []interface{} { if objs == nil { return nil } @@ -950,7 +942,7 @@ func flattenContainerAzureClusterAuthorizationAdminUsersArray(objs []containeraz return items } -func flattenContainerAzureClusterAuthorizationAdminUsers(obj *containerazure.ClusterAuthorizationAdminUsers) interface{} { +func flattenContainerAzureClusterAuthorizationAdminUsers(obj *ClusterAuthorizationAdminUsers) interface{} { if obj == nil || obj.Empty() { return nil } @@ -961,17 +953,17 @@ func flattenContainerAzureClusterAuthorizationAdminUsers(obj *containerazure.Clu return transformed } -func expandContainerAzureClusterAuthorizationAdminGroupsArray(o interface{}) []containerazure.ClusterAuthorizationAdminGroups { +func expandContainerAzureClusterAuthorizationAdminGroupsArray(o interface{}) []ClusterAuthorizationAdminGroups { if o == nil { - return make([]containerazure.ClusterAuthorizationAdminGroups, 0) + return make([]ClusterAuthorizationAdminGroups, 0) } objs := o.([]interface{}) if len(objs) == 0 || objs[0] == nil { - return make([]containerazure.ClusterAuthorizationAdminGroups, 0) + return make([]ClusterAuthorizationAdminGroups, 0) } - items := make([]containerazure.ClusterAuthorizationAdminGroups, 0, len(objs)) + items := 
make([]ClusterAuthorizationAdminGroups, 0, len(objs)) for _, item := range objs { i := expandContainerAzureClusterAuthorizationAdminGroups(item) items = append(items, *i) @@ -980,18 +972,18 @@ func expandContainerAzureClusterAuthorizationAdminGroupsArray(o interface{}) []c return items } -func expandContainerAzureClusterAuthorizationAdminGroups(o interface{}) *containerazure.ClusterAuthorizationAdminGroups { +func expandContainerAzureClusterAuthorizationAdminGroups(o interface{}) *ClusterAuthorizationAdminGroups { if o == nil { - return containerazure.EmptyClusterAuthorizationAdminGroups + return EmptyClusterAuthorizationAdminGroups } obj := o.(map[string]interface{}) - return &containerazure.ClusterAuthorizationAdminGroups{ + return &ClusterAuthorizationAdminGroups{ Group: dcl.String(obj["group"].(string)), } } -func flattenContainerAzureClusterAuthorizationAdminGroupsArray(objs []containerazure.ClusterAuthorizationAdminGroups) []interface{} { +func flattenContainerAzureClusterAuthorizationAdminGroupsArray(objs []ClusterAuthorizationAdminGroups) []interface{} { if objs == nil { return nil } @@ -1005,7 +997,7 @@ func flattenContainerAzureClusterAuthorizationAdminGroupsArray(objs []containera return items } -func flattenContainerAzureClusterAuthorizationAdminGroups(obj *containerazure.ClusterAuthorizationAdminGroups) interface{} { +func flattenContainerAzureClusterAuthorizationAdminGroups(obj *ClusterAuthorizationAdminGroups) interface{} { if obj == nil || obj.Empty() { return nil } @@ -1017,16 +1009,16 @@ func flattenContainerAzureClusterAuthorizationAdminGroups(obj *containerazure.Cl } -func expandContainerAzureClusterControlPlane(o interface{}) *containerazure.ClusterControlPlane { +func expandContainerAzureClusterControlPlane(o interface{}) *ClusterControlPlane { if o == nil { - return containerazure.EmptyClusterControlPlane + return EmptyClusterControlPlane } objArr := o.([]interface{}) if len(objArr) == 0 || objArr[0] == nil { - return 
containerazure.EmptyClusterControlPlane + return EmptyClusterControlPlane } obj := objArr[0].(map[string]interface{}) - return &containerazure.ClusterControlPlane{ + return &ClusterControlPlane{ SshConfig: expandContainerAzureClusterControlPlaneSshConfig(obj["ssh_config"]), SubnetId: dcl.String(obj["subnet_id"].(string)), Version: dcl.String(obj["version"].(string)), @@ -1040,7 +1032,7 @@ func expandContainerAzureClusterControlPlane(o interface{}) *containerazure.Clus } } -func flattenContainerAzureClusterControlPlane(obj *containerazure.ClusterControlPlane) interface{} { +func flattenContainerAzureClusterControlPlane(obj *ClusterControlPlane) interface{} { if obj == nil || obj.Empty() { return nil } @@ -1061,21 +1053,21 @@ func flattenContainerAzureClusterControlPlane(obj *containerazure.ClusterControl } -func expandContainerAzureClusterControlPlaneSshConfig(o interface{}) *containerazure.ClusterControlPlaneSshConfig { +func expandContainerAzureClusterControlPlaneSshConfig(o interface{}) *ClusterControlPlaneSshConfig { if o == nil { - return containerazure.EmptyClusterControlPlaneSshConfig + return EmptyClusterControlPlaneSshConfig } objArr := o.([]interface{}) if len(objArr) == 0 || objArr[0] == nil { - return containerazure.EmptyClusterControlPlaneSshConfig + return EmptyClusterControlPlaneSshConfig } obj := objArr[0].(map[string]interface{}) - return &containerazure.ClusterControlPlaneSshConfig{ + return &ClusterControlPlaneSshConfig{ AuthorizedKey: dcl.String(obj["authorized_key"].(string)), } } -func flattenContainerAzureClusterControlPlaneSshConfig(obj *containerazure.ClusterControlPlaneSshConfig) interface{} { +func flattenContainerAzureClusterControlPlaneSshConfig(obj *ClusterControlPlaneSshConfig) interface{} { if obj == nil || obj.Empty() { return nil } @@ -1087,21 +1079,21 @@ func flattenContainerAzureClusterControlPlaneSshConfig(obj *containerazure.Clust } -func expandContainerAzureClusterControlPlaneDatabaseEncryption(o interface{}) 
*containerazure.ClusterControlPlaneDatabaseEncryption { +func expandContainerAzureClusterControlPlaneDatabaseEncryption(o interface{}) *ClusterControlPlaneDatabaseEncryption { if o == nil { - return containerazure.EmptyClusterControlPlaneDatabaseEncryption + return EmptyClusterControlPlaneDatabaseEncryption } objArr := o.([]interface{}) if len(objArr) == 0 || objArr[0] == nil { - return containerazure.EmptyClusterControlPlaneDatabaseEncryption + return EmptyClusterControlPlaneDatabaseEncryption } obj := objArr[0].(map[string]interface{}) - return &containerazure.ClusterControlPlaneDatabaseEncryption{ + return &ClusterControlPlaneDatabaseEncryption{ KeyId: dcl.String(obj["key_id"].(string)), } } -func flattenContainerAzureClusterControlPlaneDatabaseEncryption(obj *containerazure.ClusterControlPlaneDatabaseEncryption) interface{} { +func flattenContainerAzureClusterControlPlaneDatabaseEncryption(obj *ClusterControlPlaneDatabaseEncryption) interface{} { if obj == nil || obj.Empty() { return nil } @@ -1113,7 +1105,7 @@ func flattenContainerAzureClusterControlPlaneDatabaseEncryption(obj *containeraz } -func expandContainerAzureClusterControlPlaneMainVolume(o interface{}) *containerazure.ClusterControlPlaneMainVolume { +func expandContainerAzureClusterControlPlaneMainVolume(o interface{}) *ClusterControlPlaneMainVolume { if o == nil { return nil } @@ -1122,12 +1114,12 @@ func expandContainerAzureClusterControlPlaneMainVolume(o interface{}) *container return nil } obj := objArr[0].(map[string]interface{}) - return &containerazure.ClusterControlPlaneMainVolume{ + return &ClusterControlPlaneMainVolume{ SizeGib: dcl.Int64OrNil(int64(obj["size_gib"].(int))), } } -func flattenContainerAzureClusterControlPlaneMainVolume(obj *containerazure.ClusterControlPlaneMainVolume) interface{} { +func flattenContainerAzureClusterControlPlaneMainVolume(obj *ClusterControlPlaneMainVolume) interface{} { if obj == nil || obj.Empty() { return nil } @@ -1139,22 +1131,22 @@ func 
flattenContainerAzureClusterControlPlaneMainVolume(obj *containerazure.Clus } -func expandContainerAzureClusterControlPlaneProxyConfig(o interface{}) *containerazure.ClusterControlPlaneProxyConfig { +func expandContainerAzureClusterControlPlaneProxyConfig(o interface{}) *ClusterControlPlaneProxyConfig { if o == nil { - return containerazure.EmptyClusterControlPlaneProxyConfig + return EmptyClusterControlPlaneProxyConfig } objArr := o.([]interface{}) if len(objArr) == 0 || objArr[0] == nil { - return containerazure.EmptyClusterControlPlaneProxyConfig + return EmptyClusterControlPlaneProxyConfig } obj := objArr[0].(map[string]interface{}) - return &containerazure.ClusterControlPlaneProxyConfig{ + return &ClusterControlPlaneProxyConfig{ ResourceGroupId: dcl.String(obj["resource_group_id"].(string)), SecretId: dcl.String(obj["secret_id"].(string)), } } -func flattenContainerAzureClusterControlPlaneProxyConfig(obj *containerazure.ClusterControlPlaneProxyConfig) interface{} { +func flattenContainerAzureClusterControlPlaneProxyConfig(obj *ClusterControlPlaneProxyConfig) interface{} { if obj == nil || obj.Empty() { return nil } @@ -1166,17 +1158,17 @@ func flattenContainerAzureClusterControlPlaneProxyConfig(obj *containerazure.Clu return []interface{}{transformed} } -func expandContainerAzureClusterControlPlaneReplicaPlacementsArray(o interface{}) []containerazure.ClusterControlPlaneReplicaPlacements { +func expandContainerAzureClusterControlPlaneReplicaPlacementsArray(o interface{}) []ClusterControlPlaneReplicaPlacements { if o == nil { - return make([]containerazure.ClusterControlPlaneReplicaPlacements, 0) + return make([]ClusterControlPlaneReplicaPlacements, 0) } objs := o.([]interface{}) if len(objs) == 0 || objs[0] == nil { - return make([]containerazure.ClusterControlPlaneReplicaPlacements, 0) + return make([]ClusterControlPlaneReplicaPlacements, 0) } - items := make([]containerazure.ClusterControlPlaneReplicaPlacements, 0, len(objs)) + items := 
make([]ClusterControlPlaneReplicaPlacements, 0, len(objs)) for _, item := range objs { i := expandContainerAzureClusterControlPlaneReplicaPlacements(item) items = append(items, *i) @@ -1185,19 +1177,19 @@ func expandContainerAzureClusterControlPlaneReplicaPlacementsArray(o interface{} return items } -func expandContainerAzureClusterControlPlaneReplicaPlacements(o interface{}) *containerazure.ClusterControlPlaneReplicaPlacements { +func expandContainerAzureClusterControlPlaneReplicaPlacements(o interface{}) *ClusterControlPlaneReplicaPlacements { if o == nil { - return containerazure.EmptyClusterControlPlaneReplicaPlacements + return EmptyClusterControlPlaneReplicaPlacements } obj := o.(map[string]interface{}) - return &containerazure.ClusterControlPlaneReplicaPlacements{ + return &ClusterControlPlaneReplicaPlacements{ AzureAvailabilityZone: dcl.String(obj["azure_availability_zone"].(string)), SubnetId: dcl.String(obj["subnet_id"].(string)), } } -func flattenContainerAzureClusterControlPlaneReplicaPlacementsArray(objs []containerazure.ClusterControlPlaneReplicaPlacements) []interface{} { +func flattenContainerAzureClusterControlPlaneReplicaPlacementsArray(objs []ClusterControlPlaneReplicaPlacements) []interface{} { if objs == nil { return nil } @@ -1211,7 +1203,7 @@ func flattenContainerAzureClusterControlPlaneReplicaPlacementsArray(objs []conta return items } -func flattenContainerAzureClusterControlPlaneReplicaPlacements(obj *containerazure.ClusterControlPlaneReplicaPlacements) interface{} { +func flattenContainerAzureClusterControlPlaneReplicaPlacements(obj *ClusterControlPlaneReplicaPlacements) interface{} { if obj == nil || obj.Empty() { return nil } @@ -1224,7 +1216,7 @@ func flattenContainerAzureClusterControlPlaneReplicaPlacements(obj *containerazu } -func expandContainerAzureClusterControlPlaneRootVolume(o interface{}) *containerazure.ClusterControlPlaneRootVolume { +func expandContainerAzureClusterControlPlaneRootVolume(o interface{}) 
*ClusterControlPlaneRootVolume { if o == nil { return nil } @@ -1233,12 +1225,12 @@ func expandContainerAzureClusterControlPlaneRootVolume(o interface{}) *container return nil } obj := objArr[0].(map[string]interface{}) - return &containerazure.ClusterControlPlaneRootVolume{ + return &ClusterControlPlaneRootVolume{ SizeGib: dcl.Int64OrNil(int64(obj["size_gib"].(int))), } } -func flattenContainerAzureClusterControlPlaneRootVolume(obj *containerazure.ClusterControlPlaneRootVolume) interface{} { +func flattenContainerAzureClusterControlPlaneRootVolume(obj *ClusterControlPlaneRootVolume) interface{} { if obj == nil || obj.Empty() { return nil } @@ -1250,21 +1242,21 @@ func flattenContainerAzureClusterControlPlaneRootVolume(obj *containerazure.Clus } -func expandContainerAzureClusterFleet(o interface{}) *containerazure.ClusterFleet { +func expandContainerAzureClusterFleet(o interface{}) *ClusterFleet { if o == nil { - return containerazure.EmptyClusterFleet + return EmptyClusterFleet } objArr := o.([]interface{}) if len(objArr) == 0 || objArr[0] == nil { - return containerazure.EmptyClusterFleet + return EmptyClusterFleet } obj := objArr[0].(map[string]interface{}) - return &containerazure.ClusterFleet{ + return &ClusterFleet{ Project: dcl.StringOrNil(obj["project"].(string)), } } -func flattenContainerAzureClusterFleet(obj *containerazure.ClusterFleet) interface{} { +func flattenContainerAzureClusterFleet(obj *ClusterFleet) interface{} { if obj == nil || obj.Empty() { return nil } @@ -1277,23 +1269,23 @@ func flattenContainerAzureClusterFleet(obj *containerazure.ClusterFleet) interfa } -func expandContainerAzureClusterNetworking(o interface{}) *containerazure.ClusterNetworking { +func expandContainerAzureClusterNetworking(o interface{}) *ClusterNetworking { if o == nil { - return containerazure.EmptyClusterNetworking + return EmptyClusterNetworking } objArr := o.([]interface{}) if len(objArr) == 0 || objArr[0] == nil { - return containerazure.EmptyClusterNetworking + 
return EmptyClusterNetworking } obj := objArr[0].(map[string]interface{}) - return &containerazure.ClusterNetworking{ - PodAddressCidrBlocks: tpgdclresource.ExpandStringArray(obj["pod_address_cidr_blocks"]), - ServiceAddressCidrBlocks: tpgdclresource.ExpandStringArray(obj["service_address_cidr_blocks"]), + return &ClusterNetworking{ + PodAddressCidrBlocks: dcl.ExpandStringArray(obj["pod_address_cidr_blocks"]), + ServiceAddressCidrBlocks: dcl.ExpandStringArray(obj["service_address_cidr_blocks"]), VirtualNetworkId: dcl.String(obj["virtual_network_id"].(string)), } } -func flattenContainerAzureClusterNetworking(obj *containerazure.ClusterNetworking) interface{} { +func flattenContainerAzureClusterNetworking(obj *ClusterNetworking) interface{} { if obj == nil || obj.Empty() { return nil } @@ -1307,22 +1299,22 @@ func flattenContainerAzureClusterNetworking(obj *containerazure.ClusterNetworkin } -func expandContainerAzureClusterAzureServicesAuthentication(o interface{}) *containerazure.ClusterAzureServicesAuthentication { +func expandContainerAzureClusterAzureServicesAuthentication(o interface{}) *ClusterAzureServicesAuthentication { if o == nil { - return containerazure.EmptyClusterAzureServicesAuthentication + return EmptyClusterAzureServicesAuthentication } objArr := o.([]interface{}) if len(objArr) == 0 || objArr[0] == nil { - return containerazure.EmptyClusterAzureServicesAuthentication + return EmptyClusterAzureServicesAuthentication } obj := objArr[0].(map[string]interface{}) - return &containerazure.ClusterAzureServicesAuthentication{ + return &ClusterAzureServicesAuthentication{ ApplicationId: dcl.String(obj["application_id"].(string)), TenantId: dcl.String(obj["tenant_id"].(string)), } } -func flattenContainerAzureClusterAzureServicesAuthentication(obj *containerazure.ClusterAzureServicesAuthentication) interface{} { +func flattenContainerAzureClusterAzureServicesAuthentication(obj *ClusterAzureServicesAuthentication) interface{} { if obj == nil || obj.Empty() 
{ return nil } @@ -1336,7 +1328,7 @@ func flattenContainerAzureClusterAzureServicesAuthentication(obj *containerazure } {{- if ne $.TargetVersionName "ga" }} -func expandContainerAzureClusterLoggingConfig(o interface{}) *containerazure.ClusterLoggingConfig { +func expandContainerAzureClusterLoggingConfig(o interface{}) *ClusterLoggingConfig { if o == nil { return nil } @@ -1345,12 +1337,12 @@ func expandContainerAzureClusterLoggingConfig(o interface{}) *containerazure.Clu return nil } obj := objArr[0].(map[string]interface{}) - return &containerazure.ClusterLoggingConfig{ + return &ClusterLoggingConfig{ ComponentConfig: expandContainerAzureClusterLoggingConfigComponentConfig(obj["component_config"]), } } -func flattenContainerAzureClusterLoggingConfig(obj *containerazure.ClusterLoggingConfig) interface{} { +func flattenContainerAzureClusterLoggingConfig(obj *ClusterLoggingConfig) interface{} { if obj == nil || obj.Empty() { return nil } @@ -1362,7 +1354,7 @@ func flattenContainerAzureClusterLoggingConfig(obj *containerazure.ClusterLoggin } -func expandContainerAzureClusterLoggingConfigComponentConfig(o interface{}) *containerazure.ClusterLoggingConfigComponentConfig { +func expandContainerAzureClusterLoggingConfigComponentConfig(o interface{}) *ClusterLoggingConfigComponentConfig { if o == nil { return nil } @@ -1371,12 +1363,12 @@ func expandContainerAzureClusterLoggingConfigComponentConfig(o interface{}) *con return nil } obj := objArr[0].(map[string]interface{}) - return &containerazure.ClusterLoggingConfigComponentConfig{ + return &ClusterLoggingConfigComponentConfig{ EnableComponents: expandContainerAzureClusterLoggingConfigComponentConfigEnableComponentsArray(obj["enable_components"]), } } -func flattenContainerAzureClusterLoggingConfigComponentConfig(obj *containerazure.ClusterLoggingConfigComponentConfig) interface{} { +func flattenContainerAzureClusterLoggingConfigComponentConfig(obj *ClusterLoggingConfigComponentConfig) interface{} { if obj == nil || 
obj.Empty() { return nil } @@ -1389,7 +1381,7 @@ func flattenContainerAzureClusterLoggingConfigComponentConfig(obj *containerazur } {{- end }} -func flattenContainerAzureClusterWorkloadIdentityConfig(obj *containerazure.ClusterWorkloadIdentityConfig) interface{} { +func flattenContainerAzureClusterWorkloadIdentityConfig(obj *ClusterWorkloadIdentityConfig) interface{} { if obj == nil || obj.Empty() { return nil } @@ -1419,7 +1411,7 @@ func flattenContainerAzureClusterAnnotations(v map[string]string, d *schema.Reso } {{- if ne $.TargetVersionName "ga" }} -func flattenContainerAzureClusterLoggingConfigComponentConfigEnableComponentsArray(obj []containerazure.ClusterLoggingConfigComponentConfigEnableComponentsEnum) interface{} { +func flattenContainerAzureClusterLoggingConfigComponentConfigEnableComponentsArray(obj []ClusterLoggingConfigComponentConfigEnableComponentsEnum) interface{} { if obj == nil { return nil } @@ -1429,11 +1421,11 @@ func flattenContainerAzureClusterLoggingConfigComponentConfigEnableComponentsArr } return items } -func expandContainerAzureClusterLoggingConfigComponentConfigEnableComponentsArray(o interface{}) []containerazure.ClusterLoggingConfigComponentConfigEnableComponentsEnum { +func expandContainerAzureClusterLoggingConfigComponentConfigEnableComponentsArray(o interface{}) []ClusterLoggingConfigComponentConfigEnableComponentsEnum { objs := o.([]interface{}) - items := make([]containerazure.ClusterLoggingConfigComponentConfigEnableComponentsEnum, 0, len(objs)) + items := make([]ClusterLoggingConfigComponentConfigEnableComponentsEnum, 0, len(objs)) for _, item := range objs { - i := containerazure.ClusterLoggingConfigComponentConfigEnableComponentsEnumRef(item.(string)) + i := ClusterLoggingConfigComponentConfigEnableComponentsEnumRef(item.(string)) items = append(items, *i) } return items diff --git a/mmv1/third_party/terraform/services/containerazure/resource_container_azure_cluster_generated_test.go.tmpl 
b/mmv1/third_party/terraform/services/containerazure/resource_container_azure_cluster_generated_test.go.tmpl index e78919da273b..9cf452ced8b9 100644 --- a/mmv1/third_party/terraform/services/containerazure/resource_container_azure_cluster_generated_test.go.tmpl +++ b/mmv1/third_party/terraform/services/containerazure/resource_container_azure_cluster_generated_test.go.tmpl @@ -3,20 +3,15 @@ package containerazure_test import ( "context" "fmt" - dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -{{- if eq $.TargetVersionName "ga" }} - containerazure "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containerazure" -{{- else }} - containerazure "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containerazure/beta" -{{- end }} - "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/terraform" "strings" "testing" - + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-google/google/acctest" "github.com/hashicorp/terraform-provider-google/google/envvar" - transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/services/containerazure" + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" ) func TestAccContainerAzureCluster_BasicHandWritten(t *testing.T) { @@ -661,7 +656,7 @@ func testAccCheckContainerAzureClusterDestroyProducer(t *testing.T) func(s *terr UpdateTime: dcl.StringOrNil(rs.Primary.Attributes["update_time"]), } - client := transport_tpg.NewDCLContainerAzureClient(config, config.UserAgent, billingProject, 0) + client := containerazure.NewDCLContainerAzureClient(config, config.UserAgent, billingProject, 0) _, err := client.GetCluster(context.Background(), obj) if err == nil { return 
fmt.Errorf("google_container_azure_cluster still exists %v", obj) diff --git a/mmv1/third_party/terraform/services/containerazure/resource_container_azure_node_pool.go.tmpl b/mmv1/third_party/terraform/services/containerazure/resource_container_azure_node_pool.go.tmpl index 632f0e7d3875..185e109c6163 100644 --- a/mmv1/third_party/terraform/services/containerazure/resource_container_azure_node_pool.go.tmpl +++ b/mmv1/third_party/terraform/services/containerazure/resource_container_azure_node_pool.go.tmpl @@ -8,15 +8,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -{{- if eq $.TargetVersionName "ga" }} - containerazure "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containerazure" -{{- else }} - containerazure "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containerazure/beta" -{{- end }} - - "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" "github.com/hashicorp/terraform-provider-google/google/tpgresource" transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) @@ -345,7 +337,7 @@ func resourceContainerAzureNodePoolCreate(d *schema.ResourceData, meta interface return err } - obj := &containerazure.NodePool{ + obj := &NodePool{ Autoscaling: expandContainerAzureNodePoolAutoscaling(d.Get("autoscaling")), Cluster: dcl.String(d.Get("cluster").(string)), Config: expandContainerAzureNodePoolConfig(d.Get("config")), @@ -365,7 +357,7 @@ func resourceContainerAzureNodePoolCreate(d *schema.ResourceData, meta interface return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - directive := tpgdclresource.CreateDirective + directive := dcl.CreateDirective userAgent, err := 
tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err @@ -375,7 +367,7 @@ func resourceContainerAzureNodePoolCreate(d *schema.ResourceData, meta interface if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := transport_tpg.NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + client := NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) @@ -404,7 +396,7 @@ func resourceContainerAzureNodePoolRead(d *schema.ResourceData, meta interface{} return err } - obj := &containerazure.NodePool{ + obj := &NodePool{ Autoscaling: expandContainerAzureNodePoolAutoscaling(d.Get("autoscaling")), Cluster: dcl.String(d.Get("cluster").(string)), Config: expandContainerAzureNodePoolConfig(d.Get("config")), @@ -428,7 +420,7 @@ func resourceContainerAzureNodePoolRead(d *schema.ResourceData, meta interface{} if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := transport_tpg.NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + client := NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) @@ -438,7 +430,7 @@ func resourceContainerAzureNodePoolRead(d *schema.ResourceData, meta interface{} res, err := client.GetNodePool(context.Background(), obj) if err != nil { resourceName := fmt.Sprintf("ContainerAzureNodePool %q", d.Id()) - return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) + return dcl.HandleNotFoundDCLError(err, d, resourceName) } if err = 
d.Set("autoscaling", flattenContainerAzureNodePoolAutoscaling(res.Autoscaling)); err != nil { @@ -471,7 +463,7 @@ func resourceContainerAzureNodePoolRead(d *schema.ResourceData, meta interface{} if err = d.Set("effective_annotations", res.Annotations); err != nil { return fmt.Errorf("error setting effective_annotations in state: %s", err) } - if err = d.Set("management", tpgresource.FlattenContainerAzureNodePoolManagement(res.Management, d, config)); err != nil { + if err = d.Set("management", flattenContainerAzureNodePoolManagement(res.Management)); err != nil { return fmt.Errorf("error setting management in state: %s", err) } if err = d.Set("project", res.Project); err != nil { @@ -508,7 +500,7 @@ func resourceContainerAzureNodePoolUpdate(d *schema.ResourceData, meta interface return err } - obj := &containerazure.NodePool{ + obj := &NodePool{ Autoscaling: expandContainerAzureNodePoolAutoscaling(d.Get("autoscaling")), Cluster: dcl.String(d.Get("cluster").(string)), Config: expandContainerAzureNodePoolConfig(d.Get("config")), @@ -522,7 +514,7 @@ func resourceContainerAzureNodePoolUpdate(d *schema.ResourceData, meta interface Management: expandContainerAzureNodePoolManagement(d.Get("management")), Project: dcl.String(project), } - directive := tpgdclresource.UpdateDirective + directive := dcl.UpdateDirective userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err @@ -533,7 +525,7 @@ func resourceContainerAzureNodePoolUpdate(d *schema.ResourceData, meta interface if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := transport_tpg.NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + client := NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", 
client.Config.BasePath, err) @@ -562,7 +554,7 @@ func resourceContainerAzureNodePoolDelete(d *schema.ResourceData, meta interface return err } - obj := &containerazure.NodePool{ + obj := &NodePool{ Autoscaling: expandContainerAzureNodePoolAutoscaling(d.Get("autoscaling")), Cluster: dcl.String(d.Get("cluster").(string)), Config: expandContainerAzureNodePoolConfig(d.Get("config")), @@ -587,7 +579,7 @@ func resourceContainerAzureNodePoolDelete(d *schema.ResourceData, meta interface if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := transport_tpg.NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + client := NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) @@ -623,22 +615,22 @@ func resourceContainerAzureNodePoolImport(d *schema.ResourceData, meta interface return []*schema.ResourceData{d}, nil } -func expandContainerAzureNodePoolAutoscaling(o interface{}) *containerazure.NodePoolAutoscaling { +func expandContainerAzureNodePoolAutoscaling(o interface{}) *NodePoolAutoscaling { if o == nil { - return containerazure.EmptyNodePoolAutoscaling + return EmptyNodePoolAutoscaling } objArr := o.([]interface{}) if len(objArr) == 0 || objArr[0] == nil { - return containerazure.EmptyNodePoolAutoscaling + return EmptyNodePoolAutoscaling } obj := objArr[0].(map[string]interface{}) - return &containerazure.NodePoolAutoscaling{ + return &NodePoolAutoscaling{ MaxNodeCount: dcl.Int64(int64(obj["max_node_count"].(int))), MinNodeCount: dcl.Int64(int64(obj["min_node_count"].(int))), } } -func flattenContainerAzureNodePoolAutoscaling(obj *containerazure.NodePoolAutoscaling) interface{} { +func flattenContainerAzureNodePoolAutoscaling(obj *NodePoolAutoscaling) interface{} { if obj == 
nil || obj.Empty() { return nil } @@ -651,16 +643,16 @@ func flattenContainerAzureNodePoolAutoscaling(obj *containerazure.NodePoolAutosc } -func expandContainerAzureNodePoolConfig(o interface{}) *containerazure.NodePoolConfig { +func expandContainerAzureNodePoolConfig(o interface{}) *NodePoolConfig { if o == nil { - return containerazure.EmptyNodePoolConfig + return EmptyNodePoolConfig } objArr := o.([]interface{}) if len(objArr) == 0 || objArr[0] == nil { - return containerazure.EmptyNodePoolConfig + return EmptyNodePoolConfig } obj := objArr[0].(map[string]interface{}) - return &containerazure.NodePoolConfig{ + return &NodePoolConfig{ SshConfig: expandContainerAzureNodePoolConfigSshConfig(obj["ssh_config"]), {{- if ne $.TargetVersionName "ga" }} ImageType: dcl.StringOrNil(obj["image_type"].(string)), @@ -673,7 +665,7 @@ func expandContainerAzureNodePoolConfig(o interface{}) *containerazure.NodePoolC } } -func flattenContainerAzureNodePoolConfig(obj *containerazure.NodePoolConfig) interface{} { +func flattenContainerAzureNodePoolConfig(obj *NodePoolConfig) interface{} { if obj == nil || obj.Empty() { return nil } @@ -693,21 +685,21 @@ func flattenContainerAzureNodePoolConfig(obj *containerazure.NodePoolConfig) int } -func expandContainerAzureNodePoolConfigSshConfig(o interface{}) *containerazure.NodePoolConfigSshConfig { +func expandContainerAzureNodePoolConfigSshConfig(o interface{}) *NodePoolConfigSshConfig { if o == nil { - return containerazure.EmptyNodePoolConfigSshConfig + return EmptyNodePoolConfigSshConfig } objArr := o.([]interface{}) if len(objArr) == 0 || objArr[0] == nil { - return containerazure.EmptyNodePoolConfigSshConfig + return EmptyNodePoolConfigSshConfig } obj := objArr[0].(map[string]interface{}) - return &containerazure.NodePoolConfigSshConfig{ + return &NodePoolConfigSshConfig{ AuthorizedKey: dcl.String(obj["authorized_key"].(string)), } } -func flattenContainerAzureNodePoolConfigSshConfig(obj *containerazure.NodePoolConfigSshConfig) 
interface{} { +func flattenContainerAzureNodePoolConfigSshConfig(obj *NodePoolConfigSshConfig) interface{} { if obj == nil || obj.Empty() { return nil } @@ -719,22 +711,22 @@ func flattenContainerAzureNodePoolConfigSshConfig(obj *containerazure.NodePoolCo } -func expandContainerAzureNodePoolConfigProxyConfig(o interface{}) *containerazure.NodePoolConfigProxyConfig { +func expandContainerAzureNodePoolConfigProxyConfig(o interface{}) *NodePoolConfigProxyConfig { if o == nil { - return containerazure.EmptyNodePoolConfigProxyConfig + return EmptyNodePoolConfigProxyConfig } objArr := o.([]interface{}) if len(objArr) == 0 || objArr[0] == nil { - return containerazure.EmptyNodePoolConfigProxyConfig + return EmptyNodePoolConfigProxyConfig } obj := objArr[0].(map[string]interface{}) - return &containerazure.NodePoolConfigProxyConfig{ + return &NodePoolConfigProxyConfig{ ResourceGroupId: dcl.String(obj["resource_group_id"].(string)), SecretId: dcl.String(obj["secret_id"].(string)), } } -func flattenContainerAzureNodePoolConfigProxyConfig(obj *containerazure.NodePoolConfigProxyConfig) interface{} { +func flattenContainerAzureNodePoolConfigProxyConfig(obj *NodePoolConfigProxyConfig) interface{} { if obj == nil || obj.Empty() { return nil } @@ -747,7 +739,7 @@ func flattenContainerAzureNodePoolConfigProxyConfig(obj *containerazure.NodePool } -func expandContainerAzureNodePoolConfigRootVolume(o interface{}) *containerazure.NodePoolConfigRootVolume { +func expandContainerAzureNodePoolConfigRootVolume(o interface{}) *NodePoolConfigRootVolume { if o == nil { return nil } @@ -756,12 +748,12 @@ func expandContainerAzureNodePoolConfigRootVolume(o interface{}) *containerazure return nil } obj := objArr[0].(map[string]interface{}) - return &containerazure.NodePoolConfigRootVolume{ + return &NodePoolConfigRootVolume{ SizeGib: dcl.Int64OrNil(int64(obj["size_gib"].(int))), } } -func flattenContainerAzureNodePoolConfigRootVolume(obj *containerazure.NodePoolConfigRootVolume) interface{} { 
+func flattenContainerAzureNodePoolConfigRootVolume(obj *NodePoolConfigRootVolume) interface{} { if obj == nil || obj.Empty() { return nil } @@ -773,21 +765,21 @@ func flattenContainerAzureNodePoolConfigRootVolume(obj *containerazure.NodePoolC } -func expandContainerAzureNodePoolMaxPodsConstraint(o interface{}) *containerazure.NodePoolMaxPodsConstraint { +func expandContainerAzureNodePoolMaxPodsConstraint(o interface{}) *NodePoolMaxPodsConstraint { if o == nil { - return containerazure.EmptyNodePoolMaxPodsConstraint + return EmptyNodePoolMaxPodsConstraint } objArr := o.([]interface{}) if len(objArr) == 0 || objArr[0] == nil { - return containerazure.EmptyNodePoolMaxPodsConstraint + return EmptyNodePoolMaxPodsConstraint } obj := objArr[0].(map[string]interface{}) - return &containerazure.NodePoolMaxPodsConstraint{ + return &NodePoolMaxPodsConstraint{ MaxPodsPerNode: dcl.Int64(int64(obj["max_pods_per_node"].(int))), } } -func flattenContainerAzureNodePoolMaxPodsConstraint(obj *containerazure.NodePoolMaxPodsConstraint) interface{} { +func flattenContainerAzureNodePoolMaxPodsConstraint(obj *NodePoolMaxPodsConstraint) interface{} { if obj == nil || obj.Empty() { return nil } @@ -799,7 +791,7 @@ func flattenContainerAzureNodePoolMaxPodsConstraint(obj *containerazure.NodePool } -func expandContainerAzureNodePoolManagement(o interface{}) *containerazure.NodePoolManagement { +func expandContainerAzureNodePoolManagement(o interface{}) *NodePoolManagement { if o == nil { return nil } @@ -808,12 +800,12 @@ func expandContainerAzureNodePoolManagement(o interface{}) *containerazure.NodeP return nil } obj := objArr[0].(map[string]interface{}) - return &containerazure.NodePoolManagement{ + return &NodePoolManagement{ AutoRepair: dcl.Bool(obj["auto_repair"].(bool)), } } -func flattenContainerAzureNodePoolManagement(obj *containerazure.NodePoolManagement) interface{} { +func flattenContainerAzureNodePoolManagement(obj *NodePoolManagement) interface{} { if obj == nil || obj.Empty() { 
return nil } diff --git a/mmv1/third_party/terraform/services/containerazure/resource_container_azure_node_pool_generated_test.go.tmpl b/mmv1/third_party/terraform/services/containerazure/resource_container_azure_node_pool_generated_test.go.tmpl index 3a608d2b3d29..5f46d012f13f 100644 --- a/mmv1/third_party/terraform/services/containerazure/resource_container_azure_node_pool_generated_test.go.tmpl +++ b/mmv1/third_party/terraform/services/containerazure/resource_container_azure_node_pool_generated_test.go.tmpl @@ -3,20 +3,15 @@ package containerazure_test import ( "context" "fmt" - dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -{{- if eq $.TargetVersionName "ga" }} - containerazure "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containerazure" -{{- else }} - containerazure "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containerazure/beta" -{{- end }} - "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/terraform" "strings" "testing" - + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-google/google/acctest" "github.com/hashicorp/terraform-provider-google/google/envvar" - transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/services/containerazure" + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" ) func TestAccContainerAzureNodePool_BasicHandWritten(t *testing.T) { @@ -588,7 +583,7 @@ func testAccCheckContainerAzureNodePoolDestroyProducer(t *testing.T) func(s *ter UpdateTime: dcl.StringOrNil(rs.Primary.Attributes["update_time"]), } - client := transport_tpg.NewDCLContainerAzureClient(config, config.UserAgent, billingProject, 0) + client := 
containerazure.NewDCLContainerAzureClient(config, config.UserAgent, billingProject, 0) _, err := client.GetNodePool(context.Background(), obj) if err == nil { return fmt.Errorf("google_container_azure_node_pool still exists %v", obj) diff --git a/mmv1/third_party/terraform/services/dataplex/resource_dataplex_asset_generated_test.go.tmpl b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_asset_generated_test.go.tmpl index 1c0c78e41767..ed9cfab421b7 100644 --- a/mmv1/third_party/terraform/services/dataplex/resource_dataplex_asset_generated_test.go.tmpl +++ b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_asset_generated_test.go.tmpl @@ -3,20 +3,15 @@ package dataplex_test import ( "context" "fmt" - dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -{{- if eq $.TargetVersionName "ga" }} - dataplex "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataplex" -{{- else }} - dataplex "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataplex/beta" -{{- end }} - "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/terraform" "strings" "testing" - + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-google/google/acctest" "github.com/hashicorp/terraform-provider-google/google/envvar" - transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/services/dataplex" + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" ) func TestAccDataplexAsset_BasicAssetHandWritten(t *testing.T) { @@ -228,7 +223,7 @@ func testAccCheckDataplexAssetDestroyProducer(t *testing.T) func(s *terraform.St UpdateTime: dcl.StringOrNil(rs.Primary.Attributes["update_time"]), } - client := 
transport_tpg.NewDCLDataplexClient(config, config.UserAgent, billingProject, 0) + client := dataplex.NewDCLDataplexClient(config, config.UserAgent, billingProject, 0) _, err := client.GetAsset(context.Background(), obj) if err == nil { return fmt.Errorf("google_dataplex_asset still exists %v", obj) diff --git a/mmv1/third_party/terraform/services/dataplex/resource_dataplex_lake_generated_test.go.tmpl b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_lake_generated_test.go.tmpl index d04efc4d65d3..f26d5255171d 100644 --- a/mmv1/third_party/terraform/services/dataplex/resource_dataplex_lake_generated_test.go.tmpl +++ b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_lake_generated_test.go.tmpl @@ -3,20 +3,15 @@ package dataplex_test import ( "context" "fmt" - dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -{{- if eq $.TargetVersionName "ga" }} - dataplex "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataplex" -{{- else }} - dataplex "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataplex/beta" -{{- end }} - "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/terraform" "strings" "testing" - + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-google/google/acctest" "github.com/hashicorp/terraform-provider-google/google/envvar" - transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/services/dataplex" + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" ) func TestAccDataplexLake_BasicLake(t *testing.T) { @@ -121,7 +116,7 @@ func testAccCheckDataplexLakeDestroyProducer(t *testing.T) func(s *terraform.Sta UpdateTime: 
dcl.StringOrNil(rs.Primary.Attributes["update_time"]), } - client := transport_tpg.NewDCLDataplexClient(config, config.UserAgent, billingProject, 0) + client := dataplex.NewDCLDataplexClient(config, config.UserAgent, billingProject, 0) _, err := client.GetLake(context.Background(), obj) if err == nil { return fmt.Errorf("google_dataplex_lake still exists %v", obj) diff --git a/mmv1/third_party/terraform/services/dataplex/resource_dataplex_zone_generated_test.go.tmpl b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_zone_generated_test.go.tmpl index a915cddac1e1..5c71215790ea 100644 --- a/mmv1/third_party/terraform/services/dataplex/resource_dataplex_zone_generated_test.go.tmpl +++ b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_zone_generated_test.go.tmpl @@ -3,20 +3,15 @@ package dataplex_test import ( "context" "fmt" - dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -{{- if eq $.TargetVersionName "ga" }} - dataplex "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataplex" -{{- else }} - dataplex "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataplex/beta" -{{- end }} - "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/terraform" "strings" "testing" - + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-google/google/acctest" "github.com/hashicorp/terraform-provider-google/google/envvar" - transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/services/dataplex" + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" ) func TestAccDataplexZone_BasicZone(t *testing.T) { @@ -165,7 +160,7 @@ func testAccCheckDataplexZoneDestroyProducer(t *testing.T) func(s 
*terraform.Sta UpdateTime: dcl.StringOrNil(rs.Primary.Attributes["update_time"]), } - client := transport_tpg.NewDCLDataplexClient(config, config.UserAgent, billingProject, 0) + client := dataplex.NewDCLDataplexClient(config, config.UserAgent, billingProject, 0) _, err := client.GetZone(context.Background(), obj) if err == nil { return fmt.Errorf("google_dataplex_zone still exists %v", obj) diff --git a/mmv1/third_party/terraform/services/dataproc/autoscaling_policy.go.tmpl b/mmv1/third_party/terraform/services/dataproc/autoscaling_policy.go.tmpl deleted file mode 100644 index 311864119df1..000000000000 --- a/mmv1/third_party/terraform/services/dataproc/autoscaling_policy.go.tmpl +++ /dev/null @@ -1,571 +0,0 @@ -package dataproc - -import ( - "context" - "crypto/sha256" - "encoding/json" - "fmt" - "time" - - "google.golang.org/api/googleapi" - dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" -) - -type AutoscalingPolicy struct { - Name *string `json:"name"` - BasicAlgorithm *AutoscalingPolicyBasicAlgorithm `json:"basicAlgorithm"` - WorkerConfig *AutoscalingPolicyWorkerConfig `json:"workerConfig"` - SecondaryWorkerConfig *AutoscalingPolicySecondaryWorkerConfig `json:"secondaryWorkerConfig"` - Project *string `json:"project"` - Location *string `json:"location"` -} - -func (r *AutoscalingPolicy) String() string { - return dcl.SprintResource(r) -} - -type AutoscalingPolicyBasicAlgorithm struct { - empty bool `json:"-"` - YarnConfig *AutoscalingPolicyBasicAlgorithmYarnConfig `json:"yarnConfig"` - CooldownPeriod *string `json:"cooldownPeriod"` -} - -type jsonAutoscalingPolicyBasicAlgorithm AutoscalingPolicyBasicAlgorithm - -func (r *AutoscalingPolicyBasicAlgorithm) UnmarshalJSON(data []byte) error { - var res jsonAutoscalingPolicyBasicAlgorithm - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyAutoscalingPolicyBasicAlgorithm - } 
else { - - r.YarnConfig = res.YarnConfig - - r.CooldownPeriod = res.CooldownPeriod - - } - return nil -} - -// This object is used to assert a desired state where this AutoscalingPolicyBasicAlgorithm is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyAutoscalingPolicyBasicAlgorithm *AutoscalingPolicyBasicAlgorithm = &AutoscalingPolicyBasicAlgorithm{empty: true} - -func (r *AutoscalingPolicyBasicAlgorithm) Empty() bool { - return r.empty -} - -func (r *AutoscalingPolicyBasicAlgorithm) String() string { - return dcl.SprintResource(r) -} - -func (r *AutoscalingPolicyBasicAlgorithm) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type AutoscalingPolicyBasicAlgorithmYarnConfig struct { - empty bool `json:"-"` - GracefulDecommissionTimeout *string `json:"gracefulDecommissionTimeout"` - ScaleUpFactor *float64 `json:"scaleUpFactor"` - ScaleDownFactor *float64 `json:"scaleDownFactor"` - ScaleUpMinWorkerFraction *float64 `json:"scaleUpMinWorkerFraction"` - ScaleDownMinWorkerFraction *float64 `json:"scaleDownMinWorkerFraction"` -} - -type jsonAutoscalingPolicyBasicAlgorithmYarnConfig AutoscalingPolicyBasicAlgorithmYarnConfig - -func (r *AutoscalingPolicyBasicAlgorithmYarnConfig) UnmarshalJSON(data []byte) error { - var res jsonAutoscalingPolicyBasicAlgorithmYarnConfig - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyAutoscalingPolicyBasicAlgorithmYarnConfig - } else { - - r.GracefulDecommissionTimeout = res.GracefulDecommissionTimeout - - r.ScaleUpFactor = res.ScaleUpFactor - - r.ScaleDownFactor = res.ScaleDownFactor - - r.ScaleUpMinWorkerFraction = res.ScaleUpMinWorkerFraction - - 
r.ScaleDownMinWorkerFraction = res.ScaleDownMinWorkerFraction - - } - return nil -} - -// This object is used to assert a desired state where this AutoscalingPolicyBasicAlgorithmYarnConfig is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyAutoscalingPolicyBasicAlgorithmYarnConfig *AutoscalingPolicyBasicAlgorithmYarnConfig = &AutoscalingPolicyBasicAlgorithmYarnConfig{empty: true} - -func (r *AutoscalingPolicyBasicAlgorithmYarnConfig) Empty() bool { - return r.empty -} - -func (r *AutoscalingPolicyBasicAlgorithmYarnConfig) String() string { - return dcl.SprintResource(r) -} - -func (r *AutoscalingPolicyBasicAlgorithmYarnConfig) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type AutoscalingPolicyWorkerConfig struct { - empty bool `json:"-"` - MinInstances *int64 `json:"minInstances"` - MaxInstances *int64 `json:"maxInstances"` - Weight *int64 `json:"weight"` -} - -type jsonAutoscalingPolicyWorkerConfig AutoscalingPolicyWorkerConfig - -func (r *AutoscalingPolicyWorkerConfig) UnmarshalJSON(data []byte) error { - var res jsonAutoscalingPolicyWorkerConfig - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyAutoscalingPolicyWorkerConfig - } else { - - r.MinInstances = res.MinInstances - - r.MaxInstances = res.MaxInstances - - r.Weight = res.Weight - - } - return nil -} - -// This object is used to assert a desired state where this AutoscalingPolicyWorkerConfig is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyAutoscalingPolicyWorkerConfig *AutoscalingPolicyWorkerConfig = &AutoscalingPolicyWorkerConfig{empty: true} - -func (r *AutoscalingPolicyWorkerConfig) Empty() bool { - return r.empty -} - -func (r *AutoscalingPolicyWorkerConfig) String() string { - return dcl.SprintResource(r) -} - -func (r *AutoscalingPolicyWorkerConfig) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type AutoscalingPolicySecondaryWorkerConfig struct { - empty bool `json:"-"` - MinInstances *int64 `json:"minInstances"` - MaxInstances *int64 `json:"maxInstances"` - Weight *int64 `json:"weight"` -} - -type jsonAutoscalingPolicySecondaryWorkerConfig AutoscalingPolicySecondaryWorkerConfig - -func (r *AutoscalingPolicySecondaryWorkerConfig) UnmarshalJSON(data []byte) error { - var res jsonAutoscalingPolicySecondaryWorkerConfig - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyAutoscalingPolicySecondaryWorkerConfig - } else { - - r.MinInstances = res.MinInstances - - r.MaxInstances = res.MaxInstances - - r.Weight = res.Weight - - } - return nil -} - -// This object is used to assert a desired state where this AutoscalingPolicySecondaryWorkerConfig is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyAutoscalingPolicySecondaryWorkerConfig *AutoscalingPolicySecondaryWorkerConfig = &AutoscalingPolicySecondaryWorkerConfig{empty: true} - -func (r *AutoscalingPolicySecondaryWorkerConfig) Empty() bool { - return r.empty -} - -func (r *AutoscalingPolicySecondaryWorkerConfig) String() string { - return dcl.SprintResource(r) -} - -func (r *AutoscalingPolicySecondaryWorkerConfig) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -// Describe returns a simple description of this resource to ensure that automated tools -// can identify it. -func (r *AutoscalingPolicy) Describe() dcl.ServiceTypeVersion { - return dcl.ServiceTypeVersion{ - Service: "dataproc", - Type: "AutoscalingPolicy", - Version: "beta", - } -} - -func (r *AutoscalingPolicy) ID() (string, error) { - if err := extractAutoscalingPolicyFields(r); err != nil { - return "", err - } - nr := r.urlNormalized() - params := map[string]interface{}{ - "name": dcl.ValueOrEmptyString(nr.Name), - "basic_algorithm": dcl.ValueOrEmptyString(nr.BasicAlgorithm), - "worker_config": dcl.ValueOrEmptyString(nr.WorkerConfig), - "secondary_worker_config": dcl.ValueOrEmptyString(nr.SecondaryWorkerConfig), - "project": dcl.ValueOrEmptyString(nr.Project), - "location": dcl.ValueOrEmptyString(nr.Location), - } - return dcl.Nprintf("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/autoscalingPolicies/{{ "{{" }}name{{ "}}" }}", params), nil -} - -const AutoscalingPolicyMaxPage = -1 - -type AutoscalingPolicyList struct { - Items []*AutoscalingPolicy - - nextToken string - - pageSize int32 - - resource *AutoscalingPolicy -} - -func (l *AutoscalingPolicyList) HasNext() bool { - return l.nextToken != "" -} - -func (l *AutoscalingPolicyList) Next(ctx context.Context, c *Client) error { - ctx, cancel := context.WithTimeout(ctx, 
c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - if !l.HasNext() { - return fmt.Errorf("no next page") - } - items, token, err := c.listAutoscalingPolicy(ctx, l.resource, l.nextToken, l.pageSize) - if err != nil { - return err - } - l.Items = items - l.nextToken = token - return err -} - -func (c *Client) ListAutoscalingPolicy(ctx context.Context, project, location string) (*AutoscalingPolicyList, error) { - ctx = dcl.ContextWithRequestID(ctx) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - return c.ListAutoscalingPolicyWithMaxResults(ctx, project, location, AutoscalingPolicyMaxPage) - -} - -func (c *Client) ListAutoscalingPolicyWithMaxResults(ctx context.Context, project, location string, pageSize int32) (*AutoscalingPolicyList, error) { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - // Create a resource object so that we can use proper url normalization methods. - r := &AutoscalingPolicy{ - Project: &project, - Location: &location, - } - items, token, err := c.listAutoscalingPolicy(ctx, r, "", pageSize) - if err != nil { - return nil, err - } - return &AutoscalingPolicyList{ - Items: items, - nextToken: token, - pageSize: pageSize, - resource: r, - }, nil -} - -func (c *Client) GetAutoscalingPolicy(ctx context.Context, r *AutoscalingPolicy) (*AutoscalingPolicy, error) { - ctx = dcl.ContextWithRequestID(ctx) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - // This is *purposefully* supressing errors. - // This function is used with url-normalized values + not URL normalized values. - // URL Normalized values will throw unintentional errors, since those values are not of the proper parent form. 
- extractAutoscalingPolicyFields(r) - - b, err := c.getAutoscalingPolicyRaw(ctx, r) - if err != nil { - if dcl.IsNotFound(err) { - return nil, &googleapi.Error{ - Code: 404, - Message: err.Error(), - } - } - return nil, err - } - result, err := unmarshalAutoscalingPolicy(b, c, r) - if err != nil { - return nil, err - } - result.Project = r.Project - result.Location = r.Location - result.Name = r.Name - - c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) - c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) - result, err = canonicalizeAutoscalingPolicyNewState(c, result, r) - if err != nil { - return nil, err - } - if err := postReadExtractAutoscalingPolicyFields(result); err != nil { - return result, err - } - c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result) - - return result, nil -} - -func (c *Client) DeleteAutoscalingPolicy(ctx context.Context, r *AutoscalingPolicy) error { - ctx = dcl.ContextWithRequestID(ctx) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - if r == nil { - return fmt.Errorf("AutoscalingPolicy resource is nil") - } - c.Config.Logger.InfoWithContext(ctx, "Deleting AutoscalingPolicy...") - deleteOp := deleteAutoscalingPolicyOperation{} - return deleteOp.do(ctx, r, c) -} - -// DeleteAllAutoscalingPolicy deletes all resources that the filter functions returns true on. 
-func (c *Client) DeleteAllAutoscalingPolicy(ctx context.Context, project, location string, filter func(*AutoscalingPolicy) bool) error { - listObj, err := c.ListAutoscalingPolicy(ctx, project, location) - if err != nil { - return err - } - - err = c.deleteAllAutoscalingPolicy(ctx, filter, listObj.Items) - if err != nil { - return err - } - for listObj.HasNext() { - err = listObj.Next(ctx, c) - if err != nil { - return nil - } - err = c.deleteAllAutoscalingPolicy(ctx, filter, listObj.Items) - if err != nil { - return err - } - } - return nil -} - -func (c *Client) ApplyAutoscalingPolicy(ctx context.Context, rawDesired *AutoscalingPolicy, opts ...dcl.ApplyOption) (*AutoscalingPolicy, error) { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - ctx = dcl.ContextWithRequestID(ctx) - var resultNewState *AutoscalingPolicy - err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { - newState, err := applyAutoscalingPolicyHelper(c, ctx, rawDesired, opts...) - resultNewState = newState - if err != nil { - // If the error is 409, there is conflict in resource update. - // Here we want to apply changes based on latest state. - if dcl.IsConflictError(err) { - return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} - } - return nil, err - } - return nil, nil - }, c.Config.RetryProvider) - return resultNewState, err -} - -func applyAutoscalingPolicyHelper(c *Client, ctx context.Context, rawDesired *AutoscalingPolicy, opts ...dcl.ApplyOption) (*AutoscalingPolicy, error) { - c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyAutoscalingPolicy...") - c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) - - // 1.1: Validation of user-specified fields in desired state. 
- if err := rawDesired.validate(); err != nil { - return nil, err - } - - if err := extractAutoscalingPolicyFields(rawDesired); err != nil { - return nil, err - } - - initial, desired, fieldDiffs, err := c.autoscalingPolicyDiffsForRawDesired(ctx, rawDesired, opts...) - if err != nil { - return nil, fmt.Errorf("failed to create a diff: %w", err) - } - - diffs, err := convertFieldDiffsToAutoscalingPolicyDiffs(c.Config, fieldDiffs, opts) - if err != nil { - return nil, err - } - - // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). - - // 2.3: Lifecycle Directive Check - var create bool - lp := dcl.FetchLifecycleParams(opts) - if initial == nil { - if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { - return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} - } - create = true - } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { - return nil, dcl.ApplyInfeasibleError{ - Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), - } - } else { - for _, d := range diffs { - if d.RequiresRecreate { - return nil, dcl.ApplyInfeasibleError{ - Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), - } - } - if dcl.HasLifecycleParam(lp, dcl.BlockModification) { - return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} - } - } - } - - // 2.4 Imperative Request Planning - var ops []autoscalingPolicyApiOperation - if create { - ops = append(ops, &createAutoscalingPolicyOperation{}) - } else { - for _, d := range diffs { - ops = append(ops, d.UpdateOp) - } - } - c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) - - // 2.5 Request Actuation - for _, op := range ops { - c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) - if err := op.do(ctx, desired, c); err != nil { - c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: 
%v", op, op, err) - return nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) - } - return applyAutoscalingPolicyDiff(c, ctx, desired, rawDesired, ops, opts...) -} - -func applyAutoscalingPolicyDiff(c *Client, ctx context.Context, desired *AutoscalingPolicy, rawDesired *AutoscalingPolicy, ops []autoscalingPolicyApiOperation, opts ...dcl.ApplyOption) (*AutoscalingPolicy, error) { - // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state - c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") - rawNew, err := c.GetAutoscalingPolicy(ctx, desired) - if err != nil { - return nil, err - } - // Get additional values from the first response. - // These values should be merged into the newState above. - if len(ops) > 0 { - lastOp := ops[len(ops)-1] - if o, ok := lastOp.(*createAutoscalingPolicyOperation); ok { - if r, hasR := o.FirstResponse(); hasR { - - c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") - - fullResp, err := unmarshalMapAutoscalingPolicy(r, c, rawDesired) - if err != nil { - return nil, err - } - - rawNew, err = canonicalizeAutoscalingPolicyNewState(c, rawNew, fullResp) - if err != nil { - return nil, err - } - } - } - } - - c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) - // 3.2b Canonicalization of raw new state using raw desired state - newState, err := canonicalizeAutoscalingPolicyNewState(c, rawNew, rawDesired) - if err != nil { - return rawNew, err - } - - c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) - // 3.3 Comparison of the new state and raw desired state. 
- // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE - newDesired, err := canonicalizeAutoscalingPolicyDesiredState(rawDesired, newState) - if err != nil { - return newState, err - } - - if err := postReadExtractAutoscalingPolicyFields(newState); err != nil { - return newState, err - } - - // Need to ensure any transformations made here match acceptably in differ. - if err := postReadExtractAutoscalingPolicyFields(newDesired); err != nil { - return newState, err - } - - c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) - newDiffs, err := diffAutoscalingPolicy(c, newDesired, newState) - if err != nil { - return newState, err - } - - if len(newDiffs) == 0 { - c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") - } else { - c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) - diffMessages := make([]string, len(newDiffs)) - for i, d := range newDiffs { - diffMessages[i] = fmt.Sprintf("%v", d) - } - return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} - } - c.Config.Logger.InfoWithContext(ctx, "Done Apply.") - return newState, nil -} diff --git a/mmv1/third_party/terraform/services/dataproc/autoscaling_policy_internal.go.tmpl b/mmv1/third_party/terraform/services/dataproc/autoscaling_policy_internal.go.tmpl deleted file mode 100644 index 9d3c4c9197a8..000000000000 --- a/mmv1/third_party/terraform/services/dataproc/autoscaling_policy_internal.go.tmpl +++ /dev/null @@ -1,2102 +0,0 @@ -package dataproc - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io/ioutil" - "strings" - - dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" -) - -func (r *AutoscalingPolicy) validate() error { - - if err := dcl.Required(r, "name"); err != nil { - return err - } - if err := dcl.Required(r, "basicAlgorithm"); err != nil { - return err - } - if err := dcl.Required(r, "workerConfig"); err != nil { - return err - } - if err := 
dcl.RequiredParameter(r.Project, "Project"); err != nil { - return err - } - if err := dcl.RequiredParameter(r.Location, "Location"); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.BasicAlgorithm) { - if err := r.BasicAlgorithm.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.WorkerConfig) { - if err := r.WorkerConfig.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.SecondaryWorkerConfig) { - if err := r.SecondaryWorkerConfig.validate(); err != nil { - return err - } - } - return nil -} -func (r *AutoscalingPolicyBasicAlgorithm) validate() error { - if err := dcl.Required(r, "yarnConfig"); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.YarnConfig) { - if err := r.YarnConfig.validate(); err != nil { - return err - } - } - return nil -} -func (r *AutoscalingPolicyBasicAlgorithmYarnConfig) validate() error { - if err := dcl.Required(r, "gracefulDecommissionTimeout"); err != nil { - return err - } - if err := dcl.Required(r, "scaleUpFactor"); err != nil { - return err - } - if err := dcl.Required(r, "scaleDownFactor"); err != nil { - return err - } - return nil -} -func (r *AutoscalingPolicyWorkerConfig) validate() error { - if err := dcl.Required(r, "maxInstances"); err != nil { - return err - } - return nil -} -func (r *AutoscalingPolicySecondaryWorkerConfig) validate() error { - return nil -} -func (r *AutoscalingPolicy) basePath() string { - params := map[string]interface{}{} - return dcl.Nprintf("https://dataproc.googleapis.com/v1beta2/", params) -} - -func (r *AutoscalingPolicy) getURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "location": dcl.ValueOrEmptyString(nr.Location), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/autoscalingPolicies/{{ "{{" }}name{{ "}}" }}", 
nr.basePath(), userBasePath, params), nil -} - -func (r *AutoscalingPolicy) listURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "location": dcl.ValueOrEmptyString(nr.Location), - } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/autoscalingPolicies", nr.basePath(), userBasePath, params), nil - -} - -func (r *AutoscalingPolicy) createURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "location": dcl.ValueOrEmptyString(nr.Location), - } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/autoscalingPolicies", nr.basePath(), userBasePath, params), nil - -} - -func (r *AutoscalingPolicy) deleteURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "location": dcl.ValueOrEmptyString(nr.Location), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/autoscalingPolicies/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil -} - -// autoscalingPolicyApiOperation represents a mutable operation in the underlying REST -// API such as Create, Update, or Delete. -type autoscalingPolicyApiOperation interface { - do(context.Context, *AutoscalingPolicy, *Client) error -} - -// newUpdateAutoscalingPolicyUpdateAutoscalingPolicyRequest creates a request for an -// AutoscalingPolicy resource's UpdateAutoscalingPolicy update type by filling in the update -// fields based on the intended state of the resource. 
-func newUpdateAutoscalingPolicyUpdateAutoscalingPolicyRequest(ctx context.Context, f *AutoscalingPolicy, c *Client) (map[string]interface{}, error) { - req := map[string]interface{}{} - res := f - _ = res - - if v, err := expandAutoscalingPolicyBasicAlgorithm(c, f.BasicAlgorithm, res); err != nil { - return nil, fmt.Errorf("error expanding BasicAlgorithm into basicAlgorithm: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - req["basicAlgorithm"] = v - } - if v, err := expandAutoscalingPolicyWorkerConfig(c, f.WorkerConfig, res); err != nil { - return nil, fmt.Errorf("error expanding WorkerConfig into workerConfig: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - req["workerConfig"] = v - } - if v, err := expandAutoscalingPolicySecondaryWorkerConfig(c, f.SecondaryWorkerConfig, res); err != nil { - return nil, fmt.Errorf("error expanding SecondaryWorkerConfig into secondaryWorkerConfig: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - req["secondaryWorkerConfig"] = v - } - if v, err := dcl.DeriveField("%s", f.Name); err != nil { - return nil, err - } else { - req["id"] = v - } - - return req, nil -} - -// marshalUpdateAutoscalingPolicyUpdateAutoscalingPolicyRequest converts the update into -// the final JSON request body. -func marshalUpdateAutoscalingPolicyUpdateAutoscalingPolicyRequest(c *Client, m map[string]interface{}) ([]byte, error) { - - return json.Marshal(m) -} - -type updateAutoscalingPolicyUpdateAutoscalingPolicyOperation struct { - // If the update operation has the REQUIRES_APPLY_OPTIONS trait, this will be populated. - // Usually it will be nil - this is to prevent us from accidentally depending on apply - // options, which should usually be unnecessary. - ApplyOptions []dcl.ApplyOption - FieldDiffs []*dcl.FieldDiff -} - -// do creates a request and sends it to the appropriate URL. In most operations, -// do will transcribe a subset of the resource into a request object and send a -// PUT request to a single URL. 
- -func (op *updateAutoscalingPolicyUpdateAutoscalingPolicyOperation) do(ctx context.Context, r *AutoscalingPolicy, c *Client) error { - _, err := c.GetAutoscalingPolicy(ctx, r) - if err != nil { - return err - } - - u, err := r.updateURL(c.Config.BasePath, "UpdateAutoscalingPolicy") - if err != nil { - return err - } - - req, err := newUpdateAutoscalingPolicyUpdateAutoscalingPolicyRequest(ctx, r, c) - if err != nil { - return err - } - - c.Config.Logger.InfoWithContextf(ctx, "Created update: %#v", req) - body, err := marshalUpdateAutoscalingPolicyUpdateAutoscalingPolicyRequest(c, req) - if err != nil { - return err - } - _, err = dcl.SendRequest(ctx, c.Config, "PUT", u, bytes.NewBuffer(body), c.Config.RetryProvider) - if err != nil { - return err - } - - return nil -} - -func (c *Client) listAutoscalingPolicyRaw(ctx context.Context, r *AutoscalingPolicy, pageToken string, pageSize int32) ([]byte, error) { - u, err := r.urlNormalized().listURL(c.Config.BasePath) - if err != nil { - return nil, err - } - - m := make(map[string]string) - if pageToken != "" { - m["pageToken"] = pageToken - } - - if pageSize != AutoscalingPolicyMaxPage { - m["pageSize"] = fmt.Sprintf("%v", pageSize) - } - - u, err = dcl.AddQueryParams(u, m) - if err != nil { - return nil, err - } - resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) - if err != nil { - return nil, err - } - defer resp.Response.Body.Close() - return ioutil.ReadAll(resp.Response.Body) -} - -type listAutoscalingPolicyOperation struct { - Policies []map[string]interface{} `json:"policies"` - Token string `json:"nextPageToken"` -} - -func (c *Client) listAutoscalingPolicy(ctx context.Context, r *AutoscalingPolicy, pageToken string, pageSize int32) ([]*AutoscalingPolicy, string, error) { - b, err := c.listAutoscalingPolicyRaw(ctx, r, pageToken, pageSize) - if err != nil { - return nil, "", err - } - - var m listAutoscalingPolicyOperation - if err := json.Unmarshal(b, &m); err != nil 
{ - return nil, "", err - } - - var l []*AutoscalingPolicy - for _, v := range m.Policies { - res, err := unmarshalMapAutoscalingPolicy(v, c, r) - if err != nil { - return nil, m.Token, err - } - res.Project = r.Project - res.Location = r.Location - l = append(l, res) - } - - return l, m.Token, nil -} - -func (c *Client) deleteAllAutoscalingPolicy(ctx context.Context, f func(*AutoscalingPolicy) bool, resources []*AutoscalingPolicy) error { - var errors []string - for _, res := range resources { - if f(res) { - // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. - err := c.DeleteAutoscalingPolicy(ctx, res) - if err != nil { - errors = append(errors, err.Error()) - } - } - } - if len(errors) > 0 { - return fmt.Errorf("%v", strings.Join(errors, "\n")) - } else { - return nil - } -} - -type deleteAutoscalingPolicyOperation struct{} - -func (op *deleteAutoscalingPolicyOperation) do(ctx context.Context, r *AutoscalingPolicy, c *Client) error { - r, err := c.GetAutoscalingPolicy(ctx, r) - if err != nil { - if dcl.IsNotFound(err) { - c.Config.Logger.InfoWithContextf(ctx, "AutoscalingPolicy not found, returning. Original error: %v", err) - return nil - } - c.Config.Logger.WarningWithContextf(ctx, "GetAutoscalingPolicy checking for existence. error: %v", err) - return err - } - - u, err := r.deleteURL(c.Config.BasePath) - if err != nil { - return err - } - - // Delete should never have a body - body := &bytes.Buffer{} - _, err = dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider) - if err != nil { - return fmt.Errorf("failed to delete AutoscalingPolicy: %w", err) - } - - // We saw a race condition where for some successful delete operation, the Get calls returned resources for a short duration. - // This is the reason we are adding retry to handle that case. 
- retriesRemaining := 10 - dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { - _, err := c.GetAutoscalingPolicy(ctx, r) - if dcl.IsNotFound(err) { - return nil, nil - } - if retriesRemaining > 0 { - retriesRemaining-- - return &dcl.RetryDetails{}, dcl.OperationNotDone{} - } - return nil, dcl.NotDeletedError{ExistingResource: r} - }, c.Config.RetryProvider) - return nil -} - -// Create operations are similar to Update operations, although they do not have -// specific request objects. The Create request object is the json encoding of -// the resource, which is modified by res.marshal to form the base request body. -type createAutoscalingPolicyOperation struct { - response map[string]interface{} -} - -func (op *createAutoscalingPolicyOperation) FirstResponse() (map[string]interface{}, bool) { - return op.response, len(op.response) > 0 -} - -func (op *createAutoscalingPolicyOperation) do(ctx context.Context, r *AutoscalingPolicy, c *Client) error { - c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r) - u, err := r.createURL(c.Config.BasePath) - if err != nil { - return err - } - - req, err := r.marshal(c) - if err != nil { - return err - } - resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider) - if err != nil { - return err - } - - o, err := dcl.ResponseBodyAsJSON(resp) - if err != nil { - return fmt.Errorf("error decoding response body into JSON: %w", err) - } - op.response = o - - if _, err := c.GetAutoscalingPolicy(ctx, r); err != nil { - c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) - return err - } - - return nil -} - -func (c *Client) getAutoscalingPolicyRaw(ctx context.Context, r *AutoscalingPolicy) ([]byte, error) { - - u, err := r.getURL(c.Config.BasePath) - if err != nil { - return nil, err - } - resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) - if err != nil { - return nil, err - } - defer 
resp.Response.Body.Close() - b, err := ioutil.ReadAll(resp.Response.Body) - if err != nil { - return nil, err - } - - return b, nil -} - -func (c *Client) autoscalingPolicyDiffsForRawDesired(ctx context.Context, rawDesired *AutoscalingPolicy, opts ...dcl.ApplyOption) (initial, desired *AutoscalingPolicy, diffs []*dcl.FieldDiff, err error) { - c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") - // First, let us see if the user provided a state hint. If they did, we will start fetching based on that. - var fetchState *AutoscalingPolicy - if sh := dcl.FetchStateHint(opts); sh != nil { - if r, ok := sh.(*AutoscalingPolicy); !ok { - c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected AutoscalingPolicy, got %T", sh) - } else { - fetchState = r - } - } - if fetchState == nil { - fetchState = rawDesired - } - - // 1.2: Retrieval of raw initial state from API - rawInitial, err := c.GetAutoscalingPolicy(ctx, fetchState) - if rawInitial == nil { - if !dcl.IsNotFound(err) { - c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a AutoscalingPolicy resource already exists: %s", err) - return nil, nil, nil, fmt.Errorf("failed to retrieve AutoscalingPolicy resource: %v", err) - } - c.Config.Logger.InfoWithContext(ctx, "Found that AutoscalingPolicy resource did not exist.") - // Perform canonicalization to pick up defaults. - desired, err = canonicalizeAutoscalingPolicyDesiredState(rawDesired, rawInitial) - return nil, desired, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Found initial state for AutoscalingPolicy: %v", rawInitial) - c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for AutoscalingPolicy: %v", rawDesired) - - // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. 
- if err := extractAutoscalingPolicyFields(rawInitial); err != nil { - return nil, nil, nil, err - } - - // 1.3: Canonicalize raw initial state into initial state. - initial, err = canonicalizeAutoscalingPolicyInitialState(rawInitial, rawDesired) - if err != nil { - return nil, nil, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for AutoscalingPolicy: %v", initial) - - // 1.4: Canonicalize raw desired state into desired state. - desired, err = canonicalizeAutoscalingPolicyDesiredState(rawDesired, rawInitial, opts...) - if err != nil { - return nil, nil, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for AutoscalingPolicy: %v", desired) - - // 2.1: Comparison of initial and desired state. - diffs, err = diffAutoscalingPolicy(c, desired, initial, opts...) - return initial, desired, diffs, err -} - -func canonicalizeAutoscalingPolicyInitialState(rawInitial, rawDesired *AutoscalingPolicy) (*AutoscalingPolicy, error) { - // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. - return rawInitial, nil -} - -/* -* Canonicalizers -* -* These are responsible for converting either a user-specified config or a -* GCP API response to a standard format that can be used for difference checking. -* */ - -func canonicalizeAutoscalingPolicyDesiredState(rawDesired, rawInitial *AutoscalingPolicy, opts ...dcl.ApplyOption) (*AutoscalingPolicy, error) { - - if rawInitial == nil { - // Since the initial state is empty, the desired state is all we have. - // We canonicalize the remaining nested objects with nil to pick up defaults. - rawDesired.BasicAlgorithm = canonicalizeAutoscalingPolicyBasicAlgorithm(rawDesired.BasicAlgorithm, nil, opts...) - rawDesired.WorkerConfig = canonicalizeAutoscalingPolicyWorkerConfig(rawDesired.WorkerConfig, nil, opts...) - rawDesired.SecondaryWorkerConfig = canonicalizeAutoscalingPolicySecondaryWorkerConfig(rawDesired.SecondaryWorkerConfig, nil, opts...) 
- - return rawDesired, nil - } - canonicalDesired := &AutoscalingPolicy{} - if dcl.StringCanonicalize(rawDesired.Name, rawInitial.Name) { - canonicalDesired.Name = rawInitial.Name - } else { - canonicalDesired.Name = rawDesired.Name - } - canonicalDesired.BasicAlgorithm = canonicalizeAutoscalingPolicyBasicAlgorithm(rawDesired.BasicAlgorithm, rawInitial.BasicAlgorithm, opts...) - canonicalDesired.WorkerConfig = canonicalizeAutoscalingPolicyWorkerConfig(rawDesired.WorkerConfig, rawInitial.WorkerConfig, opts...) - canonicalDesired.SecondaryWorkerConfig = canonicalizeAutoscalingPolicySecondaryWorkerConfig(rawDesired.SecondaryWorkerConfig, rawInitial.SecondaryWorkerConfig, opts...) - if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) { - canonicalDesired.Project = rawInitial.Project - } else { - canonicalDesired.Project = rawDesired.Project - } - if dcl.NameToSelfLink(rawDesired.Location, rawInitial.Location) { - canonicalDesired.Location = rawInitial.Location - } else { - canonicalDesired.Location = rawDesired.Location - } - return canonicalDesired, nil -} - -func canonicalizeAutoscalingPolicyNewState(c *Client, rawNew, rawDesired *AutoscalingPolicy) (*AutoscalingPolicy, error) { - - if dcl.IsEmptyValueIndirect(rawNew.Name) && dcl.IsEmptyValueIndirect(rawDesired.Name) { - rawNew.Name = rawDesired.Name - } else { - if dcl.StringCanonicalize(rawDesired.Name, rawNew.Name) { - rawNew.Name = rawDesired.Name - } - } - - if dcl.IsEmptyValueIndirect(rawNew.BasicAlgorithm) && dcl.IsEmptyValueIndirect(rawDesired.BasicAlgorithm) { - rawNew.BasicAlgorithm = rawDesired.BasicAlgorithm - } else { - rawNew.BasicAlgorithm = canonicalizeNewAutoscalingPolicyBasicAlgorithm(c, rawDesired.BasicAlgorithm, rawNew.BasicAlgorithm) - } - - if dcl.IsEmptyValueIndirect(rawNew.WorkerConfig) && dcl.IsEmptyValueIndirect(rawDesired.WorkerConfig) { - rawNew.WorkerConfig = rawDesired.WorkerConfig - } else { - rawNew.WorkerConfig = canonicalizeNewAutoscalingPolicyWorkerConfig(c, 
rawDesired.WorkerConfig, rawNew.WorkerConfig) - } - - if dcl.IsEmptyValueIndirect(rawNew.SecondaryWorkerConfig) && dcl.IsEmptyValueIndirect(rawDesired.SecondaryWorkerConfig) { - rawNew.SecondaryWorkerConfig = rawDesired.SecondaryWorkerConfig - } else { - rawNew.SecondaryWorkerConfig = canonicalizeNewAutoscalingPolicySecondaryWorkerConfig(c, rawDesired.SecondaryWorkerConfig, rawNew.SecondaryWorkerConfig) - } - - rawNew.Project = rawDesired.Project - - rawNew.Location = rawDesired.Location - - return rawNew, nil -} - -func canonicalizeAutoscalingPolicyBasicAlgorithm(des, initial *AutoscalingPolicyBasicAlgorithm, opts ...dcl.ApplyOption) *AutoscalingPolicyBasicAlgorithm { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &AutoscalingPolicyBasicAlgorithm{} - - cDes.YarnConfig = canonicalizeAutoscalingPolicyBasicAlgorithmYarnConfig(des.YarnConfig, initial.YarnConfig, opts...) - if dcl.StringCanonicalize(des.CooldownPeriod, initial.CooldownPeriod) || dcl.IsZeroValue(des.CooldownPeriod) { - cDes.CooldownPeriod = initial.CooldownPeriod - } else { - cDes.CooldownPeriod = des.CooldownPeriod - } - - return cDes -} - -func canonicalizeAutoscalingPolicyBasicAlgorithmSlice(des, initial []AutoscalingPolicyBasicAlgorithm, opts ...dcl.ApplyOption) []AutoscalingPolicyBasicAlgorithm { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]AutoscalingPolicyBasicAlgorithm, 0, len(des)) - for _, d := range des { - cd := canonicalizeAutoscalingPolicyBasicAlgorithm(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]AutoscalingPolicyBasicAlgorithm, 0, len(des)) - for i, d := range des { - cd := canonicalizeAutoscalingPolicyBasicAlgorithm(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewAutoscalingPolicyBasicAlgorithm(c *Client, des, nw *AutoscalingPolicyBasicAlgorithm) *AutoscalingPolicyBasicAlgorithm { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for AutoscalingPolicyBasicAlgorithm while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - nw.YarnConfig = canonicalizeNewAutoscalingPolicyBasicAlgorithmYarnConfig(c, des.YarnConfig, nw.YarnConfig) - if dcl.StringCanonicalize(des.CooldownPeriod, nw.CooldownPeriod) { - nw.CooldownPeriod = des.CooldownPeriod - } - - return nw -} - -func canonicalizeNewAutoscalingPolicyBasicAlgorithmSet(c *Client, des, nw []AutoscalingPolicyBasicAlgorithm) []AutoscalingPolicyBasicAlgorithm { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []AutoscalingPolicyBasicAlgorithm - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareAutoscalingPolicyBasicAlgorithmNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewAutoscalingPolicyBasicAlgorithm(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewAutoscalingPolicyBasicAlgorithmSlice(c *Client, des, nw []AutoscalingPolicyBasicAlgorithm) []AutoscalingPolicyBasicAlgorithm { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []AutoscalingPolicyBasicAlgorithm - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewAutoscalingPolicyBasicAlgorithm(c, &d, &n)) - } - - return items -} - -func canonicalizeAutoscalingPolicyBasicAlgorithmYarnConfig(des, initial *AutoscalingPolicyBasicAlgorithmYarnConfig, opts ...dcl.ApplyOption) *AutoscalingPolicyBasicAlgorithmYarnConfig { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &AutoscalingPolicyBasicAlgorithmYarnConfig{} - - if dcl.StringCanonicalize(des.GracefulDecommissionTimeout, initial.GracefulDecommissionTimeout) || dcl.IsZeroValue(des.GracefulDecommissionTimeout) { - cDes.GracefulDecommissionTimeout = initial.GracefulDecommissionTimeout - } else { - cDes.GracefulDecommissionTimeout = des.GracefulDecommissionTimeout - } - if dcl.IsZeroValue(des.ScaleUpFactor) || (dcl.IsEmptyValueIndirect(des.ScaleUpFactor) && dcl.IsEmptyValueIndirect(initial.ScaleUpFactor)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.ScaleUpFactor = initial.ScaleUpFactor - } else { - cDes.ScaleUpFactor = des.ScaleUpFactor - } - if dcl.IsZeroValue(des.ScaleDownFactor) || (dcl.IsEmptyValueIndirect(des.ScaleDownFactor) && dcl.IsEmptyValueIndirect(initial.ScaleDownFactor)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.ScaleDownFactor = initial.ScaleDownFactor - } else { - cDes.ScaleDownFactor = des.ScaleDownFactor - } - if dcl.IsZeroValue(des.ScaleUpMinWorkerFraction) || (dcl.IsEmptyValueIndirect(des.ScaleUpMinWorkerFraction) && dcl.IsEmptyValueIndirect(initial.ScaleUpMinWorkerFraction)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.ScaleUpMinWorkerFraction = initial.ScaleUpMinWorkerFraction - } else { - cDes.ScaleUpMinWorkerFraction = des.ScaleUpMinWorkerFraction - } - if dcl.IsZeroValue(des.ScaleDownMinWorkerFraction) || (dcl.IsEmptyValueIndirect(des.ScaleDownMinWorkerFraction) && dcl.IsEmptyValueIndirect(initial.ScaleDownMinWorkerFraction)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.ScaleDownMinWorkerFraction = initial.ScaleDownMinWorkerFraction - } else { - cDes.ScaleDownMinWorkerFraction = des.ScaleDownMinWorkerFraction - } - - return cDes -} - -func canonicalizeAutoscalingPolicyBasicAlgorithmYarnConfigSlice(des, initial []AutoscalingPolicyBasicAlgorithmYarnConfig, opts ...dcl.ApplyOption) []AutoscalingPolicyBasicAlgorithmYarnConfig { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]AutoscalingPolicyBasicAlgorithmYarnConfig, 0, len(des)) - for _, d := range des { - cd := canonicalizeAutoscalingPolicyBasicAlgorithmYarnConfig(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]AutoscalingPolicyBasicAlgorithmYarnConfig, 0, len(des)) - for i, d := range des { - cd := canonicalizeAutoscalingPolicyBasicAlgorithmYarnConfig(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewAutoscalingPolicyBasicAlgorithmYarnConfig(c *Client, des, nw *AutoscalingPolicyBasicAlgorithmYarnConfig) *AutoscalingPolicyBasicAlgorithmYarnConfig { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for AutoscalingPolicyBasicAlgorithmYarnConfig while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.GracefulDecommissionTimeout, nw.GracefulDecommissionTimeout) { - nw.GracefulDecommissionTimeout = des.GracefulDecommissionTimeout - } - - return nw -} - -func canonicalizeNewAutoscalingPolicyBasicAlgorithmYarnConfigSet(c *Client, des, nw []AutoscalingPolicyBasicAlgorithmYarnConfig) []AutoscalingPolicyBasicAlgorithmYarnConfig { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []AutoscalingPolicyBasicAlgorithmYarnConfig - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareAutoscalingPolicyBasicAlgorithmYarnConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewAutoscalingPolicyBasicAlgorithmYarnConfig(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewAutoscalingPolicyBasicAlgorithmYarnConfigSlice(c *Client, des, nw []AutoscalingPolicyBasicAlgorithmYarnConfig) []AutoscalingPolicyBasicAlgorithmYarnConfig { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []AutoscalingPolicyBasicAlgorithmYarnConfig - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewAutoscalingPolicyBasicAlgorithmYarnConfig(c, &d, &n)) - } - - return items -} - -func canonicalizeAutoscalingPolicyWorkerConfig(des, initial *AutoscalingPolicyWorkerConfig, opts ...dcl.ApplyOption) *AutoscalingPolicyWorkerConfig { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &AutoscalingPolicyWorkerConfig{} - - if dcl.IsZeroValue(des.MinInstances) || (dcl.IsEmptyValueIndirect(des.MinInstances) && dcl.IsEmptyValueIndirect(initial.MinInstances)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.MinInstances = initial.MinInstances - } else { - cDes.MinInstances = des.MinInstances - } - if dcl.IsZeroValue(des.MaxInstances) || (dcl.IsEmptyValueIndirect(des.MaxInstances) && dcl.IsEmptyValueIndirect(initial.MaxInstances)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.MaxInstances = initial.MaxInstances - } else { - cDes.MaxInstances = des.MaxInstances - } - if dcl.IsZeroValue(des.Weight) || (dcl.IsEmptyValueIndirect(des.Weight) && dcl.IsEmptyValueIndirect(initial.Weight)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.Weight = initial.Weight - } else { - cDes.Weight = des.Weight - } - - return cDes -} - -func canonicalizeAutoscalingPolicyWorkerConfigSlice(des, initial []AutoscalingPolicyWorkerConfig, opts ...dcl.ApplyOption) []AutoscalingPolicyWorkerConfig { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]AutoscalingPolicyWorkerConfig, 0, len(des)) - for _, d := range des { - cd := canonicalizeAutoscalingPolicyWorkerConfig(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]AutoscalingPolicyWorkerConfig, 0, len(des)) - for i, d := range des { - cd := canonicalizeAutoscalingPolicyWorkerConfig(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewAutoscalingPolicyWorkerConfig(c *Client, des, nw *AutoscalingPolicyWorkerConfig) *AutoscalingPolicyWorkerConfig { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for AutoscalingPolicyWorkerConfig while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewAutoscalingPolicyWorkerConfigSet(c *Client, des, nw []AutoscalingPolicyWorkerConfig) []AutoscalingPolicyWorkerConfig { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []AutoscalingPolicyWorkerConfig - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareAutoscalingPolicyWorkerConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewAutoscalingPolicyWorkerConfig(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewAutoscalingPolicyWorkerConfigSlice(c *Client, des, nw []AutoscalingPolicyWorkerConfig) []AutoscalingPolicyWorkerConfig { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []AutoscalingPolicyWorkerConfig - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewAutoscalingPolicyWorkerConfig(c, &d, &n)) - } - - return items -} - -func canonicalizeAutoscalingPolicySecondaryWorkerConfig(des, initial *AutoscalingPolicySecondaryWorkerConfig, opts ...dcl.ApplyOption) *AutoscalingPolicySecondaryWorkerConfig { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &AutoscalingPolicySecondaryWorkerConfig{} - - if dcl.IsZeroValue(des.MinInstances) || (dcl.IsEmptyValueIndirect(des.MinInstances) && dcl.IsEmptyValueIndirect(initial.MinInstances)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.MinInstances = initial.MinInstances - } else { - cDes.MinInstances = des.MinInstances - } - if dcl.IsZeroValue(des.MaxInstances) || (dcl.IsEmptyValueIndirect(des.MaxInstances) && dcl.IsEmptyValueIndirect(initial.MaxInstances)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.MaxInstances = initial.MaxInstances - } else { - cDes.MaxInstances = des.MaxInstances - } - if dcl.IsZeroValue(des.Weight) || (dcl.IsEmptyValueIndirect(des.Weight) && dcl.IsEmptyValueIndirect(initial.Weight)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Weight = initial.Weight - } else { - cDes.Weight = des.Weight - } - - return cDes -} - -func canonicalizeAutoscalingPolicySecondaryWorkerConfigSlice(des, initial []AutoscalingPolicySecondaryWorkerConfig, opts ...dcl.ApplyOption) []AutoscalingPolicySecondaryWorkerConfig { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]AutoscalingPolicySecondaryWorkerConfig, 0, len(des)) - for _, d := range des { - cd := canonicalizeAutoscalingPolicySecondaryWorkerConfig(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]AutoscalingPolicySecondaryWorkerConfig, 0, len(des)) - for i, d := range des { - cd := canonicalizeAutoscalingPolicySecondaryWorkerConfig(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewAutoscalingPolicySecondaryWorkerConfig(c *Client, des, nw *AutoscalingPolicySecondaryWorkerConfig) *AutoscalingPolicySecondaryWorkerConfig { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for AutoscalingPolicySecondaryWorkerConfig while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewAutoscalingPolicySecondaryWorkerConfigSet(c *Client, des, nw []AutoscalingPolicySecondaryWorkerConfig) []AutoscalingPolicySecondaryWorkerConfig { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []AutoscalingPolicySecondaryWorkerConfig - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareAutoscalingPolicySecondaryWorkerConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewAutoscalingPolicySecondaryWorkerConfig(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewAutoscalingPolicySecondaryWorkerConfigSlice(c *Client, des, nw []AutoscalingPolicySecondaryWorkerConfig) []AutoscalingPolicySecondaryWorkerConfig { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []AutoscalingPolicySecondaryWorkerConfig - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewAutoscalingPolicySecondaryWorkerConfig(c, &d, &n)) - } - - return items -} - -// The differ returns a list of diffs, along with a list of operations that should be taken -// to remedy them. Right now, it does not attempt to consolidate operations - if several -// fields can be fixed with a patch update, it will perform the patch several times. -// Diffs on some fields will be ignored if the `desired` state has an empty (nil) -// value. This empty value indicates that the user does not care about the state for -// the field. Empty fields on the actual object will cause diffs. -// TODO(magic-modules-eng): for efficiency in some resources, add batching. 
-func diffAutoscalingPolicy(c *Client, desired, actual *AutoscalingPolicy, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) { - if desired == nil || actual == nil { - return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual) - } - - c.Config.Logger.Infof("Diff function called with desired state: %v", desired) - c.Config.Logger.Infof("Diff function called with actual state: %v", actual) - - var fn dcl.FieldName - var newDiffs []*dcl.FieldDiff - // New style diffs. - if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Id")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.BasicAlgorithm, actual.BasicAlgorithm, dcl.DiffInfo{ObjectFunction: compareAutoscalingPolicyBasicAlgorithmNewStyle, EmptyObject: EmptyAutoscalingPolicyBasicAlgorithm, OperationSelector: dcl.TriggersOperation("updateAutoscalingPolicyUpdateAutoscalingPolicyOperation")}, fn.AddNest("BasicAlgorithm")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.WorkerConfig, actual.WorkerConfig, dcl.DiffInfo{ObjectFunction: compareAutoscalingPolicyWorkerConfigNewStyle, EmptyObject: EmptyAutoscalingPolicyWorkerConfig, OperationSelector: dcl.TriggersOperation("updateAutoscalingPolicyUpdateAutoscalingPolicyOperation")}, fn.AddNest("WorkerConfig")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.SecondaryWorkerConfig, actual.SecondaryWorkerConfig, dcl.DiffInfo{ObjectFunction: compareAutoscalingPolicySecondaryWorkerConfigNewStyle, EmptyObject: EmptyAutoscalingPolicySecondaryWorkerConfig, OperationSelector: dcl.TriggersOperation("updateAutoscalingPolicyUpdateAutoscalingPolicyOperation")}, fn.AddNest("SecondaryWorkerConfig")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Location, actual.Location, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Location")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) 
- } - - if len(newDiffs) > 0 { - c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs) - } - return newDiffs, nil -} -func compareAutoscalingPolicyBasicAlgorithmNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*AutoscalingPolicyBasicAlgorithm) - if !ok { - desiredNotPointer, ok := d.(AutoscalingPolicyBasicAlgorithm) - if !ok { - return nil, fmt.Errorf("obj %v is not a AutoscalingPolicyBasicAlgorithm or *AutoscalingPolicyBasicAlgorithm", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*AutoscalingPolicyBasicAlgorithm) - if !ok { - actualNotPointer, ok := a.(AutoscalingPolicyBasicAlgorithm) - if !ok { - return nil, fmt.Errorf("obj %v is not a AutoscalingPolicyBasicAlgorithm", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.YarnConfig, actual.YarnConfig, dcl.DiffInfo{ObjectFunction: compareAutoscalingPolicyBasicAlgorithmYarnConfigNewStyle, EmptyObject: EmptyAutoscalingPolicyBasicAlgorithmYarnConfig, OperationSelector: dcl.TriggersOperation("updateAutoscalingPolicyUpdateAutoscalingPolicyOperation")}, fn.AddNest("YarnConfig")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.CooldownPeriod, actual.CooldownPeriod, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.TriggersOperation("updateAutoscalingPolicyUpdateAutoscalingPolicyOperation")}, fn.AddNest("CooldownPeriod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareAutoscalingPolicyBasicAlgorithmYarnConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*AutoscalingPolicyBasicAlgorithmYarnConfig) - if !ok { - desiredNotPointer, ok := d.(AutoscalingPolicyBasicAlgorithmYarnConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a AutoscalingPolicyBasicAlgorithmYarnConfig or *AutoscalingPolicyBasicAlgorithmYarnConfig", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*AutoscalingPolicyBasicAlgorithmYarnConfig) - if !ok { - actualNotPointer, ok := a.(AutoscalingPolicyBasicAlgorithmYarnConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a AutoscalingPolicyBasicAlgorithmYarnConfig", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.GracefulDecommissionTimeout, actual.GracefulDecommissionTimeout, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAutoscalingPolicyUpdateAutoscalingPolicyOperation")}, fn.AddNest("GracefulDecommissionTimeout")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.ScaleUpFactor, actual.ScaleUpFactor, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAutoscalingPolicyUpdateAutoscalingPolicyOperation")}, fn.AddNest("ScaleUpFactor")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.ScaleDownFactor, actual.ScaleDownFactor, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAutoscalingPolicyUpdateAutoscalingPolicyOperation")}, fn.AddNest("ScaleDownFactor")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.ScaleUpMinWorkerFraction, actual.ScaleUpMinWorkerFraction, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAutoscalingPolicyUpdateAutoscalingPolicyOperation")}, fn.AddNest("ScaleUpMinWorkerFraction")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.ScaleDownMinWorkerFraction, actual.ScaleDownMinWorkerFraction, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAutoscalingPolicyUpdateAutoscalingPolicyOperation")}, fn.AddNest("ScaleDownMinWorkerFraction")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareAutoscalingPolicyWorkerConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*AutoscalingPolicyWorkerConfig) - if !ok { - desiredNotPointer, ok := d.(AutoscalingPolicyWorkerConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a AutoscalingPolicyWorkerConfig or *AutoscalingPolicyWorkerConfig", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*AutoscalingPolicyWorkerConfig) - if !ok { - actualNotPointer, ok := a.(AutoscalingPolicyWorkerConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a AutoscalingPolicyWorkerConfig", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.MinInstances, actual.MinInstances, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.TriggersOperation("updateAutoscalingPolicyUpdateAutoscalingPolicyOperation")}, fn.AddNest("MinInstances")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.MaxInstances, actual.MaxInstances, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAutoscalingPolicyUpdateAutoscalingPolicyOperation")}, fn.AddNest("MaxInstances")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Weight, actual.Weight, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.TriggersOperation("updateAutoscalingPolicyUpdateAutoscalingPolicyOperation")}, fn.AddNest("Weight")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareAutoscalingPolicySecondaryWorkerConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*AutoscalingPolicySecondaryWorkerConfig) - if !ok { - desiredNotPointer, ok := d.(AutoscalingPolicySecondaryWorkerConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a AutoscalingPolicySecondaryWorkerConfig or *AutoscalingPolicySecondaryWorkerConfig", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*AutoscalingPolicySecondaryWorkerConfig) - if !ok { - actualNotPointer, ok := a.(AutoscalingPolicySecondaryWorkerConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a AutoscalingPolicySecondaryWorkerConfig", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.MinInstances, actual.MinInstances, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAutoscalingPolicyUpdateAutoscalingPolicyOperation")}, fn.AddNest("MinInstances")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.MaxInstances, actual.MaxInstances, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateAutoscalingPolicyUpdateAutoscalingPolicyOperation")}, fn.AddNest("MaxInstances")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Weight, actual.Weight, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.TriggersOperation("updateAutoscalingPolicyUpdateAutoscalingPolicyOperation")}, fn.AddNest("Weight")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -// urlNormalized returns a copy of the resource struct with values normalized -// for URL substitutions. For instance, it converts long-form self-links to -// short-form so they can be substituted in. -func (r *AutoscalingPolicy) urlNormalized() *AutoscalingPolicy { - normalized := dcl.Copy(*r).(AutoscalingPolicy) - normalized.Name = dcl.SelfLinkToName(r.Name) - normalized.Project = dcl.SelfLinkToName(r.Project) - normalized.Location = dcl.SelfLinkToName(r.Location) - return &normalized -} - -func (r *AutoscalingPolicy) updateURL(userBasePath, updateName string) (string, error) { - nr := r.urlNormalized() - if updateName == "UpdateAutoscalingPolicy" { - fields := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "location": dcl.ValueOrEmptyString(nr.Location), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/autoscalingPolicies/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, fields), nil - - } - - return "", fmt.Errorf("unknown update name: %s", updateName) -} - -// marshal encodes the AutoscalingPolicy resource into JSON for a Create request, and -// performs transformations from the resource schema to the API schema if -// necessary. 
-func (r *AutoscalingPolicy) marshal(c *Client) ([]byte, error) { - m, err := expandAutoscalingPolicy(c, r) - if err != nil { - return nil, fmt.Errorf("error marshalling AutoscalingPolicy: %w", err) - } - - return json.Marshal(m) -} - -// unmarshalAutoscalingPolicy decodes JSON responses into the AutoscalingPolicy resource schema. -func unmarshalAutoscalingPolicy(b []byte, c *Client, res *AutoscalingPolicy) (*AutoscalingPolicy, error) { - var m map[string]interface{} - if err := json.Unmarshal(b, &m); err != nil { - return nil, err - } - return unmarshalMapAutoscalingPolicy(m, c, res) -} - -func unmarshalMapAutoscalingPolicy(m map[string]interface{}, c *Client, res *AutoscalingPolicy) (*AutoscalingPolicy, error) { - - flattened := flattenAutoscalingPolicy(c, m, res) - if flattened == nil { - return nil, fmt.Errorf("attempted to flatten empty json object") - } - return flattened, nil -} - -// expandAutoscalingPolicy expands AutoscalingPolicy into a JSON request object. -func expandAutoscalingPolicy(c *Client, f *AutoscalingPolicy) (map[string]interface{}, error) { - m := make(map[string]interface{}) - res := f - _ = res - if v := f.Name; dcl.ValueShouldBeSent(v) { - m["id"] = v - } - if v, err := expandAutoscalingPolicyBasicAlgorithm(c, f.BasicAlgorithm, res); err != nil { - return nil, fmt.Errorf("error expanding BasicAlgorithm into basicAlgorithm: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["basicAlgorithm"] = v - } - if v, err := expandAutoscalingPolicyWorkerConfig(c, f.WorkerConfig, res); err != nil { - return nil, fmt.Errorf("error expanding WorkerConfig into workerConfig: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["workerConfig"] = v - } - if v, err := expandAutoscalingPolicySecondaryWorkerConfig(c, f.SecondaryWorkerConfig, res); err != nil { - return nil, fmt.Errorf("error expanding SecondaryWorkerConfig into secondaryWorkerConfig: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["secondaryWorkerConfig"] = v - } - if v, err 
:= dcl.EmptyValue(); err != nil { - return nil, fmt.Errorf("error expanding Project into project: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["project"] = v - } - if v, err := dcl.EmptyValue(); err != nil { - return nil, fmt.Errorf("error expanding Location into location: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["location"] = v - } - - return m, nil -} - -// flattenAutoscalingPolicy flattens AutoscalingPolicy from a JSON request object into the -// AutoscalingPolicy type. -func flattenAutoscalingPolicy(c *Client, i interface{}, res *AutoscalingPolicy) *AutoscalingPolicy { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - if len(m) == 0 { - return nil - } - - resultRes := &AutoscalingPolicy{} - resultRes.Name = dcl.FlattenString(m["id"]) - resultRes.BasicAlgorithm = flattenAutoscalingPolicyBasicAlgorithm(c, m["basicAlgorithm"], res) - resultRes.WorkerConfig = flattenAutoscalingPolicyWorkerConfig(c, m["workerConfig"], res) - resultRes.SecondaryWorkerConfig = flattenAutoscalingPolicySecondaryWorkerConfig(c, m["secondaryWorkerConfig"], res) - resultRes.Project = dcl.FlattenString(m["project"]) - resultRes.Location = dcl.FlattenString(m["location"]) - - return resultRes -} - -// expandAutoscalingPolicyBasicAlgorithmMap expands the contents of AutoscalingPolicyBasicAlgorithm into a JSON -// request object. -func expandAutoscalingPolicyBasicAlgorithmMap(c *Client, f map[string]AutoscalingPolicyBasicAlgorithm, res *AutoscalingPolicy) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandAutoscalingPolicyBasicAlgorithm(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandAutoscalingPolicyBasicAlgorithmSlice expands the contents of AutoscalingPolicyBasicAlgorithm into a JSON -// request object. 
-func expandAutoscalingPolicyBasicAlgorithmSlice(c *Client, f []AutoscalingPolicyBasicAlgorithm, res *AutoscalingPolicy) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandAutoscalingPolicyBasicAlgorithm(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenAutoscalingPolicyBasicAlgorithmMap flattens the contents of AutoscalingPolicyBasicAlgorithm from a JSON -// response object. -func flattenAutoscalingPolicyBasicAlgorithmMap(c *Client, i interface{}, res *AutoscalingPolicy) map[string]AutoscalingPolicyBasicAlgorithm { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]AutoscalingPolicyBasicAlgorithm{} - } - - if len(a) == 0 { - return map[string]AutoscalingPolicyBasicAlgorithm{} - } - - items := make(map[string]AutoscalingPolicyBasicAlgorithm) - for k, item := range a { - items[k] = *flattenAutoscalingPolicyBasicAlgorithm(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenAutoscalingPolicyBasicAlgorithmSlice flattens the contents of AutoscalingPolicyBasicAlgorithm from a JSON -// response object. -func flattenAutoscalingPolicyBasicAlgorithmSlice(c *Client, i interface{}, res *AutoscalingPolicy) []AutoscalingPolicyBasicAlgorithm { - a, ok := i.([]interface{}) - if !ok { - return []AutoscalingPolicyBasicAlgorithm{} - } - - if len(a) == 0 { - return []AutoscalingPolicyBasicAlgorithm{} - } - - items := make([]AutoscalingPolicyBasicAlgorithm, 0, len(a)) - for _, item := range a { - items = append(items, *flattenAutoscalingPolicyBasicAlgorithm(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandAutoscalingPolicyBasicAlgorithm expands an instance of AutoscalingPolicyBasicAlgorithm into a JSON -// request object. 
-func expandAutoscalingPolicyBasicAlgorithm(c *Client, f *AutoscalingPolicyBasicAlgorithm, res *AutoscalingPolicy) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandAutoscalingPolicyBasicAlgorithmYarnConfig(c, f.YarnConfig, res); err != nil { - return nil, fmt.Errorf("error expanding YarnConfig into yarnConfig: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["yarnConfig"] = v - } - if v := f.CooldownPeriod; !dcl.IsEmptyValueIndirect(v) { - m["cooldownPeriod"] = v - } - - return m, nil -} - -// flattenAutoscalingPolicyBasicAlgorithm flattens an instance of AutoscalingPolicyBasicAlgorithm from a JSON -// response object. -func flattenAutoscalingPolicyBasicAlgorithm(c *Client, i interface{}, res *AutoscalingPolicy) *AutoscalingPolicyBasicAlgorithm { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &AutoscalingPolicyBasicAlgorithm{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyAutoscalingPolicyBasicAlgorithm - } - r.YarnConfig = flattenAutoscalingPolicyBasicAlgorithmYarnConfig(c, m["yarnConfig"], res) - r.CooldownPeriod = dcl.FlattenString(m["cooldownPeriod"]) - - return r -} - -// expandAutoscalingPolicyBasicAlgorithmYarnConfigMap expands the contents of AutoscalingPolicyBasicAlgorithmYarnConfig into a JSON -// request object. 
-func expandAutoscalingPolicyBasicAlgorithmYarnConfigMap(c *Client, f map[string]AutoscalingPolicyBasicAlgorithmYarnConfig, res *AutoscalingPolicy) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandAutoscalingPolicyBasicAlgorithmYarnConfig(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandAutoscalingPolicyBasicAlgorithmYarnConfigSlice expands the contents of AutoscalingPolicyBasicAlgorithmYarnConfig into a JSON -// request object. -func expandAutoscalingPolicyBasicAlgorithmYarnConfigSlice(c *Client, f []AutoscalingPolicyBasicAlgorithmYarnConfig, res *AutoscalingPolicy) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandAutoscalingPolicyBasicAlgorithmYarnConfig(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenAutoscalingPolicyBasicAlgorithmYarnConfigMap flattens the contents of AutoscalingPolicyBasicAlgorithmYarnConfig from a JSON -// response object. -func flattenAutoscalingPolicyBasicAlgorithmYarnConfigMap(c *Client, i interface{}, res *AutoscalingPolicy) map[string]AutoscalingPolicyBasicAlgorithmYarnConfig { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]AutoscalingPolicyBasicAlgorithmYarnConfig{} - } - - if len(a) == 0 { - return map[string]AutoscalingPolicyBasicAlgorithmYarnConfig{} - } - - items := make(map[string]AutoscalingPolicyBasicAlgorithmYarnConfig) - for k, item := range a { - items[k] = *flattenAutoscalingPolicyBasicAlgorithmYarnConfig(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenAutoscalingPolicyBasicAlgorithmYarnConfigSlice flattens the contents of AutoscalingPolicyBasicAlgorithmYarnConfig from a JSON -// response object. 
-func flattenAutoscalingPolicyBasicAlgorithmYarnConfigSlice(c *Client, i interface{}, res *AutoscalingPolicy) []AutoscalingPolicyBasicAlgorithmYarnConfig { - a, ok := i.([]interface{}) - if !ok { - return []AutoscalingPolicyBasicAlgorithmYarnConfig{} - } - - if len(a) == 0 { - return []AutoscalingPolicyBasicAlgorithmYarnConfig{} - } - - items := make([]AutoscalingPolicyBasicAlgorithmYarnConfig, 0, len(a)) - for _, item := range a { - items = append(items, *flattenAutoscalingPolicyBasicAlgorithmYarnConfig(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandAutoscalingPolicyBasicAlgorithmYarnConfig expands an instance of AutoscalingPolicyBasicAlgorithmYarnConfig into a JSON -// request object. -func expandAutoscalingPolicyBasicAlgorithmYarnConfig(c *Client, f *AutoscalingPolicyBasicAlgorithmYarnConfig, res *AutoscalingPolicy) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.GracefulDecommissionTimeout; !dcl.IsEmptyValueIndirect(v) { - m["gracefulDecommissionTimeout"] = v - } - if v := f.ScaleUpFactor; !dcl.IsEmptyValueIndirect(v) { - m["scaleUpFactor"] = v - } - if v := f.ScaleDownFactor; !dcl.IsEmptyValueIndirect(v) { - m["scaleDownFactor"] = v - } - if v := f.ScaleUpMinWorkerFraction; !dcl.IsEmptyValueIndirect(v) { - m["scaleUpMinWorkerFraction"] = v - } - if v := f.ScaleDownMinWorkerFraction; !dcl.IsEmptyValueIndirect(v) { - m["scaleDownMinWorkerFraction"] = v - } - - return m, nil -} - -// flattenAutoscalingPolicyBasicAlgorithmYarnConfig flattens an instance of AutoscalingPolicyBasicAlgorithmYarnConfig from a JSON -// response object. 
-func flattenAutoscalingPolicyBasicAlgorithmYarnConfig(c *Client, i interface{}, res *AutoscalingPolicy) *AutoscalingPolicyBasicAlgorithmYarnConfig { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &AutoscalingPolicyBasicAlgorithmYarnConfig{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyAutoscalingPolicyBasicAlgorithmYarnConfig - } - r.GracefulDecommissionTimeout = dcl.FlattenString(m["gracefulDecommissionTimeout"]) - r.ScaleUpFactor = dcl.FlattenDouble(m["scaleUpFactor"]) - r.ScaleDownFactor = dcl.FlattenDouble(m["scaleDownFactor"]) - r.ScaleUpMinWorkerFraction = dcl.FlattenDouble(m["scaleUpMinWorkerFraction"]) - r.ScaleDownMinWorkerFraction = dcl.FlattenDouble(m["scaleDownMinWorkerFraction"]) - - return r -} - -// expandAutoscalingPolicyWorkerConfigMap expands the contents of AutoscalingPolicyWorkerConfig into a JSON -// request object. -func expandAutoscalingPolicyWorkerConfigMap(c *Client, f map[string]AutoscalingPolicyWorkerConfig, res *AutoscalingPolicy) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandAutoscalingPolicyWorkerConfig(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandAutoscalingPolicyWorkerConfigSlice expands the contents of AutoscalingPolicyWorkerConfig into a JSON -// request object. 
-func expandAutoscalingPolicyWorkerConfigSlice(c *Client, f []AutoscalingPolicyWorkerConfig, res *AutoscalingPolicy) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandAutoscalingPolicyWorkerConfig(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenAutoscalingPolicyWorkerConfigMap flattens the contents of AutoscalingPolicyWorkerConfig from a JSON -// response object. -func flattenAutoscalingPolicyWorkerConfigMap(c *Client, i interface{}, res *AutoscalingPolicy) map[string]AutoscalingPolicyWorkerConfig { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]AutoscalingPolicyWorkerConfig{} - } - - if len(a) == 0 { - return map[string]AutoscalingPolicyWorkerConfig{} - } - - items := make(map[string]AutoscalingPolicyWorkerConfig) - for k, item := range a { - items[k] = *flattenAutoscalingPolicyWorkerConfig(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenAutoscalingPolicyWorkerConfigSlice flattens the contents of AutoscalingPolicyWorkerConfig from a JSON -// response object. -func flattenAutoscalingPolicyWorkerConfigSlice(c *Client, i interface{}, res *AutoscalingPolicy) []AutoscalingPolicyWorkerConfig { - a, ok := i.([]interface{}) - if !ok { - return []AutoscalingPolicyWorkerConfig{} - } - - if len(a) == 0 { - return []AutoscalingPolicyWorkerConfig{} - } - - items := make([]AutoscalingPolicyWorkerConfig, 0, len(a)) - for _, item := range a { - items = append(items, *flattenAutoscalingPolicyWorkerConfig(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandAutoscalingPolicyWorkerConfig expands an instance of AutoscalingPolicyWorkerConfig into a JSON -// request object. 
-func expandAutoscalingPolicyWorkerConfig(c *Client, f *AutoscalingPolicyWorkerConfig, res *AutoscalingPolicy) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.MinInstances; !dcl.IsEmptyValueIndirect(v) { - m["minInstances"] = v - } - if v := f.MaxInstances; !dcl.IsEmptyValueIndirect(v) { - m["maxInstances"] = v - } - if v := f.Weight; !dcl.IsEmptyValueIndirect(v) { - m["weight"] = v - } - - return m, nil -} - -// flattenAutoscalingPolicyWorkerConfig flattens an instance of AutoscalingPolicyWorkerConfig from a JSON -// response object. -func flattenAutoscalingPolicyWorkerConfig(c *Client, i interface{}, res *AutoscalingPolicy) *AutoscalingPolicyWorkerConfig { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &AutoscalingPolicyWorkerConfig{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyAutoscalingPolicyWorkerConfig - } - r.MinInstances = dcl.FlattenInteger(m["minInstances"]) - r.MaxInstances = dcl.FlattenInteger(m["maxInstances"]) - r.Weight = dcl.FlattenInteger(m["weight"]) - - return r -} - -// expandAutoscalingPolicySecondaryWorkerConfigMap expands the contents of AutoscalingPolicySecondaryWorkerConfig into a JSON -// request object. -func expandAutoscalingPolicySecondaryWorkerConfigMap(c *Client, f map[string]AutoscalingPolicySecondaryWorkerConfig, res *AutoscalingPolicy) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandAutoscalingPolicySecondaryWorkerConfig(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandAutoscalingPolicySecondaryWorkerConfigSlice expands the contents of AutoscalingPolicySecondaryWorkerConfig into a JSON -// request object. 
-func expandAutoscalingPolicySecondaryWorkerConfigSlice(c *Client, f []AutoscalingPolicySecondaryWorkerConfig, res *AutoscalingPolicy) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandAutoscalingPolicySecondaryWorkerConfig(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenAutoscalingPolicySecondaryWorkerConfigMap flattens the contents of AutoscalingPolicySecondaryWorkerConfig from a JSON -// response object. -func flattenAutoscalingPolicySecondaryWorkerConfigMap(c *Client, i interface{}, res *AutoscalingPolicy) map[string]AutoscalingPolicySecondaryWorkerConfig { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]AutoscalingPolicySecondaryWorkerConfig{} - } - - if len(a) == 0 { - return map[string]AutoscalingPolicySecondaryWorkerConfig{} - } - - items := make(map[string]AutoscalingPolicySecondaryWorkerConfig) - for k, item := range a { - items[k] = *flattenAutoscalingPolicySecondaryWorkerConfig(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenAutoscalingPolicySecondaryWorkerConfigSlice flattens the contents of AutoscalingPolicySecondaryWorkerConfig from a JSON -// response object. 
-func flattenAutoscalingPolicySecondaryWorkerConfigSlice(c *Client, i interface{}, res *AutoscalingPolicy) []AutoscalingPolicySecondaryWorkerConfig { - a, ok := i.([]interface{}) - if !ok { - return []AutoscalingPolicySecondaryWorkerConfig{} - } - - if len(a) == 0 { - return []AutoscalingPolicySecondaryWorkerConfig{} - } - - items := make([]AutoscalingPolicySecondaryWorkerConfig, 0, len(a)) - for _, item := range a { - items = append(items, *flattenAutoscalingPolicySecondaryWorkerConfig(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandAutoscalingPolicySecondaryWorkerConfig expands an instance of AutoscalingPolicySecondaryWorkerConfig into a JSON -// request object. -func expandAutoscalingPolicySecondaryWorkerConfig(c *Client, f *AutoscalingPolicySecondaryWorkerConfig, res *AutoscalingPolicy) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.MinInstances; !dcl.IsEmptyValueIndirect(v) { - m["minInstances"] = v - } - if v := f.MaxInstances; !dcl.IsEmptyValueIndirect(v) { - m["maxInstances"] = v - } - if v := f.Weight; !dcl.IsEmptyValueIndirect(v) { - m["weight"] = v - } - - return m, nil -} - -// flattenAutoscalingPolicySecondaryWorkerConfig flattens an instance of AutoscalingPolicySecondaryWorkerConfig from a JSON -// response object. 
-func flattenAutoscalingPolicySecondaryWorkerConfig(c *Client, i interface{}, res *AutoscalingPolicy) *AutoscalingPolicySecondaryWorkerConfig { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &AutoscalingPolicySecondaryWorkerConfig{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyAutoscalingPolicySecondaryWorkerConfig - } - r.MinInstances = dcl.FlattenInteger(m["minInstances"]) - r.MaxInstances = dcl.FlattenInteger(m["maxInstances"]) - r.Weight = dcl.FlattenInteger(m["weight"]) - - return r -} - -// This function returns a matcher that checks whether a serialized resource matches this resource -// in its parameters (as defined by the fields in a Get, which definitionally define resource -// identity). This is useful in extracting the element from a List call. -func (r *AutoscalingPolicy) matcher(c *Client) func([]byte) bool { - return func(b []byte) bool { - cr, err := unmarshalAutoscalingPolicy(b, c, r) - if err != nil { - c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.") - return false - } - nr := r.urlNormalized() - ncr := cr.urlNormalized() - c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr) - - if nr.Project == nil && ncr.Project == nil { - c.Config.Logger.Info("Both Project fields null - considering equal.") - } else if nr.Project == nil || ncr.Project == nil { - c.Config.Logger.Info("Only one Project field is null - considering unequal.") - return false - } else if *nr.Project != *ncr.Project { - return false - } - if nr.Location == nil && ncr.Location == nil { - c.Config.Logger.Info("Both Location fields null - considering equal.") - } else if nr.Location == nil || ncr.Location == nil { - c.Config.Logger.Info("Only one Location field is null - considering unequal.") - return false - } else if *nr.Location != *ncr.Location { - return false - } - if nr.Name == nil && ncr.Name == nil { - c.Config.Logger.Info("Both Name fields null - considering equal.") - } else if nr.Name == nil || ncr.Name == 
nil { - c.Config.Logger.Info("Only one Name field is null - considering unequal.") - return false - } else if *nr.Name != *ncr.Name { - return false - } - return true - } -} - -type autoscalingPolicyDiff struct { - // The diff should include one or the other of RequiresRecreate or UpdateOp. - RequiresRecreate bool - UpdateOp autoscalingPolicyApiOperation - FieldName string // used for error logging -} - -func convertFieldDiffsToAutoscalingPolicyDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]autoscalingPolicyDiff, error) { - opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff) - // Map each operation name to the field diffs associated with it. - for _, fd := range fds { - for _, ro := range fd.ResultingOperation { - if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok { - fieldDiffs = append(fieldDiffs, fd) - opNamesToFieldDiffs[ro] = fieldDiffs - } else { - config.Logger.Infof("%s required due to diff: %v", ro, fd) - opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd} - } - } - } - var diffs []autoscalingPolicyDiff - // For each operation name, create a autoscalingPolicyDiff which contains the operation. - for opName, fieldDiffs := range opNamesToFieldDiffs { - // Use the first field diff's field name for logging required recreate error. - diff := autoscalingPolicyDiff{FieldName: fieldDiffs[0].FieldName} - if opName == "Recreate" { - diff.RequiresRecreate = true - } else { - apiOp, err := convertOpNameToAutoscalingPolicyApiOperation(opName, fieldDiffs, opts...) 
- if err != nil { - return diffs, err - } - diff.UpdateOp = apiOp - } - diffs = append(diffs, diff) - } - return diffs, nil -} - -func convertOpNameToAutoscalingPolicyApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (autoscalingPolicyApiOperation, error) { - switch opName { - - case "updateAutoscalingPolicyUpdateAutoscalingPolicyOperation": - return &updateAutoscalingPolicyUpdateAutoscalingPolicyOperation{FieldDiffs: fieldDiffs}, nil - - default: - return nil, fmt.Errorf("no such operation with name: %v", opName) - } -} - -func extractAutoscalingPolicyFields(r *AutoscalingPolicy) error { - vBasicAlgorithm := r.BasicAlgorithm - if vBasicAlgorithm == nil { - // note: explicitly not the empty object. - vBasicAlgorithm = &AutoscalingPolicyBasicAlgorithm{} - } - if err := extractAutoscalingPolicyBasicAlgorithmFields(r, vBasicAlgorithm); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vBasicAlgorithm) { - r.BasicAlgorithm = vBasicAlgorithm - } - vWorkerConfig := r.WorkerConfig - if vWorkerConfig == nil { - // note: explicitly not the empty object. - vWorkerConfig = &AutoscalingPolicyWorkerConfig{} - } - if err := extractAutoscalingPolicyWorkerConfigFields(r, vWorkerConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vWorkerConfig) { - r.WorkerConfig = vWorkerConfig - } - vSecondaryWorkerConfig := r.SecondaryWorkerConfig - if vSecondaryWorkerConfig == nil { - // note: explicitly not the empty object. 
- vSecondaryWorkerConfig = &AutoscalingPolicySecondaryWorkerConfig{} - } - if err := extractAutoscalingPolicySecondaryWorkerConfigFields(r, vSecondaryWorkerConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSecondaryWorkerConfig) { - r.SecondaryWorkerConfig = vSecondaryWorkerConfig - } - return nil -} -func extractAutoscalingPolicyBasicAlgorithmFields(r *AutoscalingPolicy, o *AutoscalingPolicyBasicAlgorithm) error { - vYarnConfig := o.YarnConfig - if vYarnConfig == nil { - // note: explicitly not the empty object. - vYarnConfig = &AutoscalingPolicyBasicAlgorithmYarnConfig{} - } - if err := extractAutoscalingPolicyBasicAlgorithmYarnConfigFields(r, vYarnConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vYarnConfig) { - o.YarnConfig = vYarnConfig - } - return nil -} -func extractAutoscalingPolicyBasicAlgorithmYarnConfigFields(r *AutoscalingPolicy, o *AutoscalingPolicyBasicAlgorithmYarnConfig) error { - return nil -} -func extractAutoscalingPolicyWorkerConfigFields(r *AutoscalingPolicy, o *AutoscalingPolicyWorkerConfig) error { - return nil -} -func extractAutoscalingPolicySecondaryWorkerConfigFields(r *AutoscalingPolicy, o *AutoscalingPolicySecondaryWorkerConfig) error { - return nil -} - -func postReadExtractAutoscalingPolicyFields(r *AutoscalingPolicy) error { - vBasicAlgorithm := r.BasicAlgorithm - if vBasicAlgorithm == nil { - // note: explicitly not the empty object. - vBasicAlgorithm = &AutoscalingPolicyBasicAlgorithm{} - } - if err := postReadExtractAutoscalingPolicyBasicAlgorithmFields(r, vBasicAlgorithm); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vBasicAlgorithm) { - r.BasicAlgorithm = vBasicAlgorithm - } - vWorkerConfig := r.WorkerConfig - if vWorkerConfig == nil { - // note: explicitly not the empty object. 
- vWorkerConfig = &AutoscalingPolicyWorkerConfig{} - } - if err := postReadExtractAutoscalingPolicyWorkerConfigFields(r, vWorkerConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vWorkerConfig) { - r.WorkerConfig = vWorkerConfig - } - vSecondaryWorkerConfig := r.SecondaryWorkerConfig - if vSecondaryWorkerConfig == nil { - // note: explicitly not the empty object. - vSecondaryWorkerConfig = &AutoscalingPolicySecondaryWorkerConfig{} - } - if err := postReadExtractAutoscalingPolicySecondaryWorkerConfigFields(r, vSecondaryWorkerConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSecondaryWorkerConfig) { - r.SecondaryWorkerConfig = vSecondaryWorkerConfig - } - return nil -} -func postReadExtractAutoscalingPolicyBasicAlgorithmFields(r *AutoscalingPolicy, o *AutoscalingPolicyBasicAlgorithm) error { - vYarnConfig := o.YarnConfig - if vYarnConfig == nil { - // note: explicitly not the empty object. - vYarnConfig = &AutoscalingPolicyBasicAlgorithmYarnConfig{} - } - if err := extractAutoscalingPolicyBasicAlgorithmYarnConfigFields(r, vYarnConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vYarnConfig) { - o.YarnConfig = vYarnConfig - } - return nil -} -func postReadExtractAutoscalingPolicyBasicAlgorithmYarnConfigFields(r *AutoscalingPolicy, o *AutoscalingPolicyBasicAlgorithmYarnConfig) error { - return nil -} -func postReadExtractAutoscalingPolicyWorkerConfigFields(r *AutoscalingPolicy, o *AutoscalingPolicyWorkerConfig) error { - return nil -} -func postReadExtractAutoscalingPolicySecondaryWorkerConfigFields(r *AutoscalingPolicy, o *AutoscalingPolicySecondaryWorkerConfig) error { - return nil -} diff --git a/mmv1/third_party/terraform/services/dataproc/autoscaling_policy_schema.go.tmpl b/mmv1/third_party/terraform/services/dataproc/autoscaling_policy_schema.go.tmpl deleted file mode 100644 index a1b03be133d0..000000000000 --- a/mmv1/third_party/terraform/services/dataproc/autoscaling_policy_schema.go.tmpl +++ /dev/null 
@@ -1,250 +0,0 @@ -package dataproc - -import ( - dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" -) - -func DCLAutoscalingPolicySchema() *dcl.Schema { - return &dcl.Schema{ - Info: &dcl.Info{ - Title: "Dataproc/AutoscalingPolicy", - Description: "The Dataproc AutoscalingPolicy resource", - StructName: "AutoscalingPolicy", - }, - Paths: &dcl.Paths{ - Get: &dcl.Path{ - Description: "The function used to get information about a AutoscalingPolicy", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "autoscalingPolicy", - Required: true, - Description: "A full instance of a AutoscalingPolicy", - }, - }, - }, - Apply: &dcl.Path{ - Description: "The function used to apply information about a AutoscalingPolicy", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "autoscalingPolicy", - Required: true, - Description: "A full instance of a AutoscalingPolicy", - }, - }, - }, - Delete: &dcl.Path{ - Description: "The function used to delete a AutoscalingPolicy", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "autoscalingPolicy", - Required: true, - Description: "A full instance of a AutoscalingPolicy", - }, - }, - }, - DeleteAll: &dcl.Path{ - Description: "The function used to delete all AutoscalingPolicy", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "location", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - List: &dcl.Path{ - Description: "The function used to list information about many AutoscalingPolicy", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "location", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - }, - 
Components: &dcl.Components{ - Schemas: map[string]*dcl.Component{ - "AutoscalingPolicy": &dcl.Component{ - Title: "AutoscalingPolicy", - ID: "projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/autoscalingPolicies/{{ "{{" }}name{{ "}}" }}", - ParentContainer: "project", - HasCreate: true, - SchemaProperty: dcl.Property{ - Type: "object", - Required: []string{ - "name", - "basicAlgorithm", - "workerConfig", - "project", - "location", - }, - Properties: map[string]*dcl.Property{ - "basicAlgorithm": &dcl.Property{ - Type: "object", - GoName: "BasicAlgorithm", - GoType: "AutoscalingPolicyBasicAlgorithm", - Required: []string{ - "yarnConfig", - }, - Properties: map[string]*dcl.Property{ - "cooldownPeriod": &dcl.Property{ - Type: "string", - GoName: "CooldownPeriod", - Description: "Optional. Duration between scaling events. A scaling period starts after the update operation from the previous event has completed. Bounds: . Default: 2m.", - ServerDefault: true, - }, - "yarnConfig": &dcl.Property{ - Type: "object", - GoName: "YarnConfig", - GoType: "AutoscalingPolicyBasicAlgorithmYarnConfig", - Description: "Required. YARN autoscaling configuration.", - Required: []string{ - "gracefulDecommissionTimeout", - "scaleUpFactor", - "scaleDownFactor", - }, - Properties: map[string]*dcl.Property{ - "gracefulDecommissionTimeout": &dcl.Property{ - Type: "string", - GoName: "GracefulDecommissionTimeout", - Description: "Required. Timeout for YARN graceful decommissioning of Node Managers. Specifies the duration to wait for jobs to complete before forcefully removing workers (and potentially interrupting jobs). Only applicable to downscaling operations.", - }, - "scaleDownFactor": &dcl.Property{ - Type: "number", - Format: "double", - GoName: "ScaleDownFactor", - Description: "Required. Fraction of average YARN pending memory in the last cooldown period for which to remove workers. 
A scale-down factor of 1 will result in scaling down so that there is no available memory remaining after the update (more aggressive scaling). A scale-down factor of 0 disables removing workers, which can be beneficial for autoscaling a single job. See .", - }, - "scaleDownMinWorkerFraction": &dcl.Property{ - Type: "number", - Format: "double", - GoName: "ScaleDownMinWorkerFraction", - Description: "Optional. Minimum scale-down threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2 worker scale-down for the cluster to scale. A threshold of 0 means the autoscaler will scale down on any recommended change. Bounds: . Default: 0.0.", - }, - "scaleUpFactor": &dcl.Property{ - Type: "number", - Format: "double", - GoName: "ScaleUpFactor", - Description: "Required. Fraction of average YARN pending memory in the last cooldown period for which to add workers. A scale-up factor of 1.0 will result in scaling up so that there is no pending memory remaining after the update (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling). See .", - }, - "scaleUpMinWorkerFraction": &dcl.Property{ - Type: "number", - Format: "double", - GoName: "ScaleUpMinWorkerFraction", - Description: "Optional. Minimum scale-up threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will scale up on any recommended change. Bounds: . 
Default: 0.0.", - }, - }, - }, - }, - }, - "location": &dcl.Property{ - Type: "string", - GoName: "Location", - Description: "The location for the resource", - Immutable: true, - Parameter: true, - }, - "name": &dcl.Property{ - Type: "string", - GoName: "Name", - Description: "The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names. * For `projects.regions.autoscalingPolicies`, the resource name of the policy has the following format: `projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}` * For `projects.locations.autoscalingPolicies`, the resource name of the policy has the following format: `projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`", - Immutable: true, - }, - "project": &dcl.Property{ - Type: "string", - GoName: "Project", - Description: "The project for the resource", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Cloudresourcemanager/Project", - Field: "name", - Parent: true, - }, - }, - Parameter: true, - }, - "secondaryWorkerConfig": &dcl.Property{ - Type: "object", - GoName: "SecondaryWorkerConfig", - GoType: "AutoscalingPolicySecondaryWorkerConfig", - Description: "Optional. Describes how the autoscaler will operate for secondary workers.", - Properties: map[string]*dcl.Property{ - "maxInstances": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "MaxInstances", - Description: "Optional. Maximum number of instances for this group. Note that by default, clusters will not use secondary workers. Required for secondary workers if the minimum secondary instances is set. Primary workers - Bounds: [min_instances, ). Secondary workers - Bounds: [min_instances, ). Default: 0.", - }, - "minInstances": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "MinInstances", - Description: "Optional. Minimum number of instances for this group. 
Primary workers - Bounds: . Default: 0.", - }, - "weight": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "Weight", - Description: "Optional. Weight for the instance group, which is used to determine the fraction of total workers in the cluster from this instance group. For example, if primary workers have weight 2, and secondary workers have weight 1, the cluster will have approximately 2 primary workers for each secondary worker. The cluster may not reach the specified balance if constrained by min/max bounds or other autoscaling settings. For example, if `max_instances` for secondary workers is 0, then only primary workers will be added. The cluster can also be out of balance when created. If weight is not set on any instance group, the cluster will default to equal weight for all groups: the cluster will attempt to maintain an equal number of workers in each group within the configured size bounds for each group. If weight is set for one group only, the cluster will default to zero weight on the unset group. For example if weight is set only on primary workers, the cluster will use primary workers only and no secondary workers.", - ServerDefault: true, - }, - }, - }, - "workerConfig": &dcl.Property{ - Type: "object", - GoName: "WorkerConfig", - GoType: "AutoscalingPolicyWorkerConfig", - Description: "Required. Describes how the autoscaler will operate for primary workers.", - Required: []string{ - "maxInstances", - }, - Properties: map[string]*dcl.Property{ - "maxInstances": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "MaxInstances", - Description: "Required. Maximum number of instances for this group. Required for primary workers. Note that by default, clusters will not use secondary workers. Required for secondary workers if the minimum secondary instances is set. Primary workers - Bounds: [min_instances, ). Secondary workers - Bounds: [min_instances, ). 
Default: 0.", - }, - "minInstances": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "MinInstances", - Description: "Optional. Minimum number of instances for this group. Primary workers - Bounds: . Default: 0.", - ServerDefault: true, - }, - "weight": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "Weight", - Description: "Optional. Weight for the instance group, which is used to determine the fraction of total workers in the cluster from this instance group. For example, if primary workers have weight 2, and secondary workers have weight 1, the cluster will have approximately 2 primary workers for each secondary worker. The cluster may not reach the specified balance if constrained by min/max bounds or other autoscaling settings. For example, if `max_instances` for secondary workers is 0, then only primary workers will be added. The cluster can also be out of balance when created. If weight is not set on any instance group, the cluster will default to equal weight for all groups: the cluster will attempt to maintain an equal number of workers in each group within the configured size bounds for each group. If weight is set for one group only, the cluster will default to zero weight on the unset group. 
For example if weight is set only on primary workers, the cluster will use primary workers only and no secondary workers.", - ServerDefault: true, - }, - }, - }, - }, - }, - }, - }, - }, - } -} diff --git a/mmv1/third_party/terraform/services/dataproc/cluster.go.tmpl b/mmv1/third_party/terraform/services/dataproc/cluster.go.tmpl deleted file mode 100644 index e695420be558..000000000000 --- a/mmv1/third_party/terraform/services/dataproc/cluster.go.tmpl +++ /dev/null @@ -1,3457 +0,0 @@ -package dataproc - -import ( - "bytes" - "context" - "crypto/sha256" - "encoding/json" - "fmt" - "time" - - "google.golang.org/api/googleapi" - dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" -) - -type Cluster struct { - Project *string `json:"project"` - Name *string `json:"name"` - Config *ClusterConfig `json:"config"` - Labels map[string]string `json:"labels"` - Status *ClusterStatus `json:"status"` - StatusHistory []ClusterStatusHistory `json:"statusHistory"` - ClusterUuid *string `json:"clusterUuid"` - Metrics *ClusterMetrics `json:"metrics"` - Location *string `json:"location"` - VirtualClusterConfig *ClusterVirtualClusterConfig `json:"virtualClusterConfig"` -} - -func (r *Cluster) String() string { - return dcl.SprintResource(r) -} - -// The enum ClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum. -type ClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum string - -// ClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnumRef returns a *ClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum with the value of string s -// If the empty string is provided, nil is returned. -func ClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnumRef(s string) *ClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum { - v := ClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum(s) - return &v -} - -func (v ClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. 
- return nil - } - for _, s := range []string{"PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED", "INHERIT_FROM_SUBNETWORK", "OUTBOUND", "BIDIRECTIONAL"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "ClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum ClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum. -type ClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum string - -// ClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnumRef returns a *ClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum with the value of string s -// If the empty string is provided, nil is returned. -func ClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnumRef(s string) *ClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum { - v := ClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum(s) - return &v -} - -func (v ClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"TYPE_UNSPECIFIED", "NO_RESERVATION", "ANY_RESERVATION", "SPECIFIC_RESERVATION"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "ClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum ClusterConfigMasterConfigPreemptibilityEnum. -type ClusterConfigMasterConfigPreemptibilityEnum string - -// ClusterConfigMasterConfigPreemptibilityEnumRef returns a *ClusterConfigMasterConfigPreemptibilityEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func ClusterConfigMasterConfigPreemptibilityEnumRef(s string) *ClusterConfigMasterConfigPreemptibilityEnum { - v := ClusterConfigMasterConfigPreemptibilityEnum(s) - return &v -} - -func (v ClusterConfigMasterConfigPreemptibilityEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"PREEMPTIBILITY_UNSPECIFIED", "NON_PREEMPTIBLE", "PREEMPTIBLE"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "ClusterConfigMasterConfigPreemptibilityEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum ClusterConfigWorkerConfigPreemptibilityEnum. -type ClusterConfigWorkerConfigPreemptibilityEnum string - -// ClusterConfigWorkerConfigPreemptibilityEnumRef returns a *ClusterConfigWorkerConfigPreemptibilityEnum with the value of string s -// If the empty string is provided, nil is returned. -func ClusterConfigWorkerConfigPreemptibilityEnumRef(s string) *ClusterConfigWorkerConfigPreemptibilityEnum { - v := ClusterConfigWorkerConfigPreemptibilityEnum(s) - return &v -} - -func (v ClusterConfigWorkerConfigPreemptibilityEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"PREEMPTIBILITY_UNSPECIFIED", "NON_PREEMPTIBLE", "PREEMPTIBLE"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "ClusterConfigWorkerConfigPreemptibilityEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum ClusterConfigSecondaryWorkerConfigPreemptibilityEnum. -type ClusterConfigSecondaryWorkerConfigPreemptibilityEnum string - -// ClusterConfigSecondaryWorkerConfigPreemptibilityEnumRef returns a *ClusterConfigSecondaryWorkerConfigPreemptibilityEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func ClusterConfigSecondaryWorkerConfigPreemptibilityEnumRef(s string) *ClusterConfigSecondaryWorkerConfigPreemptibilityEnum { - v := ClusterConfigSecondaryWorkerConfigPreemptibilityEnum(s) - return &v -} - -func (v ClusterConfigSecondaryWorkerConfigPreemptibilityEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"PREEMPTIBILITY_UNSPECIFIED", "NON_PREEMPTIBLE", "PREEMPTIBLE"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "ClusterConfigSecondaryWorkerConfigPreemptibilityEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum ClusterConfigSoftwareConfigOptionalComponentsEnum. -type ClusterConfigSoftwareConfigOptionalComponentsEnum string - -// ClusterConfigSoftwareConfigOptionalComponentsEnumRef returns a *ClusterConfigSoftwareConfigOptionalComponentsEnum with the value of string s -// If the empty string is provided, nil is returned. -func ClusterConfigSoftwareConfigOptionalComponentsEnumRef(s string) *ClusterConfigSoftwareConfigOptionalComponentsEnum { - v := ClusterConfigSoftwareConfigOptionalComponentsEnum(s) - return &v -} - -func (v ClusterConfigSoftwareConfigOptionalComponentsEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"COMPONENT_UNSPECIFIED", "ANACONDA", "DOCKER", "DRUID", "FLINK", "HBASE", "HIVE_WEBHCAT", "JUPYTER", "KERBEROS", "PRESTO", "RANGER", "SOLR", "ZEPPELIN", "ZOOKEEPER"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "ClusterConfigSoftwareConfigOptionalComponentsEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum ClusterConfigDataprocMetricConfigMetricsMetricSourceEnum. 
-type ClusterConfigDataprocMetricConfigMetricsMetricSourceEnum string - -// ClusterConfigDataprocMetricConfigMetricsMetricSourceEnumRef returns a *ClusterConfigDataprocMetricConfigMetricsMetricSourceEnum with the value of string s -// If the empty string is provided, nil is returned. -func ClusterConfigDataprocMetricConfigMetricsMetricSourceEnumRef(s string) *ClusterConfigDataprocMetricConfigMetricsMetricSourceEnum { - v := ClusterConfigDataprocMetricConfigMetricsMetricSourceEnum(s) - return &v -} - -func (v ClusterConfigDataprocMetricConfigMetricsMetricSourceEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"METRIC_SOURCE_UNSPECIFIED", "MONITORING_AGENT_DEFAULTS", "HDFS", "SPARK", "YARN", "SPARK_HISTORY_SERVER", "HIVESERVER2"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "ClusterConfigDataprocMetricConfigMetricsMetricSourceEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum ClusterStatusStateEnum. -type ClusterStatusStateEnum string - -// ClusterStatusStateEnumRef returns a *ClusterStatusStateEnum with the value of string s -// If the empty string is provided, nil is returned. -func ClusterStatusStateEnumRef(s string) *ClusterStatusStateEnum { - v := ClusterStatusStateEnum(s) - return &v -} - -func (v ClusterStatusStateEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"UNKNOWN", "CREATING", "RUNNING", "ERROR", "DELETING", "UPDATING", "STOPPING", "STOPPED", "STARTING"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "ClusterStatusStateEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum ClusterStatusSubstateEnum. -type ClusterStatusSubstateEnum string - -// ClusterStatusSubstateEnumRef returns a *ClusterStatusSubstateEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func ClusterStatusSubstateEnumRef(s string) *ClusterStatusSubstateEnum { - v := ClusterStatusSubstateEnum(s) - return &v -} - -func (v ClusterStatusSubstateEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"UNSPECIFIED", "UNHEALTHY", "STALE_STATUS"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "ClusterStatusSubstateEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum ClusterStatusHistoryStateEnum. -type ClusterStatusHistoryStateEnum string - -// ClusterStatusHistoryStateEnumRef returns a *ClusterStatusHistoryStateEnum with the value of string s -// If the empty string is provided, nil is returned. -func ClusterStatusHistoryStateEnumRef(s string) *ClusterStatusHistoryStateEnum { - v := ClusterStatusHistoryStateEnum(s) - return &v -} - -func (v ClusterStatusHistoryStateEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"UNKNOWN", "CREATING", "RUNNING", "ERROR", "DELETING", "UPDATING", "STOPPING", "STOPPED", "STARTING"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "ClusterStatusHistoryStateEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum ClusterStatusHistorySubstateEnum. -type ClusterStatusHistorySubstateEnum string - -// ClusterStatusHistorySubstateEnumRef returns a *ClusterStatusHistorySubstateEnum with the value of string s -// If the empty string is provided, nil is returned. -func ClusterStatusHistorySubstateEnumRef(s string) *ClusterStatusHistorySubstateEnum { - v := ClusterStatusHistorySubstateEnum(s) - return &v -} - -func (v ClusterStatusHistorySubstateEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. 
- return nil - } - for _, s := range []string{"UNSPECIFIED", "UNHEALTHY", "STALE_STATUS"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "ClusterStatusHistorySubstateEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum. -type ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum string - -// ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnumRef returns a *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum with the value of string s -// If the empty string is provided, nil is returned. -func ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnumRef(s string) *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum { - v := ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum(s) - return &v -} - -func (v ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. 
- return nil - } - for _, s := range []string{"ROLE_UNSPECIFIED", "DEFAULT", "CONTROLLER", "SPARK_DRIVER", "SPARK_EXECUTOR"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum", - Value: string(v), - Valid: []string{}, - } -} - -type ClusterConfig struct { - empty bool `json:"-"` - StagingBucket *string `json:"stagingBucket"` - TempBucket *string `json:"tempBucket"` - GceClusterConfig *ClusterConfigGceClusterConfig `json:"gceClusterConfig"` - MasterConfig *ClusterConfigMasterConfig `json:"masterConfig"` - WorkerConfig *ClusterConfigWorkerConfig `json:"workerConfig"` - SecondaryWorkerConfig *ClusterConfigSecondaryWorkerConfig `json:"secondaryWorkerConfig"` - SoftwareConfig *ClusterConfigSoftwareConfig `json:"softwareConfig"` - InitializationActions []ClusterConfigInitializationActions `json:"initializationActions"` - EncryptionConfig *ClusterConfigEncryptionConfig `json:"encryptionConfig"` - AutoscalingConfig *ClusterConfigAutoscalingConfig `json:"autoscalingConfig"` - SecurityConfig *ClusterConfigSecurityConfig `json:"securityConfig"` - LifecycleConfig *ClusterConfigLifecycleConfig `json:"lifecycleConfig"` - EndpointConfig *ClusterConfigEndpointConfig `json:"endpointConfig"` - GkeClusterConfig *ClusterConfigGkeClusterConfig `json:"gkeClusterConfig"` - MetastoreConfig *ClusterConfigMetastoreConfig `json:"metastoreConfig"` - DataprocMetricConfig *ClusterConfigDataprocMetricConfig `json:"dataprocMetricConfig"` -} - -type jsonClusterConfig ClusterConfig - -func (r *ClusterConfig) UnmarshalJSON(data []byte) error { - var res jsonClusterConfig - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyClusterConfig - } else { - - r.StagingBucket = res.StagingBucket - - r.TempBucket = res.TempBucket - - r.GceClusterConfig = res.GceClusterConfig - 
- r.MasterConfig = res.MasterConfig - - r.WorkerConfig = res.WorkerConfig - - r.SecondaryWorkerConfig = res.SecondaryWorkerConfig - - r.SoftwareConfig = res.SoftwareConfig - - r.InitializationActions = res.InitializationActions - - r.EncryptionConfig = res.EncryptionConfig - - r.AutoscalingConfig = res.AutoscalingConfig - - r.SecurityConfig = res.SecurityConfig - - r.LifecycleConfig = res.LifecycleConfig - - r.EndpointConfig = res.EndpointConfig - - r.GkeClusterConfig = res.GkeClusterConfig - - r.MetastoreConfig = res.MetastoreConfig - - r.DataprocMetricConfig = res.DataprocMetricConfig - - } - return nil -} - -// This object is used to assert a desired state where this ClusterConfig is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyClusterConfig *ClusterConfig = &ClusterConfig{empty: true} - -func (r *ClusterConfig) Empty() bool { - return r.empty -} - -func (r *ClusterConfig) String() string { - return dcl.SprintResource(r) -} - -func (r *ClusterConfig) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ClusterConfigGceClusterConfig struct { - empty bool `json:"-"` - Zone *string `json:"zone"` - Network *string `json:"network"` - Subnetwork *string `json:"subnetwork"` - InternalIPOnly *bool `json:"internalIPOnly"` - PrivateIPv6GoogleAccess *ClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum `json:"privateIPv6GoogleAccess"` - ServiceAccount *string `json:"serviceAccount"` - ServiceAccountScopes []string `json:"serviceAccountScopes"` - Tags []string `json:"tags"` - Metadata map[string]string `json:"metadata"` - ReservationAffinity *ClusterConfigGceClusterConfigReservationAffinity `json:"reservationAffinity"` - NodeGroupAffinity *ClusterConfigGceClusterConfigNodeGroupAffinity 
`json:"nodeGroupAffinity"` - ShieldedInstanceConfig *ClusterConfigGceClusterConfigShieldedInstanceConfig `json:"shieldedInstanceConfig"` - ConfidentialInstanceConfig *ClusterConfigGceClusterConfigConfidentialInstanceConfig `json:"confidentialInstanceConfig"` -} - -type jsonClusterConfigGceClusterConfig ClusterConfigGceClusterConfig - -func (r *ClusterConfigGceClusterConfig) UnmarshalJSON(data []byte) error { - var res jsonClusterConfigGceClusterConfig - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyClusterConfigGceClusterConfig - } else { - - r.Zone = res.Zone - - r.Network = res.Network - - r.Subnetwork = res.Subnetwork - - r.InternalIPOnly = res.InternalIPOnly - - r.PrivateIPv6GoogleAccess = res.PrivateIPv6GoogleAccess - - r.ServiceAccount = res.ServiceAccount - - r.ServiceAccountScopes = res.ServiceAccountScopes - - r.Tags = res.Tags - - r.Metadata = res.Metadata - - r.ReservationAffinity = res.ReservationAffinity - - r.NodeGroupAffinity = res.NodeGroupAffinity - - r.ShieldedInstanceConfig = res.ShieldedInstanceConfig - - r.ConfidentialInstanceConfig = res.ConfidentialInstanceConfig - - } - return nil -} - -// This object is used to assert a desired state where this ClusterConfigGceClusterConfig is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyClusterConfigGceClusterConfig *ClusterConfigGceClusterConfig = &ClusterConfigGceClusterConfig{empty: true} - -func (r *ClusterConfigGceClusterConfig) Empty() bool { - return r.empty -} - -func (r *ClusterConfigGceClusterConfig) String() string { - return dcl.SprintResource(r) -} - -func (r *ClusterConfigGceClusterConfig) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ClusterConfigGceClusterConfigReservationAffinity struct { - empty bool `json:"-"` - ConsumeReservationType *ClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum `json:"consumeReservationType"` - Key *string `json:"key"` - Values []string `json:"values"` -} - -type jsonClusterConfigGceClusterConfigReservationAffinity ClusterConfigGceClusterConfigReservationAffinity - -func (r *ClusterConfigGceClusterConfigReservationAffinity) UnmarshalJSON(data []byte) error { - var res jsonClusterConfigGceClusterConfigReservationAffinity - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyClusterConfigGceClusterConfigReservationAffinity - } else { - - r.ConsumeReservationType = res.ConsumeReservationType - - r.Key = res.Key - - r.Values = res.Values - - } - return nil -} - -// This object is used to assert a desired state where this ClusterConfigGceClusterConfigReservationAffinity is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyClusterConfigGceClusterConfigReservationAffinity *ClusterConfigGceClusterConfigReservationAffinity = &ClusterConfigGceClusterConfigReservationAffinity{empty: true} - -func (r *ClusterConfigGceClusterConfigReservationAffinity) Empty() bool { - return r.empty -} - -func (r *ClusterConfigGceClusterConfigReservationAffinity) String() string { - return dcl.SprintResource(r) -} - -func (r *ClusterConfigGceClusterConfigReservationAffinity) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ClusterConfigGceClusterConfigNodeGroupAffinity struct { - empty bool `json:"-"` - NodeGroup *string `json:"nodeGroup"` -} - -type jsonClusterConfigGceClusterConfigNodeGroupAffinity ClusterConfigGceClusterConfigNodeGroupAffinity - -func (r *ClusterConfigGceClusterConfigNodeGroupAffinity) UnmarshalJSON(data []byte) error { - var res jsonClusterConfigGceClusterConfigNodeGroupAffinity - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyClusterConfigGceClusterConfigNodeGroupAffinity - } else { - - r.NodeGroup = res.NodeGroup - - } - return nil -} - -// This object is used to assert a desired state where this ClusterConfigGceClusterConfigNodeGroupAffinity is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyClusterConfigGceClusterConfigNodeGroupAffinity *ClusterConfigGceClusterConfigNodeGroupAffinity = &ClusterConfigGceClusterConfigNodeGroupAffinity{empty: true} - -func (r *ClusterConfigGceClusterConfigNodeGroupAffinity) Empty() bool { - return r.empty -} - -func (r *ClusterConfigGceClusterConfigNodeGroupAffinity) String() string { - return dcl.SprintResource(r) -} - -func (r *ClusterConfigGceClusterConfigNodeGroupAffinity) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ClusterConfigGceClusterConfigShieldedInstanceConfig struct { - empty bool `json:"-"` - EnableSecureBoot *bool `json:"enableSecureBoot"` - EnableVtpm *bool `json:"enableVtpm"` - EnableIntegrityMonitoring *bool `json:"enableIntegrityMonitoring"` -} - -type jsonClusterConfigGceClusterConfigShieldedInstanceConfig ClusterConfigGceClusterConfigShieldedInstanceConfig - -func (r *ClusterConfigGceClusterConfigShieldedInstanceConfig) UnmarshalJSON(data []byte) error { - var res jsonClusterConfigGceClusterConfigShieldedInstanceConfig - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyClusterConfigGceClusterConfigShieldedInstanceConfig - } else { - - r.EnableSecureBoot = res.EnableSecureBoot - - r.EnableVtpm = res.EnableVtpm - - r.EnableIntegrityMonitoring = res.EnableIntegrityMonitoring - - } - return nil -} - -// This object is used to assert a desired state where this ClusterConfigGceClusterConfigShieldedInstanceConfig is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyClusterConfigGceClusterConfigShieldedInstanceConfig *ClusterConfigGceClusterConfigShieldedInstanceConfig = &ClusterConfigGceClusterConfigShieldedInstanceConfig{empty: true} - -func (r *ClusterConfigGceClusterConfigShieldedInstanceConfig) Empty() bool { - return r.empty -} - -func (r *ClusterConfigGceClusterConfigShieldedInstanceConfig) String() string { - return dcl.SprintResource(r) -} - -func (r *ClusterConfigGceClusterConfigShieldedInstanceConfig) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ClusterConfigGceClusterConfigConfidentialInstanceConfig struct { - empty bool `json:"-"` - EnableConfidentialCompute *bool `json:"enableConfidentialCompute"` -} - -type jsonClusterConfigGceClusterConfigConfidentialInstanceConfig ClusterConfigGceClusterConfigConfidentialInstanceConfig - -func (r *ClusterConfigGceClusterConfigConfidentialInstanceConfig) UnmarshalJSON(data []byte) error { - var res jsonClusterConfigGceClusterConfigConfidentialInstanceConfig - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyClusterConfigGceClusterConfigConfidentialInstanceConfig - } else { - - r.EnableConfidentialCompute = res.EnableConfidentialCompute - - } - return nil -} - -// This object is used to assert a desired state where this ClusterConfigGceClusterConfigConfidentialInstanceConfig is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyClusterConfigGceClusterConfigConfidentialInstanceConfig *ClusterConfigGceClusterConfigConfidentialInstanceConfig = &ClusterConfigGceClusterConfigConfidentialInstanceConfig{empty: true} - -func (r *ClusterConfigGceClusterConfigConfidentialInstanceConfig) Empty() bool { - return r.empty -} - -func (r *ClusterConfigGceClusterConfigConfidentialInstanceConfig) String() string { - return dcl.SprintResource(r) -} - -func (r *ClusterConfigGceClusterConfigConfidentialInstanceConfig) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ClusterConfigMasterConfig struct { - empty bool `json:"-"` - NumInstances *int64 `json:"numInstances"` - InstanceNames []string `json:"instanceNames"` - Image *string `json:"image"` - MachineType *string `json:"machineType"` - DiskConfig *ClusterConfigMasterConfigDiskConfig `json:"diskConfig"` - IsPreemptible *bool `json:"isPreemptible"` - Preemptibility *ClusterConfigMasterConfigPreemptibilityEnum `json:"preemptibility"` - ManagedGroupConfig *ClusterConfigMasterConfigManagedGroupConfig `json:"managedGroupConfig"` - Accelerators []ClusterConfigMasterConfigAccelerators `json:"accelerators"` - MinCpuPlatform *string `json:"minCpuPlatform"` - InstanceReferences []ClusterConfigMasterConfigInstanceReferences `json:"instanceReferences"` -} - -type jsonClusterConfigMasterConfig ClusterConfigMasterConfig - -func (r *ClusterConfigMasterConfig) UnmarshalJSON(data []byte) error { - var res jsonClusterConfigMasterConfig - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyClusterConfigMasterConfig - } else { - - r.NumInstances = res.NumInstances - - r.InstanceNames = res.InstanceNames - - r.Image = res.Image - - r.MachineType = res.MachineType - - r.DiskConfig 
= res.DiskConfig - - r.IsPreemptible = res.IsPreemptible - - r.Preemptibility = res.Preemptibility - - r.ManagedGroupConfig = res.ManagedGroupConfig - - r.Accelerators = res.Accelerators - - r.MinCpuPlatform = res.MinCpuPlatform - - r.InstanceReferences = res.InstanceReferences - - } - return nil -} - -// This object is used to assert a desired state where this ClusterConfigMasterConfig is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyClusterConfigMasterConfig *ClusterConfigMasterConfig = &ClusterConfigMasterConfig{empty: true} - -func (r *ClusterConfigMasterConfig) Empty() bool { - return r.empty -} - -func (r *ClusterConfigMasterConfig) String() string { - return dcl.SprintResource(r) -} - -func (r *ClusterConfigMasterConfig) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ClusterConfigMasterConfigDiskConfig struct { - empty bool `json:"-"` - BootDiskType *string `json:"bootDiskType"` - BootDiskSizeGb *int64 `json:"bootDiskSizeGb"` - NumLocalSsds *int64 `json:"numLocalSsds"` - LocalSsdInterface *string `json:"localSsdInterface"` -} - -type jsonClusterConfigMasterConfigDiskConfig ClusterConfigMasterConfigDiskConfig - -func (r *ClusterConfigMasterConfigDiskConfig) UnmarshalJSON(data []byte) error { - var res jsonClusterConfigMasterConfigDiskConfig - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyClusterConfigMasterConfigDiskConfig - } else { - - r.BootDiskType = res.BootDiskType - - r.BootDiskSizeGb = res.BootDiskSizeGb - - r.NumLocalSsds = res.NumLocalSsds - - r.LocalSsdInterface = res.LocalSsdInterface - - } - return nil -} - -// This object is used to assert a 
desired state where this ClusterConfigMasterConfigDiskConfig is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyClusterConfigMasterConfigDiskConfig *ClusterConfigMasterConfigDiskConfig = &ClusterConfigMasterConfigDiskConfig{empty: true} - -func (r *ClusterConfigMasterConfigDiskConfig) Empty() bool { - return r.empty -} - -func (r *ClusterConfigMasterConfigDiskConfig) String() string { - return dcl.SprintResource(r) -} - -func (r *ClusterConfigMasterConfigDiskConfig) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ClusterConfigMasterConfigManagedGroupConfig struct { - empty bool `json:"-"` - InstanceTemplateName *string `json:"instanceTemplateName"` - InstanceGroupManagerName *string `json:"instanceGroupManagerName"` -} - -type jsonClusterConfigMasterConfigManagedGroupConfig ClusterConfigMasterConfigManagedGroupConfig - -func (r *ClusterConfigMasterConfigManagedGroupConfig) UnmarshalJSON(data []byte) error { - var res jsonClusterConfigMasterConfigManagedGroupConfig - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyClusterConfigMasterConfigManagedGroupConfig - } else { - - r.InstanceTemplateName = res.InstanceTemplateName - - r.InstanceGroupManagerName = res.InstanceGroupManagerName - - } - return nil -} - -// This object is used to assert a desired state where this ClusterConfigMasterConfigManagedGroupConfig is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyClusterConfigMasterConfigManagedGroupConfig *ClusterConfigMasterConfigManagedGroupConfig = &ClusterConfigMasterConfigManagedGroupConfig{empty: true} - -func (r *ClusterConfigMasterConfigManagedGroupConfig) Empty() bool { - return r.empty -} - -func (r *ClusterConfigMasterConfigManagedGroupConfig) String() string { - return dcl.SprintResource(r) -} - -func (r *ClusterConfigMasterConfigManagedGroupConfig) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ClusterConfigMasterConfigAccelerators struct { - empty bool `json:"-"` - AcceleratorType *string `json:"acceleratorType"` - AcceleratorCount *int64 `json:"acceleratorCount"` -} - -type jsonClusterConfigMasterConfigAccelerators ClusterConfigMasterConfigAccelerators - -func (r *ClusterConfigMasterConfigAccelerators) UnmarshalJSON(data []byte) error { - var res jsonClusterConfigMasterConfigAccelerators - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyClusterConfigMasterConfigAccelerators - } else { - - r.AcceleratorType = res.AcceleratorType - - r.AcceleratorCount = res.AcceleratorCount - - } - return nil -} - -// This object is used to assert a desired state where this ClusterConfigMasterConfigAccelerators is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyClusterConfigMasterConfigAccelerators *ClusterConfigMasterConfigAccelerators = &ClusterConfigMasterConfigAccelerators{empty: true} - -func (r *ClusterConfigMasterConfigAccelerators) Empty() bool { - return r.empty -} - -func (r *ClusterConfigMasterConfigAccelerators) String() string { - return dcl.SprintResource(r) -} - -func (r *ClusterConfigMasterConfigAccelerators) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ClusterConfigMasterConfigInstanceReferences struct { - empty bool `json:"-"` - InstanceName *string `json:"instanceName"` - InstanceId *string `json:"instanceId"` - PublicKey *string `json:"publicKey"` - PublicEciesKey *string `json:"publicEciesKey"` -} - -type jsonClusterConfigMasterConfigInstanceReferences ClusterConfigMasterConfigInstanceReferences - -func (r *ClusterConfigMasterConfigInstanceReferences) UnmarshalJSON(data []byte) error { - var res jsonClusterConfigMasterConfigInstanceReferences - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyClusterConfigMasterConfigInstanceReferences - } else { - - r.InstanceName = res.InstanceName - - r.InstanceId = res.InstanceId - - r.PublicKey = res.PublicKey - - r.PublicEciesKey = res.PublicEciesKey - - } - return nil -} - -// This object is used to assert a desired state where this ClusterConfigMasterConfigInstanceReferences is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyClusterConfigMasterConfigInstanceReferences *ClusterConfigMasterConfigInstanceReferences = &ClusterConfigMasterConfigInstanceReferences{empty: true} - -func (r *ClusterConfigMasterConfigInstanceReferences) Empty() bool { - return r.empty -} - -func (r *ClusterConfigMasterConfigInstanceReferences) String() string { - return dcl.SprintResource(r) -} - -func (r *ClusterConfigMasterConfigInstanceReferences) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ClusterConfigWorkerConfig struct { - empty bool `json:"-"` - NumInstances *int64 `json:"numInstances"` - InstanceNames []string `json:"instanceNames"` - Image *string `json:"image"` - MachineType *string `json:"machineType"` - DiskConfig *ClusterConfigWorkerConfigDiskConfig `json:"diskConfig"` - IsPreemptible *bool `json:"isPreemptible"` - Preemptibility *ClusterConfigWorkerConfigPreemptibilityEnum `json:"preemptibility"` - ManagedGroupConfig *ClusterConfigWorkerConfigManagedGroupConfig `json:"managedGroupConfig"` - Accelerators []ClusterConfigWorkerConfigAccelerators `json:"accelerators"` - MinCpuPlatform *string `json:"minCpuPlatform"` - InstanceReferences []ClusterConfigWorkerConfigInstanceReferences `json:"instanceReferences"` -} - -type jsonClusterConfigWorkerConfig ClusterConfigWorkerConfig - -func (r *ClusterConfigWorkerConfig) UnmarshalJSON(data []byte) error { - var res jsonClusterConfigWorkerConfig - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyClusterConfigWorkerConfig - } else { - - r.NumInstances = res.NumInstances - - r.InstanceNames = res.InstanceNames - - r.Image = res.Image - - r.MachineType = res.MachineType - - r.DiskConfig = res.DiskConfig - - r.IsPreemptible = res.IsPreemptible - - 
r.Preemptibility = res.Preemptibility - - r.ManagedGroupConfig = res.ManagedGroupConfig - - r.Accelerators = res.Accelerators - - r.MinCpuPlatform = res.MinCpuPlatform - - r.InstanceReferences = res.InstanceReferences - - } - return nil -} - -// This object is used to assert a desired state where this ClusterConfigWorkerConfig is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyClusterConfigWorkerConfig *ClusterConfigWorkerConfig = &ClusterConfigWorkerConfig{empty: true} - -func (r *ClusterConfigWorkerConfig) Empty() bool { - return r.empty -} - -func (r *ClusterConfigWorkerConfig) String() string { - return dcl.SprintResource(r) -} - -func (r *ClusterConfigWorkerConfig) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ClusterConfigWorkerConfigDiskConfig struct { - empty bool `json:"-"` - BootDiskType *string `json:"bootDiskType"` - BootDiskSizeGb *int64 `json:"bootDiskSizeGb"` - NumLocalSsds *int64 `json:"numLocalSsds"` - LocalSsdInterface *string `json:"localSsdInterface"` -} - -type jsonClusterConfigWorkerConfigDiskConfig ClusterConfigWorkerConfigDiskConfig - -func (r *ClusterConfigWorkerConfigDiskConfig) UnmarshalJSON(data []byte) error { - var res jsonClusterConfigWorkerConfigDiskConfig - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyClusterConfigWorkerConfigDiskConfig - } else { - - r.BootDiskType = res.BootDiskType - - r.BootDiskSizeGb = res.BootDiskSizeGb - - r.NumLocalSsds = res.NumLocalSsds - - r.LocalSsdInterface = res.LocalSsdInterface - - } - return nil -} - -// This object is used to assert a desired state where this ClusterConfigWorkerConfigDiskConfig is 
-// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyClusterConfigWorkerConfigDiskConfig *ClusterConfigWorkerConfigDiskConfig = &ClusterConfigWorkerConfigDiskConfig{empty: true} - -func (r *ClusterConfigWorkerConfigDiskConfig) Empty() bool { - return r.empty -} - -func (r *ClusterConfigWorkerConfigDiskConfig) String() string { - return dcl.SprintResource(r) -} - -func (r *ClusterConfigWorkerConfigDiskConfig) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ClusterConfigWorkerConfigManagedGroupConfig struct { - empty bool `json:"-"` - InstanceTemplateName *string `json:"instanceTemplateName"` - InstanceGroupManagerName *string `json:"instanceGroupManagerName"` -} - -type jsonClusterConfigWorkerConfigManagedGroupConfig ClusterConfigWorkerConfigManagedGroupConfig - -func (r *ClusterConfigWorkerConfigManagedGroupConfig) UnmarshalJSON(data []byte) error { - var res jsonClusterConfigWorkerConfigManagedGroupConfig - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyClusterConfigWorkerConfigManagedGroupConfig - } else { - - r.InstanceTemplateName = res.InstanceTemplateName - - r.InstanceGroupManagerName = res.InstanceGroupManagerName - - } - return nil -} - -// This object is used to assert a desired state where this ClusterConfigWorkerConfigManagedGroupConfig is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyClusterConfigWorkerConfigManagedGroupConfig *ClusterConfigWorkerConfigManagedGroupConfig = &ClusterConfigWorkerConfigManagedGroupConfig{empty: true} - -func (r *ClusterConfigWorkerConfigManagedGroupConfig) Empty() bool { - return r.empty -} - -func (r *ClusterConfigWorkerConfigManagedGroupConfig) String() string { - return dcl.SprintResource(r) -} - -func (r *ClusterConfigWorkerConfigManagedGroupConfig) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ClusterConfigWorkerConfigAccelerators struct { - empty bool `json:"-"` - AcceleratorType *string `json:"acceleratorType"` - AcceleratorCount *int64 `json:"acceleratorCount"` -} - -type jsonClusterConfigWorkerConfigAccelerators ClusterConfigWorkerConfigAccelerators - -func (r *ClusterConfigWorkerConfigAccelerators) UnmarshalJSON(data []byte) error { - var res jsonClusterConfigWorkerConfigAccelerators - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyClusterConfigWorkerConfigAccelerators - } else { - - r.AcceleratorType = res.AcceleratorType - - r.AcceleratorCount = res.AcceleratorCount - - } - return nil -} - -// This object is used to assert a desired state where this ClusterConfigWorkerConfigAccelerators is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyClusterConfigWorkerConfigAccelerators *ClusterConfigWorkerConfigAccelerators = &ClusterConfigWorkerConfigAccelerators{empty: true} - -func (r *ClusterConfigWorkerConfigAccelerators) Empty() bool { - return r.empty -} - -func (r *ClusterConfigWorkerConfigAccelerators) String() string { - return dcl.SprintResource(r) -} - -func (r *ClusterConfigWorkerConfigAccelerators) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ClusterConfigWorkerConfigInstanceReferences struct { - empty bool `json:"-"` - InstanceName *string `json:"instanceName"` - InstanceId *string `json:"instanceId"` - PublicKey *string `json:"publicKey"` - PublicEciesKey *string `json:"publicEciesKey"` -} - -type jsonClusterConfigWorkerConfigInstanceReferences ClusterConfigWorkerConfigInstanceReferences - -func (r *ClusterConfigWorkerConfigInstanceReferences) UnmarshalJSON(data []byte) error { - var res jsonClusterConfigWorkerConfigInstanceReferences - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyClusterConfigWorkerConfigInstanceReferences - } else { - - r.InstanceName = res.InstanceName - - r.InstanceId = res.InstanceId - - r.PublicKey = res.PublicKey - - r.PublicEciesKey = res.PublicEciesKey - - } - return nil -} - -// This object is used to assert a desired state where this ClusterConfigWorkerConfigInstanceReferences is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyClusterConfigWorkerConfigInstanceReferences *ClusterConfigWorkerConfigInstanceReferences = &ClusterConfigWorkerConfigInstanceReferences{empty: true} - -func (r *ClusterConfigWorkerConfigInstanceReferences) Empty() bool { - return r.empty -} - -func (r *ClusterConfigWorkerConfigInstanceReferences) String() string { - return dcl.SprintResource(r) -} - -func (r *ClusterConfigWorkerConfigInstanceReferences) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ClusterConfigSecondaryWorkerConfig struct { - empty bool `json:"-"` - NumInstances *int64 `json:"numInstances"` - InstanceNames []string `json:"instanceNames"` - Image *string `json:"image"` - MachineType *string `json:"machineType"` - DiskConfig *ClusterConfigSecondaryWorkerConfigDiskConfig `json:"diskConfig"` - IsPreemptible *bool `json:"isPreemptible"` - Preemptibility *ClusterConfigSecondaryWorkerConfigPreemptibilityEnum `json:"preemptibility"` - ManagedGroupConfig *ClusterConfigSecondaryWorkerConfigManagedGroupConfig `json:"managedGroupConfig"` - Accelerators []ClusterConfigSecondaryWorkerConfigAccelerators `json:"accelerators"` - MinCpuPlatform *string `json:"minCpuPlatform"` - InstanceReferences []ClusterConfigSecondaryWorkerConfigInstanceReferences `json:"instanceReferences"` -} - -type jsonClusterConfigSecondaryWorkerConfig ClusterConfigSecondaryWorkerConfig - -func (r *ClusterConfigSecondaryWorkerConfig) UnmarshalJSON(data []byte) error { - var res jsonClusterConfigSecondaryWorkerConfig - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyClusterConfigSecondaryWorkerConfig - } else { - - r.NumInstances = res.NumInstances - - r.InstanceNames = res.InstanceNames - - r.Image = res.Image - - r.MachineType = 
res.MachineType - - r.DiskConfig = res.DiskConfig - - r.IsPreemptible = res.IsPreemptible - - r.Preemptibility = res.Preemptibility - - r.ManagedGroupConfig = res.ManagedGroupConfig - - r.Accelerators = res.Accelerators - - r.MinCpuPlatform = res.MinCpuPlatform - - r.InstanceReferences = res.InstanceReferences - - } - return nil -} - -// This object is used to assert a desired state where this ClusterConfigSecondaryWorkerConfig is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyClusterConfigSecondaryWorkerConfig *ClusterConfigSecondaryWorkerConfig = &ClusterConfigSecondaryWorkerConfig{empty: true} - -func (r *ClusterConfigSecondaryWorkerConfig) Empty() bool { - return r.empty -} - -func (r *ClusterConfigSecondaryWorkerConfig) String() string { - return dcl.SprintResource(r) -} - -func (r *ClusterConfigSecondaryWorkerConfig) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ClusterConfigSecondaryWorkerConfigDiskConfig struct { - empty bool `json:"-"` - BootDiskType *string `json:"bootDiskType"` - BootDiskSizeGb *int64 `json:"bootDiskSizeGb"` - NumLocalSsds *int64 `json:"numLocalSsds"` - LocalSsdInterface *string `json:"localSsdInterface"` -} - -type jsonClusterConfigSecondaryWorkerConfigDiskConfig ClusterConfigSecondaryWorkerConfigDiskConfig - -func (r *ClusterConfigSecondaryWorkerConfigDiskConfig) UnmarshalJSON(data []byte) error { - var res jsonClusterConfigSecondaryWorkerConfigDiskConfig - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyClusterConfigSecondaryWorkerConfigDiskConfig - } else { - - r.BootDiskType = res.BootDiskType - - r.BootDiskSizeGb = res.BootDiskSizeGb 
- - r.NumLocalSsds = res.NumLocalSsds - - r.LocalSsdInterface = res.LocalSsdInterface - - } - return nil -} - -// This object is used to assert a desired state where this ClusterConfigSecondaryWorkerConfigDiskConfig is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyClusterConfigSecondaryWorkerConfigDiskConfig *ClusterConfigSecondaryWorkerConfigDiskConfig = &ClusterConfigSecondaryWorkerConfigDiskConfig{empty: true} - -func (r *ClusterConfigSecondaryWorkerConfigDiskConfig) Empty() bool { - return r.empty -} - -func (r *ClusterConfigSecondaryWorkerConfigDiskConfig) String() string { - return dcl.SprintResource(r) -} - -func (r *ClusterConfigSecondaryWorkerConfigDiskConfig) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ClusterConfigSecondaryWorkerConfigManagedGroupConfig struct { - empty bool `json:"-"` - InstanceTemplateName *string `json:"instanceTemplateName"` - InstanceGroupManagerName *string `json:"instanceGroupManagerName"` -} - -type jsonClusterConfigSecondaryWorkerConfigManagedGroupConfig ClusterConfigSecondaryWorkerConfigManagedGroupConfig - -func (r *ClusterConfigSecondaryWorkerConfigManagedGroupConfig) UnmarshalJSON(data []byte) error { - var res jsonClusterConfigSecondaryWorkerConfigManagedGroupConfig - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyClusterConfigSecondaryWorkerConfigManagedGroupConfig - } else { - - r.InstanceTemplateName = res.InstanceTemplateName - - r.InstanceGroupManagerName = res.InstanceGroupManagerName - - } - return nil -} - -// This object is used to assert a desired state where this 
ClusterConfigSecondaryWorkerConfigManagedGroupConfig is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyClusterConfigSecondaryWorkerConfigManagedGroupConfig *ClusterConfigSecondaryWorkerConfigManagedGroupConfig = &ClusterConfigSecondaryWorkerConfigManagedGroupConfig{empty: true} - -func (r *ClusterConfigSecondaryWorkerConfigManagedGroupConfig) Empty() bool { - return r.empty -} - -func (r *ClusterConfigSecondaryWorkerConfigManagedGroupConfig) String() string { - return dcl.SprintResource(r) -} - -func (r *ClusterConfigSecondaryWorkerConfigManagedGroupConfig) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ClusterConfigSecondaryWorkerConfigAccelerators struct { - empty bool `json:"-"` - AcceleratorType *string `json:"acceleratorType"` - AcceleratorCount *int64 `json:"acceleratorCount"` -} - -type jsonClusterConfigSecondaryWorkerConfigAccelerators ClusterConfigSecondaryWorkerConfigAccelerators - -func (r *ClusterConfigSecondaryWorkerConfigAccelerators) UnmarshalJSON(data []byte) error { - var res jsonClusterConfigSecondaryWorkerConfigAccelerators - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyClusterConfigSecondaryWorkerConfigAccelerators - } else { - - r.AcceleratorType = res.AcceleratorType - - r.AcceleratorCount = res.AcceleratorCount - - } - return nil -} - -// This object is used to assert a desired state where this ClusterConfigSecondaryWorkerConfigAccelerators is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyClusterConfigSecondaryWorkerConfigAccelerators *ClusterConfigSecondaryWorkerConfigAccelerators = &ClusterConfigSecondaryWorkerConfigAccelerators{empty: true} - -func (r *ClusterConfigSecondaryWorkerConfigAccelerators) Empty() bool { - return r.empty -} - -func (r *ClusterConfigSecondaryWorkerConfigAccelerators) String() string { - return dcl.SprintResource(r) -} - -func (r *ClusterConfigSecondaryWorkerConfigAccelerators) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ClusterConfigSecondaryWorkerConfigInstanceReferences struct { - empty bool `json:"-"` - InstanceName *string `json:"instanceName"` - InstanceId *string `json:"instanceId"` - PublicKey *string `json:"publicKey"` - PublicEciesKey *string `json:"publicEciesKey"` -} - -type jsonClusterConfigSecondaryWorkerConfigInstanceReferences ClusterConfigSecondaryWorkerConfigInstanceReferences - -func (r *ClusterConfigSecondaryWorkerConfigInstanceReferences) UnmarshalJSON(data []byte) error { - var res jsonClusterConfigSecondaryWorkerConfigInstanceReferences - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyClusterConfigSecondaryWorkerConfigInstanceReferences - } else { - - r.InstanceName = res.InstanceName - - r.InstanceId = res.InstanceId - - r.PublicKey = res.PublicKey - - r.PublicEciesKey = res.PublicEciesKey - - } - return nil -} - -// This object is used to assert a desired state where this ClusterConfigSecondaryWorkerConfigInstanceReferences is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyClusterConfigSecondaryWorkerConfigInstanceReferences *ClusterConfigSecondaryWorkerConfigInstanceReferences = &ClusterConfigSecondaryWorkerConfigInstanceReferences{empty: true} - -func (r *ClusterConfigSecondaryWorkerConfigInstanceReferences) Empty() bool { - return r.empty -} - -func (r *ClusterConfigSecondaryWorkerConfigInstanceReferences) String() string { - return dcl.SprintResource(r) -} - -func (r *ClusterConfigSecondaryWorkerConfigInstanceReferences) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ClusterConfigSoftwareConfig struct { - empty bool `json:"-"` - ImageVersion *string `json:"imageVersion"` - Properties map[string]string `json:"properties"` - OptionalComponents []ClusterConfigSoftwareConfigOptionalComponentsEnum `json:"optionalComponents"` -} - -type jsonClusterConfigSoftwareConfig ClusterConfigSoftwareConfig - -func (r *ClusterConfigSoftwareConfig) UnmarshalJSON(data []byte) error { - var res jsonClusterConfigSoftwareConfig - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyClusterConfigSoftwareConfig - } else { - - r.ImageVersion = res.ImageVersion - - r.Properties = res.Properties - - r.OptionalComponents = res.OptionalComponents - - } - return nil -} - -// This object is used to assert a desired state where this ClusterConfigSoftwareConfig is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyClusterConfigSoftwareConfig *ClusterConfigSoftwareConfig = &ClusterConfigSoftwareConfig{empty: true} - -func (r *ClusterConfigSoftwareConfig) Empty() bool { - return r.empty -} - -func (r *ClusterConfigSoftwareConfig) String() string { - return dcl.SprintResource(r) -} - -func (r *ClusterConfigSoftwareConfig) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ClusterConfigInitializationActions struct { - empty bool `json:"-"` - ExecutableFile *string `json:"executableFile"` - ExecutionTimeout *string `json:"executionTimeout"` -} - -type jsonClusterConfigInitializationActions ClusterConfigInitializationActions - -func (r *ClusterConfigInitializationActions) UnmarshalJSON(data []byte) error { - var res jsonClusterConfigInitializationActions - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyClusterConfigInitializationActions - } else { - - r.ExecutableFile = res.ExecutableFile - - r.ExecutionTimeout = res.ExecutionTimeout - - } - return nil -} - -// This object is used to assert a desired state where this ClusterConfigInitializationActions is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyClusterConfigInitializationActions *ClusterConfigInitializationActions = &ClusterConfigInitializationActions{empty: true} - -func (r *ClusterConfigInitializationActions) Empty() bool { - return r.empty -} - -func (r *ClusterConfigInitializationActions) String() string { - return dcl.SprintResource(r) -} - -func (r *ClusterConfigInitializationActions) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ClusterConfigEncryptionConfig struct { - empty bool `json:"-"` - GcePdKmsKeyName *string `json:"gcePdKmsKeyName"` -} - -type jsonClusterConfigEncryptionConfig ClusterConfigEncryptionConfig - -func (r *ClusterConfigEncryptionConfig) UnmarshalJSON(data []byte) error { - var res jsonClusterConfigEncryptionConfig - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyClusterConfigEncryptionConfig - } else { - - r.GcePdKmsKeyName = res.GcePdKmsKeyName - - } - return nil -} - -// This object is used to assert a desired state where this ClusterConfigEncryptionConfig is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyClusterConfigEncryptionConfig *ClusterConfigEncryptionConfig = &ClusterConfigEncryptionConfig{empty: true} - -func (r *ClusterConfigEncryptionConfig) Empty() bool { - return r.empty -} - -func (r *ClusterConfigEncryptionConfig) String() string { - return dcl.SprintResource(r) -} - -func (r *ClusterConfigEncryptionConfig) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ClusterConfigAutoscalingConfig struct { - empty bool `json:"-"` - Policy *string `json:"policy"` -} - -type jsonClusterConfigAutoscalingConfig ClusterConfigAutoscalingConfig - -func (r *ClusterConfigAutoscalingConfig) UnmarshalJSON(data []byte) error { - var res jsonClusterConfigAutoscalingConfig - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyClusterConfigAutoscalingConfig - } else { - - r.Policy = res.Policy - - } - return nil -} - -// This object is used to assert a desired state where this ClusterConfigAutoscalingConfig is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyClusterConfigAutoscalingConfig *ClusterConfigAutoscalingConfig = &ClusterConfigAutoscalingConfig{empty: true} - -func (r *ClusterConfigAutoscalingConfig) Empty() bool { - return r.empty -} - -func (r *ClusterConfigAutoscalingConfig) String() string { - return dcl.SprintResource(r) -} - -func (r *ClusterConfigAutoscalingConfig) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ClusterConfigSecurityConfig struct { - empty bool `json:"-"` - KerberosConfig *ClusterConfigSecurityConfigKerberosConfig `json:"kerberosConfig"` - IdentityConfig *ClusterConfigSecurityConfigIdentityConfig `json:"identityConfig"` -} - -type jsonClusterConfigSecurityConfig ClusterConfigSecurityConfig - -func (r *ClusterConfigSecurityConfig) UnmarshalJSON(data []byte) error { - var res jsonClusterConfigSecurityConfig - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyClusterConfigSecurityConfig - } else { - - r.KerberosConfig = res.KerberosConfig - - r.IdentityConfig = res.IdentityConfig - - } - return nil -} - -// This object is used to assert a desired state where this ClusterConfigSecurityConfig is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyClusterConfigSecurityConfig *ClusterConfigSecurityConfig = &ClusterConfigSecurityConfig{empty: true} - -func (r *ClusterConfigSecurityConfig) Empty() bool { - return r.empty -} - -func (r *ClusterConfigSecurityConfig) String() string { - return dcl.SprintResource(r) -} - -func (r *ClusterConfigSecurityConfig) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ClusterConfigSecurityConfigKerberosConfig struct { - empty bool `json:"-"` - EnableKerberos *bool `json:"enableKerberos"` - RootPrincipalPassword *string `json:"rootPrincipalPassword"` - KmsKey *string `json:"kmsKey"` - Keystore *string `json:"keystore"` - Truststore *string `json:"truststore"` - KeystorePassword *string `json:"keystorePassword"` - KeyPassword *string `json:"keyPassword"` - TruststorePassword *string `json:"truststorePassword"` - CrossRealmTrustRealm *string `json:"crossRealmTrustRealm"` - CrossRealmTrustKdc *string `json:"crossRealmTrustKdc"` - CrossRealmTrustAdminServer *string `json:"crossRealmTrustAdminServer"` - CrossRealmTrustSharedPassword *string `json:"crossRealmTrustSharedPassword"` - KdcDbKey *string `json:"kdcDbKey"` - TgtLifetimeHours *int64 `json:"tgtLifetimeHours"` - Realm *string `json:"realm"` -} - -type jsonClusterConfigSecurityConfigKerberosConfig ClusterConfigSecurityConfigKerberosConfig - -func (r *ClusterConfigSecurityConfigKerberosConfig) UnmarshalJSON(data []byte) error { - var res jsonClusterConfigSecurityConfigKerberosConfig - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyClusterConfigSecurityConfigKerberosConfig - } else { - - r.EnableKerberos = res.EnableKerberos - - r.RootPrincipalPassword = res.RootPrincipalPassword - - r.KmsKey = res.KmsKey - - r.Keystore = 
res.Keystore - - r.Truststore = res.Truststore - - r.KeystorePassword = res.KeystorePassword - - r.KeyPassword = res.KeyPassword - - r.TruststorePassword = res.TruststorePassword - - r.CrossRealmTrustRealm = res.CrossRealmTrustRealm - - r.CrossRealmTrustKdc = res.CrossRealmTrustKdc - - r.CrossRealmTrustAdminServer = res.CrossRealmTrustAdminServer - - r.CrossRealmTrustSharedPassword = res.CrossRealmTrustSharedPassword - - r.KdcDbKey = res.KdcDbKey - - r.TgtLifetimeHours = res.TgtLifetimeHours - - r.Realm = res.Realm - - } - return nil -} - -// This object is used to assert a desired state where this ClusterConfigSecurityConfigKerberosConfig is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyClusterConfigSecurityConfigKerberosConfig *ClusterConfigSecurityConfigKerberosConfig = &ClusterConfigSecurityConfigKerberosConfig{empty: true} - -func (r *ClusterConfigSecurityConfigKerberosConfig) Empty() bool { - return r.empty -} - -func (r *ClusterConfigSecurityConfigKerberosConfig) String() string { - return dcl.SprintResource(r) -} - -func (r *ClusterConfigSecurityConfigKerberosConfig) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ClusterConfigSecurityConfigIdentityConfig struct { - empty bool `json:"-"` - UserServiceAccountMapping map[string]string `json:"userServiceAccountMapping"` -} - -type jsonClusterConfigSecurityConfigIdentityConfig ClusterConfigSecurityConfigIdentityConfig - -func (r *ClusterConfigSecurityConfigIdentityConfig) UnmarshalJSON(data []byte) error { - var res jsonClusterConfigSecurityConfigIdentityConfig - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = 
*EmptyClusterConfigSecurityConfigIdentityConfig - } else { - - r.UserServiceAccountMapping = res.UserServiceAccountMapping - - } - return nil -} - -// This object is used to assert a desired state where this ClusterConfigSecurityConfigIdentityConfig is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyClusterConfigSecurityConfigIdentityConfig *ClusterConfigSecurityConfigIdentityConfig = &ClusterConfigSecurityConfigIdentityConfig{empty: true} - -func (r *ClusterConfigSecurityConfigIdentityConfig) Empty() bool { - return r.empty -} - -func (r *ClusterConfigSecurityConfigIdentityConfig) String() string { - return dcl.SprintResource(r) -} - -func (r *ClusterConfigSecurityConfigIdentityConfig) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ClusterConfigLifecycleConfig struct { - empty bool `json:"-"` - IdleDeleteTtl *string `json:"idleDeleteTtl"` - AutoDeleteTime *string `json:"autoDeleteTime"` - AutoDeleteTtl *string `json:"autoDeleteTtl"` - IdleStartTime *string `json:"idleStartTime"` -} - -type jsonClusterConfigLifecycleConfig ClusterConfigLifecycleConfig - -func (r *ClusterConfigLifecycleConfig) UnmarshalJSON(data []byte) error { - var res jsonClusterConfigLifecycleConfig - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyClusterConfigLifecycleConfig - } else { - - r.IdleDeleteTtl = res.IdleDeleteTtl - - r.AutoDeleteTime = res.AutoDeleteTime - - r.AutoDeleteTtl = res.AutoDeleteTtl - - r.IdleStartTime = res.IdleStartTime - - } - return nil -} - -// This object is used to assert a desired state where this ClusterConfigLifecycleConfig is -// empty. 
Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyClusterConfigLifecycleConfig *ClusterConfigLifecycleConfig = &ClusterConfigLifecycleConfig{empty: true} - -func (r *ClusterConfigLifecycleConfig) Empty() bool { - return r.empty -} - -func (r *ClusterConfigLifecycleConfig) String() string { - return dcl.SprintResource(r) -} - -func (r *ClusterConfigLifecycleConfig) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ClusterConfigEndpointConfig struct { - empty bool `json:"-"` - HttpPorts map[string]string `json:"httpPorts"` - EnableHttpPortAccess *bool `json:"enableHttpPortAccess"` -} - -type jsonClusterConfigEndpointConfig ClusterConfigEndpointConfig - -func (r *ClusterConfigEndpointConfig) UnmarshalJSON(data []byte) error { - var res jsonClusterConfigEndpointConfig - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyClusterConfigEndpointConfig - } else { - - r.HttpPorts = res.HttpPorts - - r.EnableHttpPortAccess = res.EnableHttpPortAccess - - } - return nil -} - -// This object is used to assert a desired state where this ClusterConfigEndpointConfig is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyClusterConfigEndpointConfig *ClusterConfigEndpointConfig = &ClusterConfigEndpointConfig{empty: true} - -func (r *ClusterConfigEndpointConfig) Empty() bool { - return r.empty -} - -func (r *ClusterConfigEndpointConfig) String() string { - return dcl.SprintResource(r) -} - -func (r *ClusterConfigEndpointConfig) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ClusterConfigGkeClusterConfig struct { - empty bool `json:"-"` - NamespacedGkeDeploymentTarget *ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget `json:"namespacedGkeDeploymentTarget"` -} - -type jsonClusterConfigGkeClusterConfig ClusterConfigGkeClusterConfig - -func (r *ClusterConfigGkeClusterConfig) UnmarshalJSON(data []byte) error { - var res jsonClusterConfigGkeClusterConfig - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyClusterConfigGkeClusterConfig - } else { - - r.NamespacedGkeDeploymentTarget = res.NamespacedGkeDeploymentTarget - - } - return nil -} - -// This object is used to assert a desired state where this ClusterConfigGkeClusterConfig is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyClusterConfigGkeClusterConfig *ClusterConfigGkeClusterConfig = &ClusterConfigGkeClusterConfig{empty: true} - -func (r *ClusterConfigGkeClusterConfig) Empty() bool { - return r.empty -} - -func (r *ClusterConfigGkeClusterConfig) String() string { - return dcl.SprintResource(r) -} - -func (r *ClusterConfigGkeClusterConfig) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget struct { - empty bool `json:"-"` - TargetGkeCluster *string `json:"targetGkeCluster"` - ClusterNamespace *string `json:"clusterNamespace"` -} - -type jsonClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget - -func (r *ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) UnmarshalJSON(data []byte) error { - var res jsonClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget - } else { - - r.TargetGkeCluster = res.TargetGkeCluster - - r.ClusterNamespace = res.ClusterNamespace - - } - return nil -} - -// This object is used to assert a desired state where this ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget *ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget = &ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget{empty: true} - -func (r *ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) Empty() bool { - return r.empty -} - -func (r *ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) String() string { - return dcl.SprintResource(r) -} - -func (r *ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ClusterConfigMetastoreConfig struct { - empty bool `json:"-"` - DataprocMetastoreService *string `json:"dataprocMetastoreService"` -} - -type jsonClusterConfigMetastoreConfig ClusterConfigMetastoreConfig - -func (r *ClusterConfigMetastoreConfig) UnmarshalJSON(data []byte) error { - var res jsonClusterConfigMetastoreConfig - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyClusterConfigMetastoreConfig - } else { - - r.DataprocMetastoreService = res.DataprocMetastoreService - - } - return nil -} - -// This object is used to assert a desired state where this ClusterConfigMetastoreConfig is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyClusterConfigMetastoreConfig *ClusterConfigMetastoreConfig = &ClusterConfigMetastoreConfig{empty: true} - -func (r *ClusterConfigMetastoreConfig) Empty() bool { - return r.empty -} - -func (r *ClusterConfigMetastoreConfig) String() string { - return dcl.SprintResource(r) -} - -func (r *ClusterConfigMetastoreConfig) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ClusterConfigDataprocMetricConfig struct { - empty bool `json:"-"` - Metrics []ClusterConfigDataprocMetricConfigMetrics `json:"metrics"` -} - -type jsonClusterConfigDataprocMetricConfig ClusterConfigDataprocMetricConfig - -func (r *ClusterConfigDataprocMetricConfig) UnmarshalJSON(data []byte) error { - var res jsonClusterConfigDataprocMetricConfig - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyClusterConfigDataprocMetricConfig - } else { - - r.Metrics = res.Metrics - - } - return nil -} - -// This object is used to assert a desired state where this ClusterConfigDataprocMetricConfig is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyClusterConfigDataprocMetricConfig *ClusterConfigDataprocMetricConfig = &ClusterConfigDataprocMetricConfig{empty: true} - -func (r *ClusterConfigDataprocMetricConfig) Empty() bool { - return r.empty -} - -func (r *ClusterConfigDataprocMetricConfig) String() string { - return dcl.SprintResource(r) -} - -func (r *ClusterConfigDataprocMetricConfig) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ClusterConfigDataprocMetricConfigMetrics struct { - empty bool `json:"-"` - MetricSource *ClusterConfigDataprocMetricConfigMetricsMetricSourceEnum `json:"metricSource"` - MetricOverrides []string `json:"metricOverrides"` -} - -type jsonClusterConfigDataprocMetricConfigMetrics ClusterConfigDataprocMetricConfigMetrics - -func (r *ClusterConfigDataprocMetricConfigMetrics) UnmarshalJSON(data []byte) error { - var res jsonClusterConfigDataprocMetricConfigMetrics - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyClusterConfigDataprocMetricConfigMetrics - } else { - - r.MetricSource = res.MetricSource - - r.MetricOverrides = res.MetricOverrides - - } - return nil -} - -// This object is used to assert a desired state where this ClusterConfigDataprocMetricConfigMetrics is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyClusterConfigDataprocMetricConfigMetrics *ClusterConfigDataprocMetricConfigMetrics = &ClusterConfigDataprocMetricConfigMetrics{empty: true} - -func (r *ClusterConfigDataprocMetricConfigMetrics) Empty() bool { - return r.empty -} - -func (r *ClusterConfigDataprocMetricConfigMetrics) String() string { - return dcl.SprintResource(r) -} - -func (r *ClusterConfigDataprocMetricConfigMetrics) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ClusterStatus struct { - empty bool `json:"-"` - State *ClusterStatusStateEnum `json:"state"` - Detail *string `json:"detail"` - StateStartTime *string `json:"stateStartTime"` - Substate *ClusterStatusSubstateEnum `json:"substate"` -} - -type jsonClusterStatus ClusterStatus - -func (r *ClusterStatus) UnmarshalJSON(data []byte) error { - var res jsonClusterStatus - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyClusterStatus - } else { - - r.State = res.State - - r.Detail = res.Detail - - r.StateStartTime = res.StateStartTime - - r.Substate = res.Substate - - } - return nil -} - -// This object is used to assert a desired state where this ClusterStatus is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyClusterStatus *ClusterStatus = &ClusterStatus{empty: true} - -func (r *ClusterStatus) Empty() bool { - return r.empty -} - -func (r *ClusterStatus) String() string { - return dcl.SprintResource(r) -} - -func (r *ClusterStatus) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ClusterStatusHistory struct { - empty bool `json:"-"` - State *ClusterStatusHistoryStateEnum `json:"state"` - Detail *string `json:"detail"` - StateStartTime *string `json:"stateStartTime"` - Substate *ClusterStatusHistorySubstateEnum `json:"substate"` -} - -type jsonClusterStatusHistory ClusterStatusHistory - -func (r *ClusterStatusHistory) UnmarshalJSON(data []byte) error { - var res jsonClusterStatusHistory - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyClusterStatusHistory - } else { - - r.State = res.State - - r.Detail = res.Detail - - r.StateStartTime = res.StateStartTime - - r.Substate = res.Substate - - } - return nil -} - -// This object is used to assert a desired state where this ClusterStatusHistory is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyClusterStatusHistory *ClusterStatusHistory = &ClusterStatusHistory{empty: true} - -func (r *ClusterStatusHistory) Empty() bool { - return r.empty -} - -func (r *ClusterStatusHistory) String() string { - return dcl.SprintResource(r) -} - -func (r *ClusterStatusHistory) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ClusterMetrics struct { - empty bool `json:"-"` - HdfsMetrics map[string]string `json:"hdfsMetrics"` - YarnMetrics map[string]string `json:"yarnMetrics"` -} - -type jsonClusterMetrics ClusterMetrics - -func (r *ClusterMetrics) UnmarshalJSON(data []byte) error { - var res jsonClusterMetrics - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyClusterMetrics - } else { - - r.HdfsMetrics = res.HdfsMetrics - - r.YarnMetrics = res.YarnMetrics - - } - return nil -} - -// This object is used to assert a desired state where this ClusterMetrics is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyClusterMetrics *ClusterMetrics = &ClusterMetrics{empty: true} - -func (r *ClusterMetrics) Empty() bool { - return r.empty -} - -func (r *ClusterMetrics) String() string { - return dcl.SprintResource(r) -} - -func (r *ClusterMetrics) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ClusterVirtualClusterConfig struct { - empty bool `json:"-"` - StagingBucket *string `json:"stagingBucket"` - KubernetesClusterConfig *ClusterVirtualClusterConfigKubernetesClusterConfig `json:"kubernetesClusterConfig"` - AuxiliaryServicesConfig *ClusterVirtualClusterConfigAuxiliaryServicesConfig `json:"auxiliaryServicesConfig"` -} - -type jsonClusterVirtualClusterConfig ClusterVirtualClusterConfig - -func (r *ClusterVirtualClusterConfig) UnmarshalJSON(data []byte) error { - var res jsonClusterVirtualClusterConfig - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyClusterVirtualClusterConfig - } else { - - r.StagingBucket = res.StagingBucket - - r.KubernetesClusterConfig = res.KubernetesClusterConfig - - r.AuxiliaryServicesConfig = res.AuxiliaryServicesConfig - - } - return nil -} - -// This object is used to assert a desired state where this ClusterVirtualClusterConfig is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyClusterVirtualClusterConfig *ClusterVirtualClusterConfig = &ClusterVirtualClusterConfig{empty: true} - -func (r *ClusterVirtualClusterConfig) Empty() bool { - return r.empty -} - -func (r *ClusterVirtualClusterConfig) String() string { - return dcl.SprintResource(r) -} - -func (r *ClusterVirtualClusterConfig) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ClusterVirtualClusterConfigKubernetesClusterConfig struct { - empty bool `json:"-"` - KubernetesNamespace *string `json:"kubernetesNamespace"` - GkeClusterConfig *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig `json:"gkeClusterConfig"` - KubernetesSoftwareConfig *ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig `json:"kubernetesSoftwareConfig"` -} - -type jsonClusterVirtualClusterConfigKubernetesClusterConfig ClusterVirtualClusterConfigKubernetesClusterConfig - -func (r *ClusterVirtualClusterConfigKubernetesClusterConfig) UnmarshalJSON(data []byte) error { - var res jsonClusterVirtualClusterConfigKubernetesClusterConfig - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyClusterVirtualClusterConfigKubernetesClusterConfig - } else { - - r.KubernetesNamespace = res.KubernetesNamespace - - r.GkeClusterConfig = res.GkeClusterConfig - - r.KubernetesSoftwareConfig = res.KubernetesSoftwareConfig - - } - return nil -} - -// This object is used to assert a desired state where this ClusterVirtualClusterConfigKubernetesClusterConfig is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyClusterVirtualClusterConfigKubernetesClusterConfig *ClusterVirtualClusterConfigKubernetesClusterConfig = &ClusterVirtualClusterConfigKubernetesClusterConfig{empty: true} - -func (r *ClusterVirtualClusterConfigKubernetesClusterConfig) Empty() bool { - return r.empty -} - -func (r *ClusterVirtualClusterConfigKubernetesClusterConfig) String() string { - return dcl.SprintResource(r) -} - -func (r *ClusterVirtualClusterConfigKubernetesClusterConfig) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig struct { - empty bool `json:"-"` - GkeClusterTarget *string `json:"gkeClusterTarget"` - NodePoolTarget []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget `json:"nodePoolTarget"` -} - -type jsonClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig - -func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig) UnmarshalJSON(data []byte) error { - var res jsonClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig - } else { - - r.GkeClusterTarget = res.GkeClusterTarget - - r.NodePoolTarget = res.NodePoolTarget - - } - return nil -} - -// This object is used to assert a desired state where this ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig = &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig{empty: true} - -func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig) Empty() bool { - return r.empty -} - -func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig) String() string { - return dcl.SprintResource(r) -} - -func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget struct { - empty bool `json:"-"` - NodePool *string `json:"nodePool"` - Roles []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum `json:"roles"` - NodePoolConfig *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig `json:"nodePoolConfig"` -} - -type jsonClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget - -func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget) UnmarshalJSON(data []byte) error { - var res jsonClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget - } else { - - r.NodePool = res.NodePool - - r.Roles = res.Roles - - r.NodePoolConfig = res.NodePoolConfig - - } - return nil -} - -// This object is used to 
assert a desired state where this ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget = &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget{empty: true} - -func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget) Empty() bool { - return r.empty -} - -func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget) String() string { - return dcl.SprintResource(r) -} - -func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig struct { - empty bool `json:"-"` - Config *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig `json:"config"` - Locations []string `json:"locations"` - Autoscaling *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling `json:"autoscaling"` -} - -type jsonClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig - -func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig) UnmarshalJSON(data []byte) error { - var res 
jsonClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig - } else { - - r.Config = res.Config - - r.Locations = res.Locations - - r.Autoscaling = res.Autoscaling - - } - return nil -} - -// This object is used to assert a desired state where this ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig = &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig{empty: true} - -func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig) Empty() bool { - return r.empty -} - -func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig) String() string { - return dcl.SprintResource(r) -} - -func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig struct { - empty bool `json:"-"` - MachineType *string `json:"machineType"` - LocalSsdCount *int64 `json:"localSsdCount"` - Preemptible *bool 
`json:"preemptible"` - Accelerators []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators `json:"accelerators"` - MinCpuPlatform *string `json:"minCpuPlatform"` - BootDiskKmsKey *string `json:"bootDiskKmsKey"` - EphemeralStorageConfig *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig `json:"ephemeralStorageConfig"` - Spot *bool `json:"spot"` -} - -type jsonClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig - -func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig) UnmarshalJSON(data []byte) error { - var res jsonClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig - } else { - - r.MachineType = res.MachineType - - r.LocalSsdCount = res.LocalSsdCount - - r.Preemptible = res.Preemptible - - r.Accelerators = res.Accelerators - - r.MinCpuPlatform = res.MinCpuPlatform - - r.BootDiskKmsKey = res.BootDiskKmsKey - - r.EphemeralStorageConfig = res.EphemeralStorageConfig - - r.Spot = res.Spot - - } - return nil -} - -// This object is used to assert a desired state where this ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig = &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig{empty: true} - -func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig) Empty() bool { - return r.empty -} - -func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig) String() string { - return dcl.SprintResource(r) -} - -func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators struct { - empty bool `json:"-"` - AcceleratorCount *int64 `json:"acceleratorCount"` - AcceleratorType *string `json:"acceleratorType"` - GpuPartitionSize *string `json:"gpuPartitionSize"` -} - -type jsonClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators - -func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators) UnmarshalJSON(data []byte) error { - var res jsonClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = 
*EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators - } else { - - r.AcceleratorCount = res.AcceleratorCount - - r.AcceleratorType = res.AcceleratorType - - r.GpuPartitionSize = res.GpuPartitionSize - - } - return nil -} - -// This object is used to assert a desired state where this ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators = &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators{empty: true} - -func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators) Empty() bool { - return r.empty -} - -func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators) String() string { - return dcl.SprintResource(r) -} - -func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig struct { - empty bool `json:"-"` - LocalSsdCount *int64 `json:"localSsdCount"` -} - -type 
jsonClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig - -func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig) UnmarshalJSON(data []byte) error { - var res jsonClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig - } else { - - r.LocalSsdCount = res.LocalSsdCount - - } - return nil -} - -// This object is used to assert a desired state where this ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig = &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig{empty: true} - -func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig) Empty() bool { - return r.empty -} - -func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig) String() string { - return dcl.SprintResource(r) -} - -func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling struct { - empty bool `json:"-"` - MinNodeCount *int64 `json:"minNodeCount"` - MaxNodeCount *int64 `json:"maxNodeCount"` -} - -type jsonClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling - -func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling) UnmarshalJSON(data []byte) error { - var res jsonClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 
0 { - *r = *EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling - } else { - - r.MinNodeCount = res.MinNodeCount - - r.MaxNodeCount = res.MaxNodeCount - - } - return nil -} - -// This object is used to assert a desired state where this ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling = &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling{empty: true} - -func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling) Empty() bool { - return r.empty -} - -func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling) String() string { - return dcl.SprintResource(r) -} - -func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig struct { - empty bool `json:"-"` - ComponentVersion map[string]string `json:"componentVersion"` - Properties map[string]string `json:"properties"` -} - -type jsonClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig - -func (r 
*ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig) UnmarshalJSON(data []byte) error { - var res jsonClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig - } else { - - r.ComponentVersion = res.ComponentVersion - - r.Properties = res.Properties - - } - return nil -} - -// This object is used to assert a desired state where this ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig *ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig = &ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig{empty: true} - -func (r *ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig) Empty() bool { - return r.empty -} - -func (r *ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig) String() string { - return dcl.SprintResource(r) -} - -func (r *ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ClusterVirtualClusterConfigAuxiliaryServicesConfig struct { - empty bool `json:"-"` - MetastoreConfig *ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig `json:"metastoreConfig"` - SparkHistoryServerConfig *ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig 
`json:"sparkHistoryServerConfig"` -} - -type jsonClusterVirtualClusterConfigAuxiliaryServicesConfig ClusterVirtualClusterConfigAuxiliaryServicesConfig - -func (r *ClusterVirtualClusterConfigAuxiliaryServicesConfig) UnmarshalJSON(data []byte) error { - var res jsonClusterVirtualClusterConfigAuxiliaryServicesConfig - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyClusterVirtualClusterConfigAuxiliaryServicesConfig - } else { - - r.MetastoreConfig = res.MetastoreConfig - - r.SparkHistoryServerConfig = res.SparkHistoryServerConfig - - } - return nil -} - -// This object is used to assert a desired state where this ClusterVirtualClusterConfigAuxiliaryServicesConfig is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyClusterVirtualClusterConfigAuxiliaryServicesConfig *ClusterVirtualClusterConfigAuxiliaryServicesConfig = &ClusterVirtualClusterConfigAuxiliaryServicesConfig{empty: true} - -func (r *ClusterVirtualClusterConfigAuxiliaryServicesConfig) Empty() bool { - return r.empty -} - -func (r *ClusterVirtualClusterConfigAuxiliaryServicesConfig) String() string { - return dcl.SprintResource(r) -} - -func (r *ClusterVirtualClusterConfigAuxiliaryServicesConfig) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig struct { - empty bool `json:"-"` - DataprocMetastoreService *string `json:"dataprocMetastoreService"` -} - -type jsonClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig - -func (r 
*ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig) UnmarshalJSON(data []byte) error { - var res jsonClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig - } else { - - r.DataprocMetastoreService = res.DataprocMetastoreService - - } - return nil -} - -// This object is used to assert a desired state where this ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig *ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig = &ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig{empty: true} - -func (r *ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig) Empty() bool { - return r.empty -} - -func (r *ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig) String() string { - return dcl.SprintResource(r) -} - -func (r *ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig struct { - empty bool `json:"-"` - DataprocCluster *string `json:"dataprocCluster"` -} - -type jsonClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig - -func (r *ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig) UnmarshalJSON(data 
[]byte) error { - var res jsonClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig - } else { - - r.DataprocCluster = res.DataprocCluster - - } - return nil -} - -// This object is used to assert a desired state where this ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig *ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig = &ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig{empty: true} - -func (r *ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig) Empty() bool { - return r.empty -} - -func (r *ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig) String() string { - return dcl.SprintResource(r) -} - -func (r *ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -// Describe returns a simple description of this resource to ensure that automated tools -// can identify it. 
-func (r *Cluster) Describe() dcl.ServiceTypeVersion { - return dcl.ServiceTypeVersion{ - Service: "dataproc", - Type: "Cluster", - Version: "beta", - } -} - -func (r *Cluster) ID() (string, error) { - if err := extractClusterFields(r); err != nil { - return "", err - } - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "name": dcl.ValueOrEmptyString(nr.Name), - "config": dcl.ValueOrEmptyString(nr.Config), - "labels": dcl.ValueOrEmptyString(nr.Labels), - "status": dcl.ValueOrEmptyString(nr.Status), - "status_history": dcl.ValueOrEmptyString(nr.StatusHistory), - "cluster_uuid": dcl.ValueOrEmptyString(nr.ClusterUuid), - "metrics": dcl.ValueOrEmptyString(nr.Metrics), - "location": dcl.ValueOrEmptyString(nr.Location), - "virtual_cluster_config": dcl.ValueOrEmptyString(nr.VirtualClusterConfig), - } - return dcl.Nprintf("projects/{{ "{{" }}project{{ "}}" }}/regions/{{ "{{" }}location{{ "}}" }}/clusters/{{ "{{" }}name{{ "}}" }}", params), nil -} - -const ClusterMaxPage = -1 - -type ClusterList struct { - Items []*Cluster - - nextToken string - - pageSize int32 - - resource *Cluster -} - -func (l *ClusterList) HasNext() bool { - return l.nextToken != "" -} - -func (l *ClusterList) Next(ctx context.Context, c *Client) error { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - if !l.HasNext() { - return fmt.Errorf("no next page") - } - items, token, err := c.listCluster(ctx, l.resource, l.nextToken, l.pageSize) - if err != nil { - return err - } - l.Items = items - l.nextToken = token - return err -} - -func (c *Client) ListCluster(ctx context.Context, project, location string) (*ClusterList, error) { - ctx = dcl.ContextWithRequestID(ctx) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - return c.ListClusterWithMaxResults(ctx, project, location, ClusterMaxPage) - -} - -func (c *Client) ListClusterWithMaxResults(ctx 
context.Context, project, location string, pageSize int32) (*ClusterList, error) { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - // Create a resource object so that we can use proper url normalization methods. - r := &Cluster{ - Project: &project, - Location: &location, - } - items, token, err := c.listCluster(ctx, r, "", pageSize) - if err != nil { - return nil, err - } - return &ClusterList{ - Items: items, - nextToken: token, - pageSize: pageSize, - resource: r, - }, nil -} - -func (c *Client) GetCluster(ctx context.Context, r *Cluster) (*Cluster, error) { - ctx = dcl.ContextWithRequestID(ctx) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - // This is *purposefully* supressing errors. - // This function is used with url-normalized values + not URL normalized values. - // URL Normalized values will throw unintentional errors, since those values are not of the proper parent form. - extractClusterFields(r) - - b, err := c.getClusterRaw(ctx, r) - if err != nil { - if dcl.IsNotFound(err) { - return nil, &googleapi.Error{ - Code: 404, - Message: err.Error(), - } - } - return nil, err - } - result, err := unmarshalCluster(b, c, r) - if err != nil { - return nil, err - } - result.Project = r.Project - result.Location = r.Location - result.Name = r.Name - - c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) - c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) - result, err = canonicalizeClusterNewState(c, result, r) - if err != nil { - return nil, err - } - if err := postReadExtractClusterFields(result); err != nil { - return result, err - } - c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result) - - return result, nil -} - -func (c *Client) DeleteCluster(ctx context.Context, r *Cluster) error { - ctx = dcl.ContextWithRequestID(ctx) - ctx, cancel := context.WithTimeout(ctx, 
c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - if r == nil { - return fmt.Errorf("Cluster resource is nil") - } - c.Config.Logger.InfoWithContext(ctx, "Deleting Cluster...") - deleteOp := deleteClusterOperation{} - return deleteOp.do(ctx, r, c) -} - -// DeleteAllCluster deletes all resources that the filter functions returns true on. -func (c *Client) DeleteAllCluster(ctx context.Context, project, location string, filter func(*Cluster) bool) error { - listObj, err := c.ListCluster(ctx, project, location) - if err != nil { - return err - } - - err = c.deleteAllCluster(ctx, filter, listObj.Items) - if err != nil { - return err - } - for listObj.HasNext() { - err = listObj.Next(ctx, c) - if err != nil { - return nil - } - err = c.deleteAllCluster(ctx, filter, listObj.Items) - if err != nil { - return err - } - } - return nil -} - -func (c *Client) ApplyCluster(ctx context.Context, rawDesired *Cluster, opts ...dcl.ApplyOption) (*Cluster, error) { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - ctx = dcl.ContextWithRequestID(ctx) - var resultNewState *Cluster - err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { - newState, err := applyClusterHelper(c, ctx, rawDesired, opts...) - resultNewState = newState - if err != nil { - // If the error is 409, there is conflict in resource update. - // Here we want to apply changes based on latest state. - if dcl.IsConflictError(err) { - return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} - } - return nil, err - } - return nil, nil - }, c.Config.RetryProvider) - return resultNewState, err -} - -func applyClusterHelper(c *Client, ctx context.Context, rawDesired *Cluster, opts ...dcl.ApplyOption) (*Cluster, error) { - c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyCluster...") - c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) - - // 1.1: Validation of user-specified fields in desired state. 
- if err := rawDesired.validate(); err != nil { - return nil, err - } - - if err := extractClusterFields(rawDesired); err != nil { - return nil, err - } - - initial, desired, fieldDiffs, err := c.clusterDiffsForRawDesired(ctx, rawDesired, opts...) - if err != nil { - return nil, fmt.Errorf("failed to create a diff: %w", err) - } - - diffs, err := convertFieldDiffsToClusterDiffs(c.Config, fieldDiffs, opts) - if err != nil { - return nil, err - } - - // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). - - // 2.3: Lifecycle Directive Check - var create bool - lp := dcl.FetchLifecycleParams(opts) - if initial == nil { - if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { - return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} - } - create = true - } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { - return nil, dcl.ApplyInfeasibleError{ - Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), - } - } else { - for _, d := range diffs { - if d.RequiresRecreate { - return nil, dcl.ApplyInfeasibleError{ - Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), - } - } - if dcl.HasLifecycleParam(lp, dcl.BlockModification) { - return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} - } - } - } - - // 2.4 Imperative Request Planning - var ops []clusterApiOperation - if create { - ops = append(ops, &createClusterOperation{}) - } else { - for _, d := range diffs { - ops = append(ops, d.UpdateOp) - } - } - c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) - - // 2.5 Request Actuation - for _, op := range ops { - c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) - if err := op.do(ctx, desired, c); err != nil { - c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", op, op, err) - return nil, err - } - 
c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) - } - return applyClusterDiff(c, ctx, desired, rawDesired, ops, opts...) -} - -func applyClusterDiff(c *Client, ctx context.Context, desired *Cluster, rawDesired *Cluster, ops []clusterApiOperation, opts ...dcl.ApplyOption) (*Cluster, error) { - // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state - c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") - rawNew, err := c.GetCluster(ctx, desired) - if err != nil { - return nil, err - } - // Get additional values from the first response. - // These values should be merged into the newState above. - if len(ops) > 0 { - lastOp := ops[len(ops)-1] - if o, ok := lastOp.(*createClusterOperation); ok { - if r, hasR := o.FirstResponse(); hasR { - - c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") - - fullResp, err := unmarshalMapCluster(r, c, rawDesired) - if err != nil { - return nil, err - } - - rawNew, err = canonicalizeClusterNewState(c, rawNew, fullResp) - if err != nil { - return nil, err - } - } - } - } - - c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) - // 3.2b Canonicalization of raw new state using raw desired state - newState, err := canonicalizeClusterNewState(c, rawNew, rawDesired) - if err != nil { - return rawNew, err - } - - c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) - // 3.3 Comparison of the new state and raw desired state. - // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE - newDesired, err := canonicalizeClusterDesiredState(rawDesired, newState) - if err != nil { - return newState, err - } - - if err := postReadExtractClusterFields(newState); err != nil { - return newState, err - } - - // Need to ensure any transformations made here match acceptably in differ. 
- if err := postReadExtractClusterFields(newDesired); err != nil { - return newState, err - } - - c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) - newDiffs, err := diffCluster(c, newDesired, newState) - if err != nil { - return newState, err - } - - if len(newDiffs) == 0 { - c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") - } else { - c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) - diffMessages := make([]string, len(newDiffs)) - for i, d := range newDiffs { - diffMessages[i] = fmt.Sprintf("%v", d) - } - return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} - } - c.Config.Logger.InfoWithContext(ctx, "Done Apply.") - return newState, nil -} - -func (r *Cluster) GetPolicy(basePath string) (string, string, *bytes.Buffer, error) { - u := r.getPolicyURL(basePath) - body := &bytes.Buffer{} - body.WriteString(fmt.Sprintf(`{"options":{"requestedPolicyVersion": %d}}`, r.IAMPolicyVersion())) - return u, "POST", body, nil -} diff --git a/mmv1/third_party/terraform/services/dataproc/cluster_internal.go.tmpl b/mmv1/third_party/terraform/services/dataproc/cluster_internal.go.tmpl deleted file mode 100644 index aa6867462f81..000000000000 --- a/mmv1/third_party/terraform/services/dataproc/cluster_internal.go.tmpl +++ /dev/null @@ -1,18408 +0,0 @@ -package dataproc - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io/ioutil" - "strings" - - dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" - "github.com/hashicorp/terraform-provider-google/google/tpgdclresource/operations" -) - -func (r *Cluster) validate() error { - - if err := dcl.RequiredParameter(r.Project, "Project"); err != nil { - return err - } - if err := dcl.Required(r, "name"); err != nil { - return err - } - if err := dcl.RequiredParameter(r.Location, "Location"); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.Config) { - if err := r.Config.validate(); err != 
nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.Status) { - if err := r.Status.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.Metrics) { - if err := r.Metrics.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.VirtualClusterConfig) { - if err := r.VirtualClusterConfig.validate(); err != nil { - return err - } - } - return nil -} -func (r *ClusterConfig) validate() error { - if !dcl.IsEmptyValueIndirect(r.GceClusterConfig) { - if err := r.GceClusterConfig.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.MasterConfig) { - if err := r.MasterConfig.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.WorkerConfig) { - if err := r.WorkerConfig.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.SecondaryWorkerConfig) { - if err := r.SecondaryWorkerConfig.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.SoftwareConfig) { - if err := r.SoftwareConfig.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.EncryptionConfig) { - if err := r.EncryptionConfig.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.AutoscalingConfig) { - if err := r.AutoscalingConfig.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.SecurityConfig) { - if err := r.SecurityConfig.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.LifecycleConfig) { - if err := r.LifecycleConfig.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.EndpointConfig) { - if err := r.EndpointConfig.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.GkeClusterConfig) { - if err := r.GkeClusterConfig.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.MetastoreConfig) { - if err := r.MetastoreConfig.validate(); err != nil { - return err - } - } - if 
!dcl.IsEmptyValueIndirect(r.DataprocMetricConfig) { - if err := r.DataprocMetricConfig.validate(); err != nil { - return err - } - } - return nil -} -func (r *ClusterConfigGceClusterConfig) validate() error { - if !dcl.IsEmptyValueIndirect(r.ReservationAffinity) { - if err := r.ReservationAffinity.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.NodeGroupAffinity) { - if err := r.NodeGroupAffinity.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.ShieldedInstanceConfig) { - if err := r.ShieldedInstanceConfig.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.ConfidentialInstanceConfig) { - if err := r.ConfidentialInstanceConfig.validate(); err != nil { - return err - } - } - return nil -} -func (r *ClusterConfigGceClusterConfigReservationAffinity) validate() error { - return nil -} -func (r *ClusterConfigGceClusterConfigNodeGroupAffinity) validate() error { - if err := dcl.Required(r, "nodeGroup"); err != nil { - return err - } - return nil -} -func (r *ClusterConfigGceClusterConfigShieldedInstanceConfig) validate() error { - return nil -} -func (r *ClusterConfigGceClusterConfigConfidentialInstanceConfig) validate() error { - return nil -} -func (r *ClusterConfigMasterConfig) validate() error { - if !dcl.IsEmptyValueIndirect(r.DiskConfig) { - if err := r.DiskConfig.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.ManagedGroupConfig) { - if err := r.ManagedGroupConfig.validate(); err != nil { - return err - } - } - return nil -} -func (r *ClusterConfigMasterConfigDiskConfig) validate() error { - return nil -} -func (r *ClusterConfigMasterConfigManagedGroupConfig) validate() error { - return nil -} -func (r *ClusterConfigMasterConfigAccelerators) validate() error { - return nil -} -func (r *ClusterConfigMasterConfigInstanceReferences) validate() error { - return nil -} -func (r *ClusterConfigWorkerConfig) validate() error { - if 
!dcl.IsEmptyValueIndirect(r.DiskConfig) { - if err := r.DiskConfig.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.ManagedGroupConfig) { - if err := r.ManagedGroupConfig.validate(); err != nil { - return err - } - } - return nil -} -func (r *ClusterConfigWorkerConfigDiskConfig) validate() error { - return nil -} -func (r *ClusterConfigWorkerConfigManagedGroupConfig) validate() error { - return nil -} -func (r *ClusterConfigWorkerConfigAccelerators) validate() error { - return nil -} -func (r *ClusterConfigWorkerConfigInstanceReferences) validate() error { - return nil -} -func (r *ClusterConfigSecondaryWorkerConfig) validate() error { - if !dcl.IsEmptyValueIndirect(r.DiskConfig) { - if err := r.DiskConfig.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.ManagedGroupConfig) { - if err := r.ManagedGroupConfig.validate(); err != nil { - return err - } - } - return nil -} -func (r *ClusterConfigSecondaryWorkerConfigDiskConfig) validate() error { - return nil -} -func (r *ClusterConfigSecondaryWorkerConfigManagedGroupConfig) validate() error { - return nil -} -func (r *ClusterConfigSecondaryWorkerConfigAccelerators) validate() error { - return nil -} -func (r *ClusterConfigSecondaryWorkerConfigInstanceReferences) validate() error { - return nil -} -func (r *ClusterConfigSoftwareConfig) validate() error { - return nil -} -func (r *ClusterConfigInitializationActions) validate() error { - if err := dcl.Required(r, "executableFile"); err != nil { - return err - } - return nil -} -func (r *ClusterConfigEncryptionConfig) validate() error { - return nil -} -func (r *ClusterConfigAutoscalingConfig) validate() error { - return nil -} -func (r *ClusterConfigSecurityConfig) validate() error { - if !dcl.IsEmptyValueIndirect(r.KerberosConfig) { - if err := r.KerberosConfig.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.IdentityConfig) { - if err := r.IdentityConfig.validate(); err != nil { - 
return err - } - } - return nil -} -func (r *ClusterConfigSecurityConfigKerberosConfig) validate() error { - return nil -} -func (r *ClusterConfigSecurityConfigIdentityConfig) validate() error { - if err := dcl.Required(r, "userServiceAccountMapping"); err != nil { - return err - } - return nil -} -func (r *ClusterConfigLifecycleConfig) validate() error { - return nil -} -func (r *ClusterConfigEndpointConfig) validate() error { - return nil -} -func (r *ClusterConfigGkeClusterConfig) validate() error { - if !dcl.IsEmptyValueIndirect(r.NamespacedGkeDeploymentTarget) { - if err := r.NamespacedGkeDeploymentTarget.validate(); err != nil { - return err - } - } - return nil -} -func (r *ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) validate() error { - return nil -} -func (r *ClusterConfigMetastoreConfig) validate() error { - if err := dcl.Required(r, "dataprocMetastoreService"); err != nil { - return err - } - return nil -} -func (r *ClusterConfigDataprocMetricConfig) validate() error { - if err := dcl.Required(r, "metrics"); err != nil { - return err - } - return nil -} -func (r *ClusterConfigDataprocMetricConfigMetrics) validate() error { - if err := dcl.Required(r, "metricSource"); err != nil { - return err - } - return nil -} -func (r *ClusterStatus) validate() error { - return nil -} -func (r *ClusterStatusHistory) validate() error { - return nil -} -func (r *ClusterMetrics) validate() error { - return nil -} -func (r *ClusterVirtualClusterConfig) validate() error { - if err := dcl.Required(r, "kubernetesClusterConfig"); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.KubernetesClusterConfig) { - if err := r.KubernetesClusterConfig.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.AuxiliaryServicesConfig) { - if err := r.AuxiliaryServicesConfig.validate(); err != nil { - return err - } - } - return nil -} -func (r *ClusterVirtualClusterConfigKubernetesClusterConfig) validate() error { - if err := 
dcl.Required(r, "gkeClusterConfig"); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.GkeClusterConfig) { - if err := r.GkeClusterConfig.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.KubernetesSoftwareConfig) { - if err := r.KubernetesSoftwareConfig.validate(); err != nil { - return err - } - } - return nil -} -func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig) validate() error { - return nil -} -func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget) validate() error { - if err := dcl.Required(r, "nodePool"); err != nil { - return err - } - if err := dcl.Required(r, "roles"); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.NodePoolConfig) { - if err := r.NodePoolConfig.validate(); err != nil { - return err - } - } - return nil -} -func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig) validate() error { - if !dcl.IsEmptyValueIndirect(r.Config) { - if err := r.Config.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.Autoscaling) { - if err := r.Autoscaling.validate(); err != nil { - return err - } - } - return nil -} -func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig) validate() error { - if !dcl.IsEmptyValueIndirect(r.EphemeralStorageConfig) { - if err := r.EphemeralStorageConfig.validate(); err != nil { - return err - } - } - return nil -} -func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators) validate() error { - return nil -} -func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig) validate() error { - return nil -} -func (r *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling) validate() error { - return 
nil -} -func (r *ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig) validate() error { - return nil -} -func (r *ClusterVirtualClusterConfigAuxiliaryServicesConfig) validate() error { - if !dcl.IsEmptyValueIndirect(r.MetastoreConfig) { - if err := r.MetastoreConfig.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.SparkHistoryServerConfig) { - if err := r.SparkHistoryServerConfig.validate(); err != nil { - return err - } - } - return nil -} -func (r *ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig) validate() error { - if err := dcl.Required(r, "dataprocMetastoreService"); err != nil { - return err - } - return nil -} -func (r *ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig) validate() error { - return nil -} -func (r *Cluster) basePath() string { - params := map[string]interface{}{} - return dcl.Nprintf("https://dataproc.googleapis.com/v1beta2/", params) -} - -func (r *Cluster) getURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "location": dcl.ValueOrEmptyString(nr.Location), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/regions/{{ "{{" }}location{{ "}}" }}/clusters/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil -} - -func (r *Cluster) listURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "location": dcl.ValueOrEmptyString(nr.Location), - } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/regions/{{ "{{" }}location{{ "}}" }}/clusters", nr.basePath(), userBasePath, params), nil - -} - -func (r *Cluster) createURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "location": 
dcl.ValueOrEmptyString(nr.Location), - } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/regions/{{ "{{" }}location{{ "}}" }}/clusters", nr.basePath(), userBasePath, params), nil - -} - -func (r *Cluster) deleteURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "location": dcl.ValueOrEmptyString(nr.Location), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/regions/{{ "{{" }}location{{ "}}" }}/clusters/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil -} - -func (r *Cluster) SetPolicyURL(userBasePath string) string { - nr := r.urlNormalized() - fields := map[string]interface{}{ - "project": *nr.Project, - "location": *nr.Location, - "name": *nr.Name, - } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/regions/{{ "{{" }}location{{ "}}" }}/clusters/{{ "{{" }}name{{ "}}" }}:setIamPolicy", nr.basePath(), userBasePath, fields) -} - -func (r *Cluster) SetPolicyVerb() string { - return "POST" -} - -func (r *Cluster) getPolicyURL(userBasePath string) string { - nr := r.urlNormalized() - fields := map[string]interface{}{ - "project": *nr.Project, - "location": *nr.Location, - "name": *nr.Name, - } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/regions/{{ "{{" }}location{{ "}}" }}/clusters/{{ "{{" }}name{{ "}}" }}:getIamPolicy", nr.basePath(), userBasePath, fields) -} - -func (r *Cluster) IAMPolicyVersion() int { - return 3 -} - -// clusterApiOperation represents a mutable operation in the underlying REST -// API such as Create, Update, or Delete. -type clusterApiOperation interface { - do(context.Context, *Cluster, *Client) error -} - -// newUpdateClusterUpdateClusterRequest creates a request for an -// Cluster resource's UpdateCluster update type by filling in the update -// fields based on the intended state of the resource. 
-func newUpdateClusterUpdateClusterRequest(ctx context.Context, f *Cluster, c *Client) (map[string]interface{}, error) { - req := map[string]interface{}{} - res := f - _ = res - - if v := f.Labels; !dcl.IsEmptyValueIndirect(v) { - req["labels"] = v - } - return req, nil -} - -// marshalUpdateClusterUpdateClusterRequest converts the update into -// the final JSON request body. -func marshalUpdateClusterUpdateClusterRequest(c *Client, m map[string]interface{}) ([]byte, error) { - - return json.Marshal(m) -} - -type updateClusterUpdateClusterOperation struct { - // If the update operation has the REQUIRES_APPLY_OPTIONS trait, this will be populated. - // Usually it will be nil - this is to prevent us from accidentally depending on apply - // options, which should usually be unnecessary. - ApplyOptions []dcl.ApplyOption - FieldDiffs []*dcl.FieldDiff -} - -// do creates a request and sends it to the appropriate URL. In most operations, -// do will transcribe a subset of the resource into a request object and send a -// PUT request to a single URL. 
- -func (op *updateClusterUpdateClusterOperation) do(ctx context.Context, r *Cluster, c *Client) error { - _, err := c.GetCluster(ctx, r) - if err != nil { - return err - } - - u, err := r.updateURL(c.Config.BasePath, "UpdateCluster") - if err != nil { - return err - } - mask := dcl.UpdateMask(op.FieldDiffs) - u, err = dcl.AddQueryParams(u, map[string]string{"updateMask": mask}) - if err != nil { - return err - } - - req, err := newUpdateClusterUpdateClusterRequest(ctx, r, c) - if err != nil { - return err - } - - c.Config.Logger.InfoWithContextf(ctx, "Created update: %#v", req) - body, err := marshalUpdateClusterUpdateClusterRequest(c, req) - if err != nil { - return err - } - resp, err := dcl.SendRequest(ctx, c.Config, "PATCH", u, bytes.NewBuffer(body), c.Config.RetryProvider) - if err != nil { - return err - } - - var o operations.StandardGCPOperation - if err := dcl.ParseResponse(resp.Response, &o); err != nil { - return err - } - err = o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET") - - if err != nil { - return err - } - - return nil -} - -func (c *Client) listClusterRaw(ctx context.Context, r *Cluster, pageToken string, pageSize int32) ([]byte, error) { - u, err := r.urlNormalized().listURL(c.Config.BasePath) - if err != nil { - return nil, err - } - - m := make(map[string]string) - if pageToken != "" { - m["pageToken"] = pageToken - } - - if pageSize != ClusterMaxPage { - m["pageSize"] = fmt.Sprintf("%v", pageSize) - } - - u, err = dcl.AddQueryParams(u, m) - if err != nil { - return nil, err - } - resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) - if err != nil { - return nil, err - } - defer resp.Response.Body.Close() - return ioutil.ReadAll(resp.Response.Body) -} - -type listClusterOperation struct { - Clusters []map[string]interface{} `json:"clusters"` - Token string `json:"nextPageToken"` -} - -func (c *Client) listCluster(ctx context.Context, r *Cluster, pageToken 
string, pageSize int32) ([]*Cluster, string, error) { - b, err := c.listClusterRaw(ctx, r, pageToken, pageSize) - if err != nil { - return nil, "", err - } - - var m listClusterOperation - if err := json.Unmarshal(b, &m); err != nil { - return nil, "", err - } - - var l []*Cluster - for _, v := range m.Clusters { - res, err := unmarshalMapCluster(v, c, r) - if err != nil { - return nil, m.Token, err - } - res.Project = r.Project - res.Location = r.Location - l = append(l, res) - } - - return l, m.Token, nil -} - -func (c *Client) deleteAllCluster(ctx context.Context, f func(*Cluster) bool, resources []*Cluster) error { - var errors []string - for _, res := range resources { - if f(res) { - // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. - err := c.DeleteCluster(ctx, res) - if err != nil { - errors = append(errors, err.Error()) - } - } - } - if len(errors) > 0 { - return fmt.Errorf("%v", strings.Join(errors, "\n")) - } else { - return nil - } -} - -type deleteClusterOperation struct{} - -func (op *deleteClusterOperation) do(ctx context.Context, r *Cluster, c *Client) error { - r, err := c.GetCluster(ctx, r) - if err != nil { - if dcl.IsNotFound(err) { - c.Config.Logger.InfoWithContextf(ctx, "Cluster not found, returning. Original error: %v", err) - return nil - } - c.Config.Logger.WarningWithContextf(ctx, "GetCluster checking for existence. error: %v", err) - return err - } - - u, err := r.deleteURL(c.Config.BasePath) - if err != nil { - return err - } - - // Delete should never have a body - body := &bytes.Buffer{} - resp, err := dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider) - if err != nil { - return err - } - - // wait for object to be deleted. 
- var o operations.StandardGCPOperation - if err := dcl.ParseResponse(resp.Response, &o); err != nil { - return err - } - if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { - return err - } - - // We saw a race condition where for some successful delete operation, the Get calls returned resources for a short duration. - // This is the reason we are adding retry to handle that case. - retriesRemaining := 10 - dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { - _, err := c.GetCluster(ctx, r) - if dcl.IsNotFound(err) { - return nil, nil - } - if retriesRemaining > 0 { - retriesRemaining-- - return &dcl.RetryDetails{}, dcl.OperationNotDone{} - } - return nil, dcl.NotDeletedError{ExistingResource: r} - }, c.Config.RetryProvider) - return nil -} - -// Create operations are similar to Update operations, although they do not have -// specific request objects. The Create request object is the json encoding of -// the resource, which is modified by res.marshal to form the base request body. -type createClusterOperation struct { - response map[string]interface{} -} - -func (op *createClusterOperation) FirstResponse() (map[string]interface{}, bool) { - return op.response, len(op.response) > 0 -} - -func (op *createClusterOperation) do(ctx context.Context, r *Cluster, c *Client) error { - c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r) - u, err := r.createURL(c.Config.BasePath) - if err != nil { - return err - } - - req, err := r.marshal(c) - if err != nil { - return err - } - resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider) - if err != nil { - return err - } - // wait for object to be created. 
- var o operations.StandardGCPOperation - if err := dcl.ParseResponse(resp.Response, &o); err != nil { - return err - } - if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { - c.Config.Logger.Warningf("Creation failed after waiting for operation: %v", err) - return err - } - c.Config.Logger.InfoWithContextf(ctx, "Successfully waited for operation") - op.response, _ = o.FirstResponse() - - if _, err := c.GetCluster(ctx, r); err != nil { - c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) - return err - } - - return nil -} - -func (c *Client) getClusterRaw(ctx context.Context, r *Cluster) ([]byte, error) { - - u, err := r.getURL(c.Config.BasePath) - if err != nil { - return nil, err - } - resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) - if err != nil { - return nil, err - } - defer resp.Response.Body.Close() - b, err := ioutil.ReadAll(resp.Response.Body) - if err != nil { - return nil, err - } - - return b, nil -} - -func (c *Client) clusterDiffsForRawDesired(ctx context.Context, rawDesired *Cluster, opts ...dcl.ApplyOption) (initial, desired *Cluster, diffs []*dcl.FieldDiff, err error) { - c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") - // First, let us see if the user provided a state hint. If they did, we will start fetching based on that. 
- var fetchState *Cluster - if sh := dcl.FetchStateHint(opts); sh != nil { - if r, ok := sh.(*Cluster); !ok { - c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected Cluster, got %T", sh) - } else { - fetchState = r - } - } - if fetchState == nil { - fetchState = rawDesired - } - - // 1.2: Retrieval of raw initial state from API - rawInitial, err := c.GetCluster(ctx, fetchState) - if rawInitial == nil { - if !dcl.IsNotFound(err) { - c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a Cluster resource already exists: %s", err) - return nil, nil, nil, fmt.Errorf("failed to retrieve Cluster resource: %v", err) - } - c.Config.Logger.InfoWithContext(ctx, "Found that Cluster resource did not exist.") - // Perform canonicalization to pick up defaults. - desired, err = canonicalizeClusterDesiredState(rawDesired, rawInitial) - return nil, desired, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Found initial state for Cluster: %v", rawInitial) - c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for Cluster: %v", rawDesired) - - // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. - if err := extractClusterFields(rawInitial); err != nil { - return nil, nil, nil, err - } - - // 1.3: Canonicalize raw initial state into initial state. - initial, err = canonicalizeClusterInitialState(rawInitial, rawDesired) - if err != nil { - return nil, nil, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for Cluster: %v", initial) - - // 1.4: Canonicalize raw desired state into desired state. - desired, err = canonicalizeClusterDesiredState(rawDesired, rawInitial, opts...) - if err != nil { - return nil, nil, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for Cluster: %v", desired) - - // 2.1: Comparison of initial and desired state. - diffs, err = diffCluster(c, desired, initial, opts...) 
- return initial, desired, diffs, err -} - -func canonicalizeClusterInitialState(rawInitial, rawDesired *Cluster) (*Cluster, error) { - // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. - return rawInitial, nil -} - -/* -* Canonicalizers -* -* These are responsible for converting either a user-specified config or a -* GCP API response to a standard format that can be used for difference checking. -* */ - -func canonicalizeClusterDesiredState(rawDesired, rawInitial *Cluster, opts ...dcl.ApplyOption) (*Cluster, error) { - - if rawInitial == nil { - // Since the initial state is empty, the desired state is all we have. - // We canonicalize the remaining nested objects with nil to pick up defaults. - rawDesired.Config = canonicalizeClusterConfig(rawDesired.Config, nil, opts...) - rawDesired.Status = canonicalizeClusterStatus(rawDesired.Status, nil, opts...) - rawDesired.Metrics = canonicalizeClusterMetrics(rawDesired.Metrics, nil, opts...) - rawDesired.VirtualClusterConfig = canonicalizeClusterVirtualClusterConfig(rawDesired.VirtualClusterConfig, nil, opts...) - - return rawDesired, nil - } - canonicalDesired := &Cluster{} - if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) { - canonicalDesired.Project = rawInitial.Project - } else { - canonicalDesired.Project = rawDesired.Project - } - if dcl.StringCanonicalize(rawDesired.Name, rawInitial.Name) { - canonicalDesired.Name = rawInitial.Name - } else { - canonicalDesired.Name = rawDesired.Name - } - canonicalDesired.Config = canonicalizeClusterConfig(rawDesired.Config, rawInitial.Config, opts...) - if dcl.IsZeroValue(rawDesired.Labels) || (dcl.IsEmptyValueIndirect(rawDesired.Labels) && dcl.IsEmptyValueIndirect(rawInitial.Labels)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- canonicalDesired.Labels = rawInitial.Labels - } else { - canonicalDesired.Labels = rawDesired.Labels - } - if dcl.NameToSelfLink(rawDesired.Location, rawInitial.Location) { - canonicalDesired.Location = rawInitial.Location - } else { - canonicalDesired.Location = rawDesired.Location - } - canonicalDesired.VirtualClusterConfig = canonicalizeClusterVirtualClusterConfig(rawDesired.VirtualClusterConfig, rawInitial.VirtualClusterConfig, opts...) - return canonicalDesired, nil -} - -func canonicalizeClusterNewState(c *Client, rawNew, rawDesired *Cluster) (*Cluster, error) { - - rawNew.Project = rawDesired.Project - - if dcl.IsEmptyValueIndirect(rawNew.Name) && dcl.IsEmptyValueIndirect(rawDesired.Name) { - rawNew.Name = rawDesired.Name - } else { - if dcl.StringCanonicalize(rawDesired.Name, rawNew.Name) { - rawNew.Name = rawDesired.Name - } - } - - if dcl.IsEmptyValueIndirect(rawNew.Config) && dcl.IsEmptyValueIndirect(rawDesired.Config) { - rawNew.Config = rawDesired.Config - } else { - rawNew.Config = canonicalizeNewClusterConfig(c, rawDesired.Config, rawNew.Config) - } - - if dcl.IsEmptyValueIndirect(rawNew.Labels) && dcl.IsEmptyValueIndirect(rawDesired.Labels) { - rawNew.Labels = rawDesired.Labels - } else { - } - - if dcl.IsEmptyValueIndirect(rawNew.Status) && dcl.IsEmptyValueIndirect(rawDesired.Status) { - rawNew.Status = rawDesired.Status - } else { - rawNew.Status = canonicalizeNewClusterStatus(c, rawDesired.Status, rawNew.Status) - } - - if dcl.IsEmptyValueIndirect(rawNew.StatusHistory) && dcl.IsEmptyValueIndirect(rawDesired.StatusHistory) { - rawNew.StatusHistory = rawDesired.StatusHistory - } else { - rawNew.StatusHistory = canonicalizeNewClusterStatusHistorySlice(c, rawDesired.StatusHistory, rawNew.StatusHistory) - } - - if dcl.IsEmptyValueIndirect(rawNew.ClusterUuid) && dcl.IsEmptyValueIndirect(rawDesired.ClusterUuid) { - rawNew.ClusterUuid = rawDesired.ClusterUuid - } else { - if dcl.StringCanonicalize(rawDesired.ClusterUuid, rawNew.ClusterUuid) { - 
rawNew.ClusterUuid = rawDesired.ClusterUuid - } - } - - if dcl.IsEmptyValueIndirect(rawNew.Metrics) && dcl.IsEmptyValueIndirect(rawDesired.Metrics) { - rawNew.Metrics = rawDesired.Metrics - } else { - rawNew.Metrics = canonicalizeNewClusterMetrics(c, rawDesired.Metrics, rawNew.Metrics) - } - - rawNew.Location = rawDesired.Location - - if dcl.IsEmptyValueIndirect(rawNew.VirtualClusterConfig) && dcl.IsEmptyValueIndirect(rawDesired.VirtualClusterConfig) { - rawNew.VirtualClusterConfig = rawDesired.VirtualClusterConfig - } else { - rawNew.VirtualClusterConfig = canonicalizeNewClusterVirtualClusterConfig(c, rawDesired.VirtualClusterConfig, rawNew.VirtualClusterConfig) - } - - return rawNew, nil -} - -func canonicalizeClusterConfig(des, initial *ClusterConfig, opts ...dcl.ApplyOption) *ClusterConfig { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ClusterConfig{} - - if dcl.IsZeroValue(des.StagingBucket) || (dcl.IsEmptyValueIndirect(des.StagingBucket) && dcl.IsEmptyValueIndirect(initial.StagingBucket)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.StagingBucket = initial.StagingBucket - } else { - cDes.StagingBucket = des.StagingBucket - } - if dcl.IsZeroValue(des.TempBucket) || (dcl.IsEmptyValueIndirect(des.TempBucket) && dcl.IsEmptyValueIndirect(initial.TempBucket)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.TempBucket = initial.TempBucket - } else { - cDes.TempBucket = des.TempBucket - } - cDes.GceClusterConfig = canonicalizeClusterConfigGceClusterConfig(des.GceClusterConfig, initial.GceClusterConfig, opts...) - cDes.MasterConfig = canonicalizeClusterConfigMasterConfig(des.MasterConfig, initial.MasterConfig, opts...) - cDes.WorkerConfig = canonicalizeClusterConfigWorkerConfig(des.WorkerConfig, initial.WorkerConfig, opts...) 
- cDes.SecondaryWorkerConfig = canonicalizeClusterConfigSecondaryWorkerConfig(des.SecondaryWorkerConfig, initial.SecondaryWorkerConfig, opts...) - cDes.SoftwareConfig = canonicalizeClusterConfigSoftwareConfig(des.SoftwareConfig, initial.SoftwareConfig, opts...) - cDes.InitializationActions = canonicalizeClusterConfigInitializationActionsSlice(des.InitializationActions, initial.InitializationActions, opts...) - cDes.EncryptionConfig = canonicalizeClusterConfigEncryptionConfig(des.EncryptionConfig, initial.EncryptionConfig, opts...) - cDes.AutoscalingConfig = canonicalizeClusterConfigAutoscalingConfig(des.AutoscalingConfig, initial.AutoscalingConfig, opts...) - cDes.SecurityConfig = canonicalizeClusterConfigSecurityConfig(des.SecurityConfig, initial.SecurityConfig, opts...) - cDes.LifecycleConfig = canonicalizeClusterConfigLifecycleConfig(des.LifecycleConfig, initial.LifecycleConfig, opts...) - cDes.EndpointConfig = canonicalizeClusterConfigEndpointConfig(des.EndpointConfig, initial.EndpointConfig, opts...) - cDes.GkeClusterConfig = canonicalizeClusterConfigGkeClusterConfig(des.GkeClusterConfig, initial.GkeClusterConfig, opts...) - cDes.MetastoreConfig = canonicalizeClusterConfigMetastoreConfig(des.MetastoreConfig, initial.MetastoreConfig, opts...) - cDes.DataprocMetricConfig = canonicalizeClusterConfigDataprocMetricConfig(des.DataprocMetricConfig, initial.DataprocMetricConfig, opts...) - - return cDes -} - -func canonicalizeClusterConfigSlice(des, initial []ClusterConfig, opts ...dcl.ApplyOption) []ClusterConfig { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ClusterConfig, 0, len(des)) - for _, d := range des { - cd := canonicalizeClusterConfig(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ClusterConfig, 0, len(des)) - for i, d := range des { - cd := canonicalizeClusterConfig(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewClusterConfig(c *Client, des, nw *ClusterConfig) *ClusterConfig { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ClusterConfig while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - nw.GceClusterConfig = canonicalizeNewClusterConfigGceClusterConfig(c, des.GceClusterConfig, nw.GceClusterConfig) - nw.MasterConfig = canonicalizeNewClusterConfigMasterConfig(c, des.MasterConfig, nw.MasterConfig) - nw.WorkerConfig = canonicalizeNewClusterConfigWorkerConfig(c, des.WorkerConfig, nw.WorkerConfig) - nw.SecondaryWorkerConfig = canonicalizeNewClusterConfigSecondaryWorkerConfig(c, des.SecondaryWorkerConfig, nw.SecondaryWorkerConfig) - nw.SoftwareConfig = canonicalizeNewClusterConfigSoftwareConfig(c, des.SoftwareConfig, nw.SoftwareConfig) - nw.InitializationActions = canonicalizeNewClusterConfigInitializationActionsSlice(c, des.InitializationActions, nw.InitializationActions) - nw.EncryptionConfig = canonicalizeNewClusterConfigEncryptionConfig(c, des.EncryptionConfig, nw.EncryptionConfig) - nw.AutoscalingConfig = canonicalizeNewClusterConfigAutoscalingConfig(c, des.AutoscalingConfig, nw.AutoscalingConfig) - nw.SecurityConfig = canonicalizeNewClusterConfigSecurityConfig(c, des.SecurityConfig, nw.SecurityConfig) - nw.LifecycleConfig = canonicalizeNewClusterConfigLifecycleConfig(c, des.LifecycleConfig, nw.LifecycleConfig) - nw.EndpointConfig = canonicalizeNewClusterConfigEndpointConfig(c, des.EndpointConfig, nw.EndpointConfig) - nw.GkeClusterConfig = canonicalizeNewClusterConfigGkeClusterConfig(c, des.GkeClusterConfig, nw.GkeClusterConfig) - nw.MetastoreConfig = canonicalizeNewClusterConfigMetastoreConfig(c, des.MetastoreConfig, nw.MetastoreConfig) - nw.DataprocMetricConfig = canonicalizeNewClusterConfigDataprocMetricConfig(c, 
des.DataprocMetricConfig, nw.DataprocMetricConfig) - - return nw -} - -func canonicalizeNewClusterConfigSet(c *Client, des, nw []ClusterConfig) []ClusterConfig { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []ClusterConfig - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareClusterConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewClusterConfig(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewClusterConfigSlice(c *Client, des, nw []ClusterConfig) []ClusterConfig { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []ClusterConfig - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewClusterConfig(c, &d, &n)) - } - - return items -} - -func canonicalizeClusterConfigGceClusterConfig(des, initial *ClusterConfigGceClusterConfig, opts ...dcl.ApplyOption) *ClusterConfigGceClusterConfig { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ClusterConfigGceClusterConfig{} - - if dcl.StringCanonicalize(des.Zone, initial.Zone) || dcl.IsZeroValue(des.Zone) { - cDes.Zone = initial.Zone - } else { - cDes.Zone = des.Zone - } - if dcl.IsZeroValue(des.Network) || (dcl.IsEmptyValueIndirect(des.Network) && dcl.IsEmptyValueIndirect(initial.Network)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Network = initial.Network - } else { - cDes.Network = des.Network - } - if dcl.IsZeroValue(des.Subnetwork) || (dcl.IsEmptyValueIndirect(des.Subnetwork) && dcl.IsEmptyValueIndirect(initial.Subnetwork)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.Subnetwork = initial.Subnetwork - } else { - cDes.Subnetwork = des.Subnetwork - } - if dcl.BoolCanonicalize(des.InternalIPOnly, initial.InternalIPOnly) || dcl.IsZeroValue(des.InternalIPOnly) { - cDes.InternalIPOnly = initial.InternalIPOnly - } else { - cDes.InternalIPOnly = des.InternalIPOnly - } - if dcl.IsZeroValue(des.PrivateIPv6GoogleAccess) || (dcl.IsEmptyValueIndirect(des.PrivateIPv6GoogleAccess) && dcl.IsEmptyValueIndirect(initial.PrivateIPv6GoogleAccess)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.PrivateIPv6GoogleAccess = initial.PrivateIPv6GoogleAccess - } else { - cDes.PrivateIPv6GoogleAccess = des.PrivateIPv6GoogleAccess - } - if dcl.IsZeroValue(des.ServiceAccount) || (dcl.IsEmptyValueIndirect(des.ServiceAccount) && dcl.IsEmptyValueIndirect(initial.ServiceAccount)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.ServiceAccount = initial.ServiceAccount - } else { - cDes.ServiceAccount = des.ServiceAccount - } - if dcl.StringArrayCanonicalize(des.ServiceAccountScopes, initial.ServiceAccountScopes) { - cDes.ServiceAccountScopes = initial.ServiceAccountScopes - } else { - cDes.ServiceAccountScopes = des.ServiceAccountScopes - } - if dcl.StringArrayCanonicalize(des.Tags, initial.Tags) { - cDes.Tags = initial.Tags - } else { - cDes.Tags = des.Tags - } - if dcl.IsZeroValue(des.Metadata) || (dcl.IsEmptyValueIndirect(des.Metadata) && dcl.IsEmptyValueIndirect(initial.Metadata)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Metadata = initial.Metadata - } else { - cDes.Metadata = des.Metadata - } - cDes.ReservationAffinity = canonicalizeClusterConfigGceClusterConfigReservationAffinity(des.ReservationAffinity, initial.ReservationAffinity, opts...) - cDes.NodeGroupAffinity = canonicalizeClusterConfigGceClusterConfigNodeGroupAffinity(des.NodeGroupAffinity, initial.NodeGroupAffinity, opts...) - cDes.ShieldedInstanceConfig = canonicalizeClusterConfigGceClusterConfigShieldedInstanceConfig(des.ShieldedInstanceConfig, initial.ShieldedInstanceConfig, opts...) - cDes.ConfidentialInstanceConfig = canonicalizeClusterConfigGceClusterConfigConfidentialInstanceConfig(des.ConfidentialInstanceConfig, initial.ConfidentialInstanceConfig, opts...) - - return cDes -} - -func canonicalizeClusterConfigGceClusterConfigSlice(des, initial []ClusterConfigGceClusterConfig, opts ...dcl.ApplyOption) []ClusterConfigGceClusterConfig { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ClusterConfigGceClusterConfig, 0, len(des)) - for _, d := range des { - cd := canonicalizeClusterConfigGceClusterConfig(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ClusterConfigGceClusterConfig, 0, len(des)) - for i, d := range des { - cd := canonicalizeClusterConfigGceClusterConfig(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewClusterConfigGceClusterConfig(c *Client, des, nw *ClusterConfigGceClusterConfig) *ClusterConfigGceClusterConfig { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ClusterConfigGceClusterConfig while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Zone, nw.Zone) { - nw.Zone = des.Zone - } - if dcl.BoolCanonicalize(des.InternalIPOnly, nw.InternalIPOnly) { - nw.InternalIPOnly = des.InternalIPOnly - } - if dcl.StringArrayCanonicalize(des.ServiceAccountScopes, nw.ServiceAccountScopes) { - nw.ServiceAccountScopes = des.ServiceAccountScopes - } - if dcl.StringArrayCanonicalize(des.Tags, nw.Tags) { - nw.Tags = des.Tags - } - nw.ReservationAffinity = canonicalizeNewClusterConfigGceClusterConfigReservationAffinity(c, des.ReservationAffinity, nw.ReservationAffinity) - nw.NodeGroupAffinity = canonicalizeNewClusterConfigGceClusterConfigNodeGroupAffinity(c, des.NodeGroupAffinity, nw.NodeGroupAffinity) - nw.ShieldedInstanceConfig = canonicalizeNewClusterConfigGceClusterConfigShieldedInstanceConfig(c, des.ShieldedInstanceConfig, nw.ShieldedInstanceConfig) - nw.ConfidentialInstanceConfig = canonicalizeNewClusterConfigGceClusterConfigConfidentialInstanceConfig(c, des.ConfidentialInstanceConfig, nw.ConfidentialInstanceConfig) - - return nw -} - -func canonicalizeNewClusterConfigGceClusterConfigSet(c *Client, des, nw []ClusterConfigGceClusterConfig) []ClusterConfigGceClusterConfig { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []ClusterConfigGceClusterConfig - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareClusterConfigGceClusterConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewClusterConfigGceClusterConfig(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) 
- - return items -} - -func canonicalizeNewClusterConfigGceClusterConfigSlice(c *Client, des, nw []ClusterConfigGceClusterConfig) []ClusterConfigGceClusterConfig { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []ClusterConfigGceClusterConfig - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewClusterConfigGceClusterConfig(c, &d, &n)) - } - - return items -} - -func canonicalizeClusterConfigGceClusterConfigReservationAffinity(des, initial *ClusterConfigGceClusterConfigReservationAffinity, opts ...dcl.ApplyOption) *ClusterConfigGceClusterConfigReservationAffinity { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ClusterConfigGceClusterConfigReservationAffinity{} - - if dcl.IsZeroValue(des.ConsumeReservationType) || (dcl.IsEmptyValueIndirect(des.ConsumeReservationType) && dcl.IsEmptyValueIndirect(initial.ConsumeReservationType)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.ConsumeReservationType = initial.ConsumeReservationType - } else { - cDes.ConsumeReservationType = des.ConsumeReservationType - } - if dcl.StringCanonicalize(des.Key, initial.Key) || dcl.IsZeroValue(des.Key) { - cDes.Key = initial.Key - } else { - cDes.Key = des.Key - } - if dcl.StringArrayCanonicalize(des.Values, initial.Values) { - cDes.Values = initial.Values - } else { - cDes.Values = des.Values - } - - return cDes -} - -func canonicalizeClusterConfigGceClusterConfigReservationAffinitySlice(des, initial []ClusterConfigGceClusterConfigReservationAffinity, opts ...dcl.ApplyOption) []ClusterConfigGceClusterConfigReservationAffinity { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ClusterConfigGceClusterConfigReservationAffinity, 0, len(des)) - for _, d := range des { - cd := canonicalizeClusterConfigGceClusterConfigReservationAffinity(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ClusterConfigGceClusterConfigReservationAffinity, 0, len(des)) - for i, d := range des { - cd := canonicalizeClusterConfigGceClusterConfigReservationAffinity(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewClusterConfigGceClusterConfigReservationAffinity(c *Client, des, nw *ClusterConfigGceClusterConfigReservationAffinity) *ClusterConfigGceClusterConfigReservationAffinity { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ClusterConfigGceClusterConfigReservationAffinity while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Key, nw.Key) { - nw.Key = des.Key - } - if dcl.StringArrayCanonicalize(des.Values, nw.Values) { - nw.Values = des.Values - } - - return nw -} - -func canonicalizeNewClusterConfigGceClusterConfigReservationAffinitySet(c *Client, des, nw []ClusterConfigGceClusterConfigReservationAffinity) []ClusterConfigGceClusterConfigReservationAffinity { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []ClusterConfigGceClusterConfigReservationAffinity - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareClusterConfigGceClusterConfigReservationAffinityNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewClusterConfigGceClusterConfigReservationAffinity(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewClusterConfigGceClusterConfigReservationAffinitySlice(c *Client, des, nw []ClusterConfigGceClusterConfigReservationAffinity) []ClusterConfigGceClusterConfigReservationAffinity { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []ClusterConfigGceClusterConfigReservationAffinity - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewClusterConfigGceClusterConfigReservationAffinity(c, &d, &n)) - } - - return items -} - -func canonicalizeClusterConfigGceClusterConfigNodeGroupAffinity(des, initial *ClusterConfigGceClusterConfigNodeGroupAffinity, opts ...dcl.ApplyOption) *ClusterConfigGceClusterConfigNodeGroupAffinity { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ClusterConfigGceClusterConfigNodeGroupAffinity{} - - if dcl.IsZeroValue(des.NodeGroup) || (dcl.IsEmptyValueIndirect(des.NodeGroup) && dcl.IsEmptyValueIndirect(initial.NodeGroup)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.NodeGroup = initial.NodeGroup - } else { - cDes.NodeGroup = des.NodeGroup - } - - return cDes -} - -func canonicalizeClusterConfigGceClusterConfigNodeGroupAffinitySlice(des, initial []ClusterConfigGceClusterConfigNodeGroupAffinity, opts ...dcl.ApplyOption) []ClusterConfigGceClusterConfigNodeGroupAffinity { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ClusterConfigGceClusterConfigNodeGroupAffinity, 0, len(des)) - for _, d := range des { - cd := canonicalizeClusterConfigGceClusterConfigNodeGroupAffinity(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ClusterConfigGceClusterConfigNodeGroupAffinity, 0, len(des)) - for i, d := range des { - cd := canonicalizeClusterConfigGceClusterConfigNodeGroupAffinity(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewClusterConfigGceClusterConfigNodeGroupAffinity(c *Client, des, nw *ClusterConfigGceClusterConfigNodeGroupAffinity) *ClusterConfigGceClusterConfigNodeGroupAffinity { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ClusterConfigGceClusterConfigNodeGroupAffinity while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewClusterConfigGceClusterConfigNodeGroupAffinitySet(c *Client, des, nw []ClusterConfigGceClusterConfigNodeGroupAffinity) []ClusterConfigGceClusterConfigNodeGroupAffinity { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []ClusterConfigGceClusterConfigNodeGroupAffinity - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareClusterConfigGceClusterConfigNodeGroupAffinityNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewClusterConfigGceClusterConfigNodeGroupAffinity(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewClusterConfigGceClusterConfigNodeGroupAffinitySlice(c *Client, des, nw []ClusterConfigGceClusterConfigNodeGroupAffinity) []ClusterConfigGceClusterConfigNodeGroupAffinity { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []ClusterConfigGceClusterConfigNodeGroupAffinity - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewClusterConfigGceClusterConfigNodeGroupAffinity(c, &d, &n)) - } - - return items -} - -func canonicalizeClusterConfigGceClusterConfigShieldedInstanceConfig(des, initial *ClusterConfigGceClusterConfigShieldedInstanceConfig, opts ...dcl.ApplyOption) *ClusterConfigGceClusterConfigShieldedInstanceConfig { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ClusterConfigGceClusterConfigShieldedInstanceConfig{} - - if dcl.BoolCanonicalize(des.EnableSecureBoot, initial.EnableSecureBoot) || dcl.IsZeroValue(des.EnableSecureBoot) { - cDes.EnableSecureBoot = initial.EnableSecureBoot - } else { - cDes.EnableSecureBoot = des.EnableSecureBoot - } - if dcl.BoolCanonicalize(des.EnableVtpm, initial.EnableVtpm) || dcl.IsZeroValue(des.EnableVtpm) { - cDes.EnableVtpm = initial.EnableVtpm - } else { - cDes.EnableVtpm = des.EnableVtpm - } - if dcl.BoolCanonicalize(des.EnableIntegrityMonitoring, initial.EnableIntegrityMonitoring) || dcl.IsZeroValue(des.EnableIntegrityMonitoring) { - cDes.EnableIntegrityMonitoring = initial.EnableIntegrityMonitoring - } else { - cDes.EnableIntegrityMonitoring = des.EnableIntegrityMonitoring - } - - return cDes -} - -func canonicalizeClusterConfigGceClusterConfigShieldedInstanceConfigSlice(des, initial []ClusterConfigGceClusterConfigShieldedInstanceConfig, opts ...dcl.ApplyOption) []ClusterConfigGceClusterConfigShieldedInstanceConfig { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ClusterConfigGceClusterConfigShieldedInstanceConfig, 0, len(des)) - for _, d := range des { - cd := canonicalizeClusterConfigGceClusterConfigShieldedInstanceConfig(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ClusterConfigGceClusterConfigShieldedInstanceConfig, 0, len(des)) - for i, d := range des { - cd := canonicalizeClusterConfigGceClusterConfigShieldedInstanceConfig(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewClusterConfigGceClusterConfigShieldedInstanceConfig(c *Client, des, nw *ClusterConfigGceClusterConfigShieldedInstanceConfig) *ClusterConfigGceClusterConfigShieldedInstanceConfig { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ClusterConfigGceClusterConfigShieldedInstanceConfig while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.BoolCanonicalize(des.EnableSecureBoot, nw.EnableSecureBoot) { - nw.EnableSecureBoot = des.EnableSecureBoot - } - if dcl.BoolCanonicalize(des.EnableVtpm, nw.EnableVtpm) { - nw.EnableVtpm = des.EnableVtpm - } - if dcl.BoolCanonicalize(des.EnableIntegrityMonitoring, nw.EnableIntegrityMonitoring) { - nw.EnableIntegrityMonitoring = des.EnableIntegrityMonitoring - } - - return nw -} - -func canonicalizeNewClusterConfigGceClusterConfigShieldedInstanceConfigSet(c *Client, des, nw []ClusterConfigGceClusterConfigShieldedInstanceConfig) []ClusterConfigGceClusterConfigShieldedInstanceConfig { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []ClusterConfigGceClusterConfigShieldedInstanceConfig - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareClusterConfigGceClusterConfigShieldedInstanceConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewClusterConfigGceClusterConfigShieldedInstanceConfig(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewClusterConfigGceClusterConfigShieldedInstanceConfigSlice(c *Client, des, nw []ClusterConfigGceClusterConfigShieldedInstanceConfig) []ClusterConfigGceClusterConfigShieldedInstanceConfig { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []ClusterConfigGceClusterConfigShieldedInstanceConfig - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewClusterConfigGceClusterConfigShieldedInstanceConfig(c, &d, &n)) - } - - return items -} - -func canonicalizeClusterConfigGceClusterConfigConfidentialInstanceConfig(des, initial *ClusterConfigGceClusterConfigConfidentialInstanceConfig, opts ...dcl.ApplyOption) *ClusterConfigGceClusterConfigConfidentialInstanceConfig { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ClusterConfigGceClusterConfigConfidentialInstanceConfig{} - - if dcl.BoolCanonicalize(des.EnableConfidentialCompute, initial.EnableConfidentialCompute) || dcl.IsZeroValue(des.EnableConfidentialCompute) { - cDes.EnableConfidentialCompute = initial.EnableConfidentialCompute - } else { - cDes.EnableConfidentialCompute = des.EnableConfidentialCompute - } - - return cDes -} - -func 
canonicalizeClusterConfigGceClusterConfigConfidentialInstanceConfigSlice(des, initial []ClusterConfigGceClusterConfigConfidentialInstanceConfig, opts ...dcl.ApplyOption) []ClusterConfigGceClusterConfigConfidentialInstanceConfig { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ClusterConfigGceClusterConfigConfidentialInstanceConfig, 0, len(des)) - for _, d := range des { - cd := canonicalizeClusterConfigGceClusterConfigConfidentialInstanceConfig(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ClusterConfigGceClusterConfigConfidentialInstanceConfig, 0, len(des)) - for i, d := range des { - cd := canonicalizeClusterConfigGceClusterConfigConfidentialInstanceConfig(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewClusterConfigGceClusterConfigConfidentialInstanceConfig(c *Client, des, nw *ClusterConfigGceClusterConfigConfidentialInstanceConfig) *ClusterConfigGceClusterConfigConfidentialInstanceConfig { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ClusterConfigGceClusterConfigConfidentialInstanceConfig while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.BoolCanonicalize(des.EnableConfidentialCompute, nw.EnableConfidentialCompute) { - nw.EnableConfidentialCompute = des.EnableConfidentialCompute - } - - return nw -} - -func canonicalizeNewClusterConfigGceClusterConfigConfidentialInstanceConfigSet(c *Client, des, nw []ClusterConfigGceClusterConfigConfidentialInstanceConfig) []ClusterConfigGceClusterConfigConfidentialInstanceConfig { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []ClusterConfigGceClusterConfigConfidentialInstanceConfig - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareClusterConfigGceClusterConfigConfidentialInstanceConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewClusterConfigGceClusterConfigConfidentialInstanceConfig(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewClusterConfigGceClusterConfigConfidentialInstanceConfigSlice(c *Client, des, nw []ClusterConfigGceClusterConfigConfidentialInstanceConfig) []ClusterConfigGceClusterConfigConfidentialInstanceConfig { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []ClusterConfigGceClusterConfigConfidentialInstanceConfig - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewClusterConfigGceClusterConfigConfidentialInstanceConfig(c, &d, &n)) - } - - return items -} - -func canonicalizeClusterConfigMasterConfig(des, initial *ClusterConfigMasterConfig, opts ...dcl.ApplyOption) *ClusterConfigMasterConfig { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ClusterConfigMasterConfig{} - - if dcl.IsZeroValue(des.NumInstances) || (dcl.IsEmptyValueIndirect(des.NumInstances) && dcl.IsEmptyValueIndirect(initial.NumInstances)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.NumInstances = initial.NumInstances - } else { - cDes.NumInstances = des.NumInstances - } - if dcl.IsZeroValue(des.Image) || (dcl.IsEmptyValueIndirect(des.Image) && dcl.IsEmptyValueIndirect(initial.Image)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.Image = initial.Image - } else { - cDes.Image = des.Image - } - if dcl.StringCanonicalize(des.MachineType, initial.MachineType) || dcl.IsZeroValue(des.MachineType) { - cDes.MachineType = initial.MachineType - } else { - cDes.MachineType = des.MachineType - } - cDes.DiskConfig = canonicalizeClusterConfigMasterConfigDiskConfig(des.DiskConfig, initial.DiskConfig, opts...) - if dcl.IsZeroValue(des.Preemptibility) || (dcl.IsEmptyValueIndirect(des.Preemptibility) && dcl.IsEmptyValueIndirect(initial.Preemptibility)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.Preemptibility = initial.Preemptibility - } else { - cDes.Preemptibility = des.Preemptibility - } - cDes.Accelerators = canonicalizeClusterConfigMasterConfigAcceleratorsSlice(des.Accelerators, initial.Accelerators, opts...) - if dcl.StringCanonicalize(des.MinCpuPlatform, initial.MinCpuPlatform) || dcl.IsZeroValue(des.MinCpuPlatform) { - cDes.MinCpuPlatform = initial.MinCpuPlatform - } else { - cDes.MinCpuPlatform = des.MinCpuPlatform - } - - return cDes -} - -func canonicalizeClusterConfigMasterConfigSlice(des, initial []ClusterConfigMasterConfig, opts ...dcl.ApplyOption) []ClusterConfigMasterConfig { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ClusterConfigMasterConfig, 0, len(des)) - for _, d := range des { - cd := canonicalizeClusterConfigMasterConfig(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ClusterConfigMasterConfig, 0, len(des)) - for i, d := range des { - cd := canonicalizeClusterConfigMasterConfig(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewClusterConfigMasterConfig(c *Client, des, nw *ClusterConfigMasterConfig) *ClusterConfigMasterConfig { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ClusterConfigMasterConfig while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringArrayCanonicalize(des.InstanceNames, nw.InstanceNames) { - nw.InstanceNames = des.InstanceNames - } - if dcl.StringCanonicalize(des.MachineType, nw.MachineType) { - nw.MachineType = des.MachineType - } - nw.DiskConfig = canonicalizeNewClusterConfigMasterConfigDiskConfig(c, des.DiskConfig, nw.DiskConfig) - if dcl.BoolCanonicalize(des.IsPreemptible, nw.IsPreemptible) { - nw.IsPreemptible = des.IsPreemptible - } - nw.ManagedGroupConfig = canonicalizeNewClusterConfigMasterConfigManagedGroupConfig(c, des.ManagedGroupConfig, nw.ManagedGroupConfig) - nw.Accelerators = canonicalizeNewClusterConfigMasterConfigAcceleratorsSlice(c, des.Accelerators, nw.Accelerators) - if dcl.StringCanonicalize(des.MinCpuPlatform, nw.MinCpuPlatform) { - nw.MinCpuPlatform = des.MinCpuPlatform - } - nw.InstanceReferences = canonicalizeNewClusterConfigMasterConfigInstanceReferencesSlice(c, des.InstanceReferences, nw.InstanceReferences) - - return nw -} - -func canonicalizeNewClusterConfigMasterConfigSet(c *Client, des, nw []ClusterConfigMasterConfig) []ClusterConfigMasterConfig { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []ClusterConfigMasterConfig - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareClusterConfigMasterConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewClusterConfigMasterConfig(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewClusterConfigMasterConfigSlice(c *Client, des, nw []ClusterConfigMasterConfig) []ClusterConfigMasterConfig { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []ClusterConfigMasterConfig - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewClusterConfigMasterConfig(c, &d, &n)) - } - - return items -} - -func canonicalizeClusterConfigMasterConfigDiskConfig(des, initial *ClusterConfigMasterConfigDiskConfig, opts ...dcl.ApplyOption) *ClusterConfigMasterConfigDiskConfig { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ClusterConfigMasterConfigDiskConfig{} - - if dcl.StringCanonicalize(des.BootDiskType, initial.BootDiskType) || dcl.IsZeroValue(des.BootDiskType) { - cDes.BootDiskType = initial.BootDiskType - } else { - cDes.BootDiskType = des.BootDiskType - } - if dcl.IsZeroValue(des.BootDiskSizeGb) || (dcl.IsEmptyValueIndirect(des.BootDiskSizeGb) && dcl.IsEmptyValueIndirect(initial.BootDiskSizeGb)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.BootDiskSizeGb = initial.BootDiskSizeGb - } else { - cDes.BootDiskSizeGb = des.BootDiskSizeGb - } - if dcl.IsZeroValue(des.NumLocalSsds) || (dcl.IsEmptyValueIndirect(des.NumLocalSsds) && dcl.IsEmptyValueIndirect(initial.NumLocalSsds)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.NumLocalSsds = initial.NumLocalSsds - } else { - cDes.NumLocalSsds = des.NumLocalSsds - } - if dcl.StringCanonicalize(des.LocalSsdInterface, initial.LocalSsdInterface) || dcl.IsZeroValue(des.LocalSsdInterface) { - cDes.LocalSsdInterface = initial.LocalSsdInterface - } else { - cDes.LocalSsdInterface = des.LocalSsdInterface - } - - return cDes -} - -func canonicalizeClusterConfigMasterConfigDiskConfigSlice(des, initial []ClusterConfigMasterConfigDiskConfig, opts ...dcl.ApplyOption) []ClusterConfigMasterConfigDiskConfig { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ClusterConfigMasterConfigDiskConfig, 0, len(des)) - for _, d := range des { - cd := canonicalizeClusterConfigMasterConfigDiskConfig(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ClusterConfigMasterConfigDiskConfig, 0, len(des)) - for i, d := range des { - cd := canonicalizeClusterConfigMasterConfigDiskConfig(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewClusterConfigMasterConfigDiskConfig(c *Client, des, nw *ClusterConfigMasterConfigDiskConfig) *ClusterConfigMasterConfigDiskConfig { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ClusterConfigMasterConfigDiskConfig while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.BootDiskType, nw.BootDiskType) { - nw.BootDiskType = des.BootDiskType - } - if dcl.StringCanonicalize(des.LocalSsdInterface, nw.LocalSsdInterface) { - nw.LocalSsdInterface = des.LocalSsdInterface - } - - return nw -} - -func canonicalizeNewClusterConfigMasterConfigDiskConfigSet(c *Client, des, nw []ClusterConfigMasterConfigDiskConfig) []ClusterConfigMasterConfigDiskConfig { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []ClusterConfigMasterConfigDiskConfig - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareClusterConfigMasterConfigDiskConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewClusterConfigMasterConfigDiskConfig(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewClusterConfigMasterConfigDiskConfigSlice(c *Client, des, nw []ClusterConfigMasterConfigDiskConfig) []ClusterConfigMasterConfigDiskConfig { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []ClusterConfigMasterConfigDiskConfig - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewClusterConfigMasterConfigDiskConfig(c, &d, &n)) - } - - return items -} - -func canonicalizeClusterConfigMasterConfigManagedGroupConfig(des, initial *ClusterConfigMasterConfigManagedGroupConfig, opts ...dcl.ApplyOption) *ClusterConfigMasterConfigManagedGroupConfig { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ClusterConfigMasterConfigManagedGroupConfig{} - - return cDes -} - -func canonicalizeClusterConfigMasterConfigManagedGroupConfigSlice(des, initial []ClusterConfigMasterConfigManagedGroupConfig, opts ...dcl.ApplyOption) []ClusterConfigMasterConfigManagedGroupConfig { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ClusterConfigMasterConfigManagedGroupConfig, 0, len(des)) - for _, d := range des { - cd := canonicalizeClusterConfigMasterConfigManagedGroupConfig(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ClusterConfigMasterConfigManagedGroupConfig, 0, len(des)) - for i, d := range des { - cd := canonicalizeClusterConfigMasterConfigManagedGroupConfig(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewClusterConfigMasterConfigManagedGroupConfig(c *Client, des, nw *ClusterConfigMasterConfigManagedGroupConfig) *ClusterConfigMasterConfigManagedGroupConfig { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ClusterConfigMasterConfigManagedGroupConfig while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.InstanceTemplateName, nw.InstanceTemplateName) { - nw.InstanceTemplateName = des.InstanceTemplateName - } - if dcl.StringCanonicalize(des.InstanceGroupManagerName, nw.InstanceGroupManagerName) { - nw.InstanceGroupManagerName = des.InstanceGroupManagerName - } - - return nw -} - -func canonicalizeNewClusterConfigMasterConfigManagedGroupConfigSet(c *Client, des, nw []ClusterConfigMasterConfigManagedGroupConfig) []ClusterConfigMasterConfigManagedGroupConfig { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []ClusterConfigMasterConfigManagedGroupConfig - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareClusterConfigMasterConfigManagedGroupConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewClusterConfigMasterConfigManagedGroupConfig(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewClusterConfigMasterConfigManagedGroupConfigSlice(c *Client, des, nw []ClusterConfigMasterConfigManagedGroupConfig) []ClusterConfigMasterConfigManagedGroupConfig { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []ClusterConfigMasterConfigManagedGroupConfig - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewClusterConfigMasterConfigManagedGroupConfig(c, &d, &n)) - } - - return items -} - -func canonicalizeClusterConfigMasterConfigAccelerators(des, initial *ClusterConfigMasterConfigAccelerators, opts ...dcl.ApplyOption) *ClusterConfigMasterConfigAccelerators { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ClusterConfigMasterConfigAccelerators{} - - if dcl.StringCanonicalize(des.AcceleratorType, initial.AcceleratorType) || dcl.IsZeroValue(des.AcceleratorType) { - cDes.AcceleratorType = initial.AcceleratorType - } else { - cDes.AcceleratorType = des.AcceleratorType - } - if dcl.IsZeroValue(des.AcceleratorCount) || (dcl.IsEmptyValueIndirect(des.AcceleratorCount) && dcl.IsEmptyValueIndirect(initial.AcceleratorCount)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.AcceleratorCount = initial.AcceleratorCount - } else { - cDes.AcceleratorCount = des.AcceleratorCount - } - - return cDes -} - -func canonicalizeClusterConfigMasterConfigAcceleratorsSlice(des, initial []ClusterConfigMasterConfigAccelerators, opts ...dcl.ApplyOption) []ClusterConfigMasterConfigAccelerators { - if des == nil { - return initial - } - - if len(des) != len(initial) { - - items := make([]ClusterConfigMasterConfigAccelerators, 0, len(des)) - for _, d := range des { - cd := canonicalizeClusterConfigMasterConfigAccelerators(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ClusterConfigMasterConfigAccelerators, 0, len(des)) - for i, d := range des { - cd := canonicalizeClusterConfigMasterConfigAccelerators(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewClusterConfigMasterConfigAccelerators(c *Client, des, nw *ClusterConfigMasterConfigAccelerators) *ClusterConfigMasterConfigAccelerators { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ClusterConfigMasterConfigAccelerators while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.AcceleratorType, nw.AcceleratorType) { - nw.AcceleratorType = des.AcceleratorType - } - - return nw -} - -func canonicalizeNewClusterConfigMasterConfigAcceleratorsSet(c *Client, des, nw []ClusterConfigMasterConfigAccelerators) []ClusterConfigMasterConfigAccelerators { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []ClusterConfigMasterConfigAccelerators - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareClusterConfigMasterConfigAcceleratorsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewClusterConfigMasterConfigAccelerators(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewClusterConfigMasterConfigAcceleratorsSlice(c *Client, des, nw []ClusterConfigMasterConfigAccelerators) []ClusterConfigMasterConfigAccelerators { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []ClusterConfigMasterConfigAccelerators - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewClusterConfigMasterConfigAccelerators(c, &d, &n)) - } - - return items -} - -func canonicalizeClusterConfigMasterConfigInstanceReferences(des, initial *ClusterConfigMasterConfigInstanceReferences, opts ...dcl.ApplyOption) *ClusterConfigMasterConfigInstanceReferences { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ClusterConfigMasterConfigInstanceReferences{} - - if dcl.StringCanonicalize(des.InstanceName, initial.InstanceName) || dcl.IsZeroValue(des.InstanceName) { - cDes.InstanceName = initial.InstanceName - } else { - cDes.InstanceName = des.InstanceName - } - if dcl.StringCanonicalize(des.InstanceId, initial.InstanceId) || dcl.IsZeroValue(des.InstanceId) { - cDes.InstanceId = initial.InstanceId - } else { - cDes.InstanceId = des.InstanceId - } - if dcl.StringCanonicalize(des.PublicKey, initial.PublicKey) || dcl.IsZeroValue(des.PublicKey) { - cDes.PublicKey = initial.PublicKey - } else { - cDes.PublicKey = des.PublicKey - } - if dcl.StringCanonicalize(des.PublicEciesKey, initial.PublicEciesKey) || dcl.IsZeroValue(des.PublicEciesKey) { - cDes.PublicEciesKey = initial.PublicEciesKey - } else { - cDes.PublicEciesKey = des.PublicEciesKey - } - - return cDes -} - -func canonicalizeClusterConfigMasterConfigInstanceReferencesSlice(des, initial []ClusterConfigMasterConfigInstanceReferences, opts ...dcl.ApplyOption) []ClusterConfigMasterConfigInstanceReferences { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ClusterConfigMasterConfigInstanceReferences, 0, len(des)) - for _, d := range des { - cd := canonicalizeClusterConfigMasterConfigInstanceReferences(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ClusterConfigMasterConfigInstanceReferences, 0, len(des)) - for i, d := range des { - cd := canonicalizeClusterConfigMasterConfigInstanceReferences(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewClusterConfigMasterConfigInstanceReferences(c *Client, des, nw *ClusterConfigMasterConfigInstanceReferences) *ClusterConfigMasterConfigInstanceReferences { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ClusterConfigMasterConfigInstanceReferences while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.InstanceName, nw.InstanceName) { - nw.InstanceName = des.InstanceName - } - if dcl.StringCanonicalize(des.InstanceId, nw.InstanceId) { - nw.InstanceId = des.InstanceId - } - if dcl.StringCanonicalize(des.PublicKey, nw.PublicKey) { - nw.PublicKey = des.PublicKey - } - if dcl.StringCanonicalize(des.PublicEciesKey, nw.PublicEciesKey) { - nw.PublicEciesKey = des.PublicEciesKey - } - - return nw -} - -func canonicalizeNewClusterConfigMasterConfigInstanceReferencesSet(c *Client, des, nw []ClusterConfigMasterConfigInstanceReferences) []ClusterConfigMasterConfigInstanceReferences { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []ClusterConfigMasterConfigInstanceReferences - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareClusterConfigMasterConfigInstanceReferencesNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewClusterConfigMasterConfigInstanceReferences(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewClusterConfigMasterConfigInstanceReferencesSlice(c *Client, des, nw []ClusterConfigMasterConfigInstanceReferences) []ClusterConfigMasterConfigInstanceReferences { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []ClusterConfigMasterConfigInstanceReferences - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewClusterConfigMasterConfigInstanceReferences(c, &d, &n)) - } - - return items -} - -func canonicalizeClusterConfigWorkerConfig(des, initial *ClusterConfigWorkerConfig, opts ...dcl.ApplyOption) *ClusterConfigWorkerConfig { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ClusterConfigWorkerConfig{} - - if dcl.IsZeroValue(des.NumInstances) || (dcl.IsEmptyValueIndirect(des.NumInstances) && dcl.IsEmptyValueIndirect(initial.NumInstances)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.NumInstances = initial.NumInstances - } else { - cDes.NumInstances = des.NumInstances - } - if dcl.IsZeroValue(des.Image) || (dcl.IsEmptyValueIndirect(des.Image) && dcl.IsEmptyValueIndirect(initial.Image)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.Image = initial.Image - } else { - cDes.Image = des.Image - } - if dcl.StringCanonicalize(des.MachineType, initial.MachineType) || dcl.IsZeroValue(des.MachineType) { - cDes.MachineType = initial.MachineType - } else { - cDes.MachineType = des.MachineType - } - cDes.DiskConfig = canonicalizeClusterConfigWorkerConfigDiskConfig(des.DiskConfig, initial.DiskConfig, opts...) - if dcl.IsZeroValue(des.Preemptibility) || (dcl.IsEmptyValueIndirect(des.Preemptibility) && dcl.IsEmptyValueIndirect(initial.Preemptibility)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.Preemptibility = initial.Preemptibility - } else { - cDes.Preemptibility = des.Preemptibility - } - cDes.Accelerators = canonicalizeClusterConfigWorkerConfigAcceleratorsSlice(des.Accelerators, initial.Accelerators, opts...) - if dcl.StringCanonicalize(des.MinCpuPlatform, initial.MinCpuPlatform) || dcl.IsZeroValue(des.MinCpuPlatform) { - cDes.MinCpuPlatform = initial.MinCpuPlatform - } else { - cDes.MinCpuPlatform = des.MinCpuPlatform - } - - return cDes -} - -func canonicalizeClusterConfigWorkerConfigSlice(des, initial []ClusterConfigWorkerConfig, opts ...dcl.ApplyOption) []ClusterConfigWorkerConfig { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ClusterConfigWorkerConfig, 0, len(des)) - for _, d := range des { - cd := canonicalizeClusterConfigWorkerConfig(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ClusterConfigWorkerConfig, 0, len(des)) - for i, d := range des { - cd := canonicalizeClusterConfigWorkerConfig(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewClusterConfigWorkerConfig(c *Client, des, nw *ClusterConfigWorkerConfig) *ClusterConfigWorkerConfig { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ClusterConfigWorkerConfig while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringArrayCanonicalize(des.InstanceNames, nw.InstanceNames) { - nw.InstanceNames = des.InstanceNames - } - if dcl.StringCanonicalize(des.MachineType, nw.MachineType) { - nw.MachineType = des.MachineType - } - nw.DiskConfig = canonicalizeNewClusterConfigWorkerConfigDiskConfig(c, des.DiskConfig, nw.DiskConfig) - if dcl.BoolCanonicalize(des.IsPreemptible, nw.IsPreemptible) { - nw.IsPreemptible = des.IsPreemptible - } - nw.ManagedGroupConfig = canonicalizeNewClusterConfigWorkerConfigManagedGroupConfig(c, des.ManagedGroupConfig, nw.ManagedGroupConfig) - nw.Accelerators = canonicalizeNewClusterConfigWorkerConfigAcceleratorsSlice(c, des.Accelerators, nw.Accelerators) - if dcl.StringCanonicalize(des.MinCpuPlatform, nw.MinCpuPlatform) { - nw.MinCpuPlatform = des.MinCpuPlatform - } - nw.InstanceReferences = canonicalizeNewClusterConfigWorkerConfigInstanceReferencesSlice(c, des.InstanceReferences, nw.InstanceReferences) - - return nw -} - -func canonicalizeNewClusterConfigWorkerConfigSet(c *Client, des, nw []ClusterConfigWorkerConfig) []ClusterConfigWorkerConfig { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []ClusterConfigWorkerConfig - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareClusterConfigWorkerConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewClusterConfigWorkerConfig(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewClusterConfigWorkerConfigSlice(c *Client, des, nw []ClusterConfigWorkerConfig) []ClusterConfigWorkerConfig { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []ClusterConfigWorkerConfig - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewClusterConfigWorkerConfig(c, &d, &n)) - } - - return items -} - -func canonicalizeClusterConfigWorkerConfigDiskConfig(des, initial *ClusterConfigWorkerConfigDiskConfig, opts ...dcl.ApplyOption) *ClusterConfigWorkerConfigDiskConfig { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ClusterConfigWorkerConfigDiskConfig{} - - if dcl.StringCanonicalize(des.BootDiskType, initial.BootDiskType) || dcl.IsZeroValue(des.BootDiskType) { - cDes.BootDiskType = initial.BootDiskType - } else { - cDes.BootDiskType = des.BootDiskType - } - if dcl.IsZeroValue(des.BootDiskSizeGb) || (dcl.IsEmptyValueIndirect(des.BootDiskSizeGb) && dcl.IsEmptyValueIndirect(initial.BootDiskSizeGb)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.BootDiskSizeGb = initial.BootDiskSizeGb - } else { - cDes.BootDiskSizeGb = des.BootDiskSizeGb - } - if dcl.IsZeroValue(des.NumLocalSsds) || (dcl.IsEmptyValueIndirect(des.NumLocalSsds) && dcl.IsEmptyValueIndirect(initial.NumLocalSsds)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.NumLocalSsds = initial.NumLocalSsds - } else { - cDes.NumLocalSsds = des.NumLocalSsds - } - if dcl.StringCanonicalize(des.LocalSsdInterface, initial.LocalSsdInterface) || dcl.IsZeroValue(des.LocalSsdInterface) { - cDes.LocalSsdInterface = initial.LocalSsdInterface - } else { - cDes.LocalSsdInterface = des.LocalSsdInterface - } - - return cDes -} - -func canonicalizeClusterConfigWorkerConfigDiskConfigSlice(des, initial []ClusterConfigWorkerConfigDiskConfig, opts ...dcl.ApplyOption) []ClusterConfigWorkerConfigDiskConfig { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ClusterConfigWorkerConfigDiskConfig, 0, len(des)) - for _, d := range des { - cd := canonicalizeClusterConfigWorkerConfigDiskConfig(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ClusterConfigWorkerConfigDiskConfig, 0, len(des)) - for i, d := range des { - cd := canonicalizeClusterConfigWorkerConfigDiskConfig(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewClusterConfigWorkerConfigDiskConfig(c *Client, des, nw *ClusterConfigWorkerConfigDiskConfig) *ClusterConfigWorkerConfigDiskConfig { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ClusterConfigWorkerConfigDiskConfig while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.BootDiskType, nw.BootDiskType) { - nw.BootDiskType = des.BootDiskType - } - if dcl.StringCanonicalize(des.LocalSsdInterface, nw.LocalSsdInterface) { - nw.LocalSsdInterface = des.LocalSsdInterface - } - - return nw -} - -func canonicalizeNewClusterConfigWorkerConfigDiskConfigSet(c *Client, des, nw []ClusterConfigWorkerConfigDiskConfig) []ClusterConfigWorkerConfigDiskConfig { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []ClusterConfigWorkerConfigDiskConfig - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareClusterConfigWorkerConfigDiskConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewClusterConfigWorkerConfigDiskConfig(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewClusterConfigWorkerConfigDiskConfigSlice(c *Client, des, nw []ClusterConfigWorkerConfigDiskConfig) []ClusterConfigWorkerConfigDiskConfig { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []ClusterConfigWorkerConfigDiskConfig - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewClusterConfigWorkerConfigDiskConfig(c, &d, &n)) - } - - return items -} - -func canonicalizeClusterConfigWorkerConfigManagedGroupConfig(des, initial *ClusterConfigWorkerConfigManagedGroupConfig, opts ...dcl.ApplyOption) *ClusterConfigWorkerConfigManagedGroupConfig { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ClusterConfigWorkerConfigManagedGroupConfig{} - - return cDes -} - -func canonicalizeClusterConfigWorkerConfigManagedGroupConfigSlice(des, initial []ClusterConfigWorkerConfigManagedGroupConfig, opts ...dcl.ApplyOption) []ClusterConfigWorkerConfigManagedGroupConfig { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ClusterConfigWorkerConfigManagedGroupConfig, 0, len(des)) - for _, d := range des { - cd := canonicalizeClusterConfigWorkerConfigManagedGroupConfig(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ClusterConfigWorkerConfigManagedGroupConfig, 0, len(des)) - for i, d := range des { - cd := canonicalizeClusterConfigWorkerConfigManagedGroupConfig(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewClusterConfigWorkerConfigManagedGroupConfig(c *Client, des, nw *ClusterConfigWorkerConfigManagedGroupConfig) *ClusterConfigWorkerConfigManagedGroupConfig { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ClusterConfigWorkerConfigManagedGroupConfig while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.InstanceTemplateName, nw.InstanceTemplateName) { - nw.InstanceTemplateName = des.InstanceTemplateName - } - if dcl.StringCanonicalize(des.InstanceGroupManagerName, nw.InstanceGroupManagerName) { - nw.InstanceGroupManagerName = des.InstanceGroupManagerName - } - - return nw -} - -func canonicalizeNewClusterConfigWorkerConfigManagedGroupConfigSet(c *Client, des, nw []ClusterConfigWorkerConfigManagedGroupConfig) []ClusterConfigWorkerConfigManagedGroupConfig { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []ClusterConfigWorkerConfigManagedGroupConfig - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareClusterConfigWorkerConfigManagedGroupConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewClusterConfigWorkerConfigManagedGroupConfig(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewClusterConfigWorkerConfigManagedGroupConfigSlice(c *Client, des, nw []ClusterConfigWorkerConfigManagedGroupConfig) []ClusterConfigWorkerConfigManagedGroupConfig { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []ClusterConfigWorkerConfigManagedGroupConfig - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewClusterConfigWorkerConfigManagedGroupConfig(c, &d, &n)) - } - - return items -} - -func canonicalizeClusterConfigWorkerConfigAccelerators(des, initial *ClusterConfigWorkerConfigAccelerators, opts ...dcl.ApplyOption) *ClusterConfigWorkerConfigAccelerators { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ClusterConfigWorkerConfigAccelerators{} - - if dcl.StringCanonicalize(des.AcceleratorType, initial.AcceleratorType) || dcl.IsZeroValue(des.AcceleratorType) { - cDes.AcceleratorType = initial.AcceleratorType - } else { - cDes.AcceleratorType = des.AcceleratorType - } - if dcl.IsZeroValue(des.AcceleratorCount) || (dcl.IsEmptyValueIndirect(des.AcceleratorCount) && dcl.IsEmptyValueIndirect(initial.AcceleratorCount)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.AcceleratorCount = initial.AcceleratorCount - } else { - cDes.AcceleratorCount = des.AcceleratorCount - } - - return cDes -} - -func canonicalizeClusterConfigWorkerConfigAcceleratorsSlice(des, initial []ClusterConfigWorkerConfigAccelerators, opts ...dcl.ApplyOption) []ClusterConfigWorkerConfigAccelerators { - if des == nil { - return initial - } - - if len(des) != len(initial) { - - items := make([]ClusterConfigWorkerConfigAccelerators, 0, len(des)) - for _, d := range des { - cd := canonicalizeClusterConfigWorkerConfigAccelerators(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ClusterConfigWorkerConfigAccelerators, 0, len(des)) - for i, d := range des { - cd := canonicalizeClusterConfigWorkerConfigAccelerators(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewClusterConfigWorkerConfigAccelerators(c *Client, des, nw *ClusterConfigWorkerConfigAccelerators) *ClusterConfigWorkerConfigAccelerators { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ClusterConfigWorkerConfigAccelerators while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.AcceleratorType, nw.AcceleratorType) { - nw.AcceleratorType = des.AcceleratorType - } - - return nw -} - -func canonicalizeNewClusterConfigWorkerConfigAcceleratorsSet(c *Client, des, nw []ClusterConfigWorkerConfigAccelerators) []ClusterConfigWorkerConfigAccelerators { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []ClusterConfigWorkerConfigAccelerators - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareClusterConfigWorkerConfigAcceleratorsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewClusterConfigWorkerConfigAccelerators(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewClusterConfigWorkerConfigAcceleratorsSlice(c *Client, des, nw []ClusterConfigWorkerConfigAccelerators) []ClusterConfigWorkerConfigAccelerators { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []ClusterConfigWorkerConfigAccelerators - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewClusterConfigWorkerConfigAccelerators(c, &d, &n)) - } - - return items -} - -func canonicalizeClusterConfigWorkerConfigInstanceReferences(des, initial *ClusterConfigWorkerConfigInstanceReferences, opts ...dcl.ApplyOption) *ClusterConfigWorkerConfigInstanceReferences { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ClusterConfigWorkerConfigInstanceReferences{} - - if dcl.StringCanonicalize(des.InstanceName, initial.InstanceName) || dcl.IsZeroValue(des.InstanceName) { - cDes.InstanceName = initial.InstanceName - } else { - cDes.InstanceName = des.InstanceName - } - if dcl.StringCanonicalize(des.InstanceId, initial.InstanceId) || dcl.IsZeroValue(des.InstanceId) { - cDes.InstanceId = initial.InstanceId - } else { - cDes.InstanceId = des.InstanceId - } - if dcl.StringCanonicalize(des.PublicKey, initial.PublicKey) || dcl.IsZeroValue(des.PublicKey) { - cDes.PublicKey = initial.PublicKey - } else { - cDes.PublicKey = des.PublicKey - } - if dcl.StringCanonicalize(des.PublicEciesKey, initial.PublicEciesKey) || dcl.IsZeroValue(des.PublicEciesKey) { - cDes.PublicEciesKey = initial.PublicEciesKey - } else { - cDes.PublicEciesKey = des.PublicEciesKey - } - - return cDes -} - -func canonicalizeClusterConfigWorkerConfigInstanceReferencesSlice(des, initial []ClusterConfigWorkerConfigInstanceReferences, opts ...dcl.ApplyOption) []ClusterConfigWorkerConfigInstanceReferences { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ClusterConfigWorkerConfigInstanceReferences, 0, len(des)) - for _, d := range des { - cd := canonicalizeClusterConfigWorkerConfigInstanceReferences(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ClusterConfigWorkerConfigInstanceReferences, 0, len(des)) - for i, d := range des { - cd := canonicalizeClusterConfigWorkerConfigInstanceReferences(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewClusterConfigWorkerConfigInstanceReferences(c *Client, des, nw *ClusterConfigWorkerConfigInstanceReferences) *ClusterConfigWorkerConfigInstanceReferences { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ClusterConfigWorkerConfigInstanceReferences while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.InstanceName, nw.InstanceName) { - nw.InstanceName = des.InstanceName - } - if dcl.StringCanonicalize(des.InstanceId, nw.InstanceId) { - nw.InstanceId = des.InstanceId - } - if dcl.StringCanonicalize(des.PublicKey, nw.PublicKey) { - nw.PublicKey = des.PublicKey - } - if dcl.StringCanonicalize(des.PublicEciesKey, nw.PublicEciesKey) { - nw.PublicEciesKey = des.PublicEciesKey - } - - return nw -} - -func canonicalizeNewClusterConfigWorkerConfigInstanceReferencesSet(c *Client, des, nw []ClusterConfigWorkerConfigInstanceReferences) []ClusterConfigWorkerConfigInstanceReferences { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []ClusterConfigWorkerConfigInstanceReferences - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareClusterConfigWorkerConfigInstanceReferencesNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewClusterConfigWorkerConfigInstanceReferences(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewClusterConfigWorkerConfigInstanceReferencesSlice(c *Client, des, nw []ClusterConfigWorkerConfigInstanceReferences) []ClusterConfigWorkerConfigInstanceReferences { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []ClusterConfigWorkerConfigInstanceReferences - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewClusterConfigWorkerConfigInstanceReferences(c, &d, &n)) - } - - return items -} - -func canonicalizeClusterConfigSecondaryWorkerConfig(des, initial *ClusterConfigSecondaryWorkerConfig, opts ...dcl.ApplyOption) *ClusterConfigSecondaryWorkerConfig { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ClusterConfigSecondaryWorkerConfig{} - - if dcl.IsZeroValue(des.NumInstances) || (dcl.IsEmptyValueIndirect(des.NumInstances) && dcl.IsEmptyValueIndirect(initial.NumInstances)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.NumInstances = initial.NumInstances - } else { - cDes.NumInstances = des.NumInstances - } - if dcl.IsZeroValue(des.Image) || (dcl.IsEmptyValueIndirect(des.Image) && dcl.IsEmptyValueIndirect(initial.Image)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.Image = initial.Image - } else { - cDes.Image = des.Image - } - if dcl.StringCanonicalize(des.MachineType, initial.MachineType) || dcl.IsZeroValue(des.MachineType) { - cDes.MachineType = initial.MachineType - } else { - cDes.MachineType = des.MachineType - } - cDes.DiskConfig = canonicalizeClusterConfigSecondaryWorkerConfigDiskConfig(des.DiskConfig, initial.DiskConfig, opts...) - if dcl.IsZeroValue(des.Preemptibility) || (dcl.IsEmptyValueIndirect(des.Preemptibility) && dcl.IsEmptyValueIndirect(initial.Preemptibility)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.Preemptibility = initial.Preemptibility - } else { - cDes.Preemptibility = des.Preemptibility - } - cDes.Accelerators = canonicalizeClusterConfigSecondaryWorkerConfigAcceleratorsSlice(des.Accelerators, initial.Accelerators, opts...) - if dcl.StringCanonicalize(des.MinCpuPlatform, initial.MinCpuPlatform) || dcl.IsZeroValue(des.MinCpuPlatform) { - cDes.MinCpuPlatform = initial.MinCpuPlatform - } else { - cDes.MinCpuPlatform = des.MinCpuPlatform - } - - return cDes -} - -func canonicalizeClusterConfigSecondaryWorkerConfigSlice(des, initial []ClusterConfigSecondaryWorkerConfig, opts ...dcl.ApplyOption) []ClusterConfigSecondaryWorkerConfig { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ClusterConfigSecondaryWorkerConfig, 0, len(des)) - for _, d := range des { - cd := canonicalizeClusterConfigSecondaryWorkerConfig(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ClusterConfigSecondaryWorkerConfig, 0, len(des)) - for i, d := range des { - cd := canonicalizeClusterConfigSecondaryWorkerConfig(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewClusterConfigSecondaryWorkerConfig(c *Client, des, nw *ClusterConfigSecondaryWorkerConfig) *ClusterConfigSecondaryWorkerConfig { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ClusterConfigSecondaryWorkerConfig while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringArrayCanonicalize(des.InstanceNames, nw.InstanceNames) { - nw.InstanceNames = des.InstanceNames - } - if dcl.StringCanonicalize(des.MachineType, nw.MachineType) { - nw.MachineType = des.MachineType - } - nw.DiskConfig = canonicalizeNewClusterConfigSecondaryWorkerConfigDiskConfig(c, des.DiskConfig, nw.DiskConfig) - if dcl.BoolCanonicalize(des.IsPreemptible, nw.IsPreemptible) { - nw.IsPreemptible = des.IsPreemptible - } - nw.ManagedGroupConfig = canonicalizeNewClusterConfigSecondaryWorkerConfigManagedGroupConfig(c, des.ManagedGroupConfig, nw.ManagedGroupConfig) - nw.Accelerators = canonicalizeNewClusterConfigSecondaryWorkerConfigAcceleratorsSlice(c, des.Accelerators, nw.Accelerators) - if dcl.StringCanonicalize(des.MinCpuPlatform, nw.MinCpuPlatform) { - nw.MinCpuPlatform = des.MinCpuPlatform - } - nw.InstanceReferences = canonicalizeNewClusterConfigSecondaryWorkerConfigInstanceReferencesSlice(c, des.InstanceReferences, nw.InstanceReferences) - - return nw -} - -func canonicalizeNewClusterConfigSecondaryWorkerConfigSet(c *Client, des, nw []ClusterConfigSecondaryWorkerConfig) []ClusterConfigSecondaryWorkerConfig { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw 
and canonicalize them. Remove matched elements from nw. - var items []ClusterConfigSecondaryWorkerConfig - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareClusterConfigSecondaryWorkerConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewClusterConfigSecondaryWorkerConfig(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewClusterConfigSecondaryWorkerConfigSlice(c *Client, des, nw []ClusterConfigSecondaryWorkerConfig) []ClusterConfigSecondaryWorkerConfig { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []ClusterConfigSecondaryWorkerConfig - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewClusterConfigSecondaryWorkerConfig(c, &d, &n)) - } - - return items -} - -func canonicalizeClusterConfigSecondaryWorkerConfigDiskConfig(des, initial *ClusterConfigSecondaryWorkerConfigDiskConfig, opts ...dcl.ApplyOption) *ClusterConfigSecondaryWorkerConfigDiskConfig { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ClusterConfigSecondaryWorkerConfigDiskConfig{} - - if dcl.StringCanonicalize(des.BootDiskType, initial.BootDiskType) || dcl.IsZeroValue(des.BootDiskType) { - cDes.BootDiskType = initial.BootDiskType - } else { - cDes.BootDiskType = des.BootDiskType - } - if dcl.IsZeroValue(des.BootDiskSizeGb) || (dcl.IsEmptyValueIndirect(des.BootDiskSizeGb) && dcl.IsEmptyValueIndirect(initial.BootDiskSizeGb)) { - // Desired and initial values are equivalent, so set canonical desired value to initial 
value. - cDes.BootDiskSizeGb = initial.BootDiskSizeGb - } else { - cDes.BootDiskSizeGb = des.BootDiskSizeGb - } - if dcl.IsZeroValue(des.NumLocalSsds) || (dcl.IsEmptyValueIndirect(des.NumLocalSsds) && dcl.IsEmptyValueIndirect(initial.NumLocalSsds)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.NumLocalSsds = initial.NumLocalSsds - } else { - cDes.NumLocalSsds = des.NumLocalSsds - } - if dcl.StringCanonicalize(des.LocalSsdInterface, initial.LocalSsdInterface) || dcl.IsZeroValue(des.LocalSsdInterface) { - cDes.LocalSsdInterface = initial.LocalSsdInterface - } else { - cDes.LocalSsdInterface = des.LocalSsdInterface - } - - return cDes -} - -func canonicalizeClusterConfigSecondaryWorkerConfigDiskConfigSlice(des, initial []ClusterConfigSecondaryWorkerConfigDiskConfig, opts ...dcl.ApplyOption) []ClusterConfigSecondaryWorkerConfigDiskConfig { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ClusterConfigSecondaryWorkerConfigDiskConfig, 0, len(des)) - for _, d := range des { - cd := canonicalizeClusterConfigSecondaryWorkerConfigDiskConfig(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ClusterConfigSecondaryWorkerConfigDiskConfig, 0, len(des)) - for i, d := range des { - cd := canonicalizeClusterConfigSecondaryWorkerConfigDiskConfig(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewClusterConfigSecondaryWorkerConfigDiskConfig(c *Client, des, nw *ClusterConfigSecondaryWorkerConfigDiskConfig) *ClusterConfigSecondaryWorkerConfigDiskConfig { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ClusterConfigSecondaryWorkerConfigDiskConfig while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.BootDiskType, nw.BootDiskType) { - nw.BootDiskType = des.BootDiskType - } - if dcl.StringCanonicalize(des.LocalSsdInterface, nw.LocalSsdInterface) { - nw.LocalSsdInterface = des.LocalSsdInterface - } - - return nw -} - -func canonicalizeNewClusterConfigSecondaryWorkerConfigDiskConfigSet(c *Client, des, nw []ClusterConfigSecondaryWorkerConfigDiskConfig) []ClusterConfigSecondaryWorkerConfigDiskConfig { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []ClusterConfigSecondaryWorkerConfigDiskConfig - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareClusterConfigSecondaryWorkerConfigDiskConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewClusterConfigSecondaryWorkerConfigDiskConfig(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewClusterConfigSecondaryWorkerConfigDiskConfigSlice(c *Client, des, nw []ClusterConfigSecondaryWorkerConfigDiskConfig) []ClusterConfigSecondaryWorkerConfigDiskConfig { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []ClusterConfigSecondaryWorkerConfigDiskConfig - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewClusterConfigSecondaryWorkerConfigDiskConfig(c, &d, &n)) - } - - return items -} - -func canonicalizeClusterConfigSecondaryWorkerConfigManagedGroupConfig(des, initial *ClusterConfigSecondaryWorkerConfigManagedGroupConfig, opts ...dcl.ApplyOption) *ClusterConfigSecondaryWorkerConfigManagedGroupConfig { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ClusterConfigSecondaryWorkerConfigManagedGroupConfig{} - - return cDes -} - -func canonicalizeClusterConfigSecondaryWorkerConfigManagedGroupConfigSlice(des, initial []ClusterConfigSecondaryWorkerConfigManagedGroupConfig, opts ...dcl.ApplyOption) []ClusterConfigSecondaryWorkerConfigManagedGroupConfig { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ClusterConfigSecondaryWorkerConfigManagedGroupConfig, 0, len(des)) - for _, d := range des { - cd := canonicalizeClusterConfigSecondaryWorkerConfigManagedGroupConfig(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ClusterConfigSecondaryWorkerConfigManagedGroupConfig, 0, len(des)) - for i, d := range des { - cd := canonicalizeClusterConfigSecondaryWorkerConfigManagedGroupConfig(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewClusterConfigSecondaryWorkerConfigManagedGroupConfig(c *Client, des, nw *ClusterConfigSecondaryWorkerConfigManagedGroupConfig) *ClusterConfigSecondaryWorkerConfigManagedGroupConfig { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ClusterConfigSecondaryWorkerConfigManagedGroupConfig while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.InstanceTemplateName, nw.InstanceTemplateName) { - nw.InstanceTemplateName = des.InstanceTemplateName - } - if dcl.StringCanonicalize(des.InstanceGroupManagerName, nw.InstanceGroupManagerName) { - nw.InstanceGroupManagerName = des.InstanceGroupManagerName - } - - return nw -} - -func canonicalizeNewClusterConfigSecondaryWorkerConfigManagedGroupConfigSet(c *Client, des, nw []ClusterConfigSecondaryWorkerConfigManagedGroupConfig) []ClusterConfigSecondaryWorkerConfigManagedGroupConfig { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []ClusterConfigSecondaryWorkerConfigManagedGroupConfig - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareClusterConfigSecondaryWorkerConfigManagedGroupConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewClusterConfigSecondaryWorkerConfigManagedGroupConfig(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) 
- - return items -} - -func canonicalizeNewClusterConfigSecondaryWorkerConfigManagedGroupConfigSlice(c *Client, des, nw []ClusterConfigSecondaryWorkerConfigManagedGroupConfig) []ClusterConfigSecondaryWorkerConfigManagedGroupConfig { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []ClusterConfigSecondaryWorkerConfigManagedGroupConfig - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewClusterConfigSecondaryWorkerConfigManagedGroupConfig(c, &d, &n)) - } - - return items -} - -func canonicalizeClusterConfigSecondaryWorkerConfigAccelerators(des, initial *ClusterConfigSecondaryWorkerConfigAccelerators, opts ...dcl.ApplyOption) *ClusterConfigSecondaryWorkerConfigAccelerators { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ClusterConfigSecondaryWorkerConfigAccelerators{} - - if dcl.StringCanonicalize(des.AcceleratorType, initial.AcceleratorType) || dcl.IsZeroValue(des.AcceleratorType) { - cDes.AcceleratorType = initial.AcceleratorType - } else { - cDes.AcceleratorType = des.AcceleratorType - } - if dcl.IsZeroValue(des.AcceleratorCount) || (dcl.IsEmptyValueIndirect(des.AcceleratorCount) && dcl.IsEmptyValueIndirect(initial.AcceleratorCount)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.AcceleratorCount = initial.AcceleratorCount - } else { - cDes.AcceleratorCount = des.AcceleratorCount - } - - return cDes -} - -func canonicalizeClusterConfigSecondaryWorkerConfigAcceleratorsSlice(des, initial []ClusterConfigSecondaryWorkerConfigAccelerators, opts ...dcl.ApplyOption) []ClusterConfigSecondaryWorkerConfigAccelerators { - if des == nil { - return initial - } - - if len(des) != len(initial) { - - items := make([]ClusterConfigSecondaryWorkerConfigAccelerators, 0, len(des)) - for _, d := range des { - cd := canonicalizeClusterConfigSecondaryWorkerConfigAccelerators(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ClusterConfigSecondaryWorkerConfigAccelerators, 0, len(des)) - for i, d := range des { - cd := canonicalizeClusterConfigSecondaryWorkerConfigAccelerators(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewClusterConfigSecondaryWorkerConfigAccelerators(c *Client, des, nw *ClusterConfigSecondaryWorkerConfigAccelerators) *ClusterConfigSecondaryWorkerConfigAccelerators { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ClusterConfigSecondaryWorkerConfigAccelerators while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.AcceleratorType, nw.AcceleratorType) { - nw.AcceleratorType = des.AcceleratorType - } - - return nw -} - -func canonicalizeNewClusterConfigSecondaryWorkerConfigAcceleratorsSet(c *Client, des, nw []ClusterConfigSecondaryWorkerConfigAccelerators) []ClusterConfigSecondaryWorkerConfigAccelerators { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []ClusterConfigSecondaryWorkerConfigAccelerators - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareClusterConfigSecondaryWorkerConfigAcceleratorsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewClusterConfigSecondaryWorkerConfigAccelerators(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewClusterConfigSecondaryWorkerConfigAcceleratorsSlice(c *Client, des, nw []ClusterConfigSecondaryWorkerConfigAccelerators) []ClusterConfigSecondaryWorkerConfigAccelerators { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []ClusterConfigSecondaryWorkerConfigAccelerators - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewClusterConfigSecondaryWorkerConfigAccelerators(c, &d, &n)) - } - - return items -} - -func canonicalizeClusterConfigSecondaryWorkerConfigInstanceReferences(des, initial *ClusterConfigSecondaryWorkerConfigInstanceReferences, opts ...dcl.ApplyOption) *ClusterConfigSecondaryWorkerConfigInstanceReferences { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ClusterConfigSecondaryWorkerConfigInstanceReferences{} - - if dcl.StringCanonicalize(des.InstanceName, initial.InstanceName) || dcl.IsZeroValue(des.InstanceName) { - cDes.InstanceName = initial.InstanceName - } else { - cDes.InstanceName = des.InstanceName - } - if dcl.StringCanonicalize(des.InstanceId, initial.InstanceId) || dcl.IsZeroValue(des.InstanceId) { - cDes.InstanceId = initial.InstanceId - } else { - cDes.InstanceId 
= des.InstanceId - } - if dcl.StringCanonicalize(des.PublicKey, initial.PublicKey) || dcl.IsZeroValue(des.PublicKey) { - cDes.PublicKey = initial.PublicKey - } else { - cDes.PublicKey = des.PublicKey - } - if dcl.StringCanonicalize(des.PublicEciesKey, initial.PublicEciesKey) || dcl.IsZeroValue(des.PublicEciesKey) { - cDes.PublicEciesKey = initial.PublicEciesKey - } else { - cDes.PublicEciesKey = des.PublicEciesKey - } - - return cDes -} - -func canonicalizeClusterConfigSecondaryWorkerConfigInstanceReferencesSlice(des, initial []ClusterConfigSecondaryWorkerConfigInstanceReferences, opts ...dcl.ApplyOption) []ClusterConfigSecondaryWorkerConfigInstanceReferences { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ClusterConfigSecondaryWorkerConfigInstanceReferences, 0, len(des)) - for _, d := range des { - cd := canonicalizeClusterConfigSecondaryWorkerConfigInstanceReferences(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ClusterConfigSecondaryWorkerConfigInstanceReferences, 0, len(des)) - for i, d := range des { - cd := canonicalizeClusterConfigSecondaryWorkerConfigInstanceReferences(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewClusterConfigSecondaryWorkerConfigInstanceReferences(c *Client, des, nw *ClusterConfigSecondaryWorkerConfigInstanceReferences) *ClusterConfigSecondaryWorkerConfigInstanceReferences { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ClusterConfigSecondaryWorkerConfigInstanceReferences while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.InstanceName, nw.InstanceName) { - nw.InstanceName = des.InstanceName - } - if dcl.StringCanonicalize(des.InstanceId, nw.InstanceId) { - nw.InstanceId = des.InstanceId - } - if dcl.StringCanonicalize(des.PublicKey, nw.PublicKey) { - nw.PublicKey = des.PublicKey - } - if dcl.StringCanonicalize(des.PublicEciesKey, nw.PublicEciesKey) { - nw.PublicEciesKey = des.PublicEciesKey - } - - return nw -} - -func canonicalizeNewClusterConfigSecondaryWorkerConfigInstanceReferencesSet(c *Client, des, nw []ClusterConfigSecondaryWorkerConfigInstanceReferences) []ClusterConfigSecondaryWorkerConfigInstanceReferences { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []ClusterConfigSecondaryWorkerConfigInstanceReferences - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareClusterConfigSecondaryWorkerConfigInstanceReferencesNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewClusterConfigSecondaryWorkerConfigInstanceReferences(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewClusterConfigSecondaryWorkerConfigInstanceReferencesSlice(c *Client, des, nw []ClusterConfigSecondaryWorkerConfigInstanceReferences) []ClusterConfigSecondaryWorkerConfigInstanceReferences { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []ClusterConfigSecondaryWorkerConfigInstanceReferences - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewClusterConfigSecondaryWorkerConfigInstanceReferences(c, &d, &n)) - } - - return items -} - -func canonicalizeClusterConfigSoftwareConfig(des, initial *ClusterConfigSoftwareConfig, opts ...dcl.ApplyOption) *ClusterConfigSoftwareConfig { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ClusterConfigSoftwareConfig{} - - if dcl.StringCanonicalize(des.ImageVersion, initial.ImageVersion) || dcl.IsZeroValue(des.ImageVersion) { - cDes.ImageVersion = initial.ImageVersion - } else { - cDes.ImageVersion = des.ImageVersion - } - if canonicalizeSoftwareConfigProperties(des.Properties, initial.Properties) || dcl.IsZeroValue(des.Properties) { - cDes.Properties = initial.Properties - } else { - cDes.Properties = des.Properties - } - if dcl.IsZeroValue(des.OptionalComponents) || (dcl.IsEmptyValueIndirect(des.OptionalComponents) && dcl.IsEmptyValueIndirect(initial.OptionalComponents)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.OptionalComponents = initial.OptionalComponents - } else { - cDes.OptionalComponents = des.OptionalComponents - } - - return cDes -} - -func canonicalizeClusterConfigSoftwareConfigSlice(des, initial []ClusterConfigSoftwareConfig, opts ...dcl.ApplyOption) []ClusterConfigSoftwareConfig { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ClusterConfigSoftwareConfig, 0, len(des)) - for _, d := range des { - cd := canonicalizeClusterConfigSoftwareConfig(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ClusterConfigSoftwareConfig, 0, len(des)) - for i, d := range des { - cd := canonicalizeClusterConfigSoftwareConfig(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewClusterConfigSoftwareConfig(c *Client, des, nw *ClusterConfigSoftwareConfig) *ClusterConfigSoftwareConfig { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ClusterConfigSoftwareConfig while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.ImageVersion, nw.ImageVersion) { - nw.ImageVersion = des.ImageVersion - } - if canonicalizeSoftwareConfigProperties(des.Properties, nw.Properties) { - nw.Properties = des.Properties - } - - return nw -} - -func canonicalizeNewClusterConfigSoftwareConfigSet(c *Client, des, nw []ClusterConfigSoftwareConfig) []ClusterConfigSoftwareConfig { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []ClusterConfigSoftwareConfig - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareClusterConfigSoftwareConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewClusterConfigSoftwareConfig(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewClusterConfigSoftwareConfigSlice(c *Client, des, nw []ClusterConfigSoftwareConfig) []ClusterConfigSoftwareConfig { - if des == nil { - return nw - } - - // Lengths are unequal. 
A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []ClusterConfigSoftwareConfig - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewClusterConfigSoftwareConfig(c, &d, &n)) - } - - return items -} - -func canonicalizeClusterConfigInitializationActions(des, initial *ClusterConfigInitializationActions, opts ...dcl.ApplyOption) *ClusterConfigInitializationActions { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ClusterConfigInitializationActions{} - - if dcl.StringCanonicalize(des.ExecutableFile, initial.ExecutableFile) || dcl.IsZeroValue(des.ExecutableFile) { - cDes.ExecutableFile = initial.ExecutableFile - } else { - cDes.ExecutableFile = des.ExecutableFile - } - if dcl.StringCanonicalize(des.ExecutionTimeout, initial.ExecutionTimeout) || dcl.IsZeroValue(des.ExecutionTimeout) { - cDes.ExecutionTimeout = initial.ExecutionTimeout - } else { - cDes.ExecutionTimeout = des.ExecutionTimeout - } - - return cDes -} - -func canonicalizeClusterConfigInitializationActionsSlice(des, initial []ClusterConfigInitializationActions, opts ...dcl.ApplyOption) []ClusterConfigInitializationActions { - if des == nil { - return initial - } - - if len(des) != len(initial) { - - items := make([]ClusterConfigInitializationActions, 0, len(des)) - for _, d := range des { - cd := canonicalizeClusterConfigInitializationActions(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ClusterConfigInitializationActions, 0, len(des)) - for i, d := range des { - cd := canonicalizeClusterConfigInitializationActions(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewClusterConfigInitializationActions(c *Client, des, nw *ClusterConfigInitializationActions) *ClusterConfigInitializationActions { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ClusterConfigInitializationActions while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.ExecutableFile, nw.ExecutableFile) { - nw.ExecutableFile = des.ExecutableFile - } - if dcl.StringCanonicalize(des.ExecutionTimeout, nw.ExecutionTimeout) { - nw.ExecutionTimeout = des.ExecutionTimeout - } - - return nw -} - -func canonicalizeNewClusterConfigInitializationActionsSet(c *Client, des, nw []ClusterConfigInitializationActions) []ClusterConfigInitializationActions { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []ClusterConfigInitializationActions - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareClusterConfigInitializationActionsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewClusterConfigInitializationActions(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewClusterConfigInitializationActionsSlice(c *Client, des, nw []ClusterConfigInitializationActions) []ClusterConfigInitializationActions { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []ClusterConfigInitializationActions - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewClusterConfigInitializationActions(c, &d, &n)) - } - - return items -} - -func canonicalizeClusterConfigEncryptionConfig(des, initial *ClusterConfigEncryptionConfig, opts ...dcl.ApplyOption) *ClusterConfigEncryptionConfig { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ClusterConfigEncryptionConfig{} - - if dcl.IsZeroValue(des.GcePdKmsKeyName) || (dcl.IsEmptyValueIndirect(des.GcePdKmsKeyName) && dcl.IsEmptyValueIndirect(initial.GcePdKmsKeyName)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.GcePdKmsKeyName = initial.GcePdKmsKeyName - } else { - cDes.GcePdKmsKeyName = des.GcePdKmsKeyName - } - - return cDes -} - -func canonicalizeClusterConfigEncryptionConfigSlice(des, initial []ClusterConfigEncryptionConfig, opts ...dcl.ApplyOption) []ClusterConfigEncryptionConfig { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ClusterConfigEncryptionConfig, 0, len(des)) - for _, d := range des { - cd := canonicalizeClusterConfigEncryptionConfig(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ClusterConfigEncryptionConfig, 0, len(des)) - for i, d := range des { - cd := canonicalizeClusterConfigEncryptionConfig(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewClusterConfigEncryptionConfig(c *Client, des, nw *ClusterConfigEncryptionConfig) *ClusterConfigEncryptionConfig { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ClusterConfigEncryptionConfig while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewClusterConfigEncryptionConfigSet(c *Client, des, nw []ClusterConfigEncryptionConfig) []ClusterConfigEncryptionConfig { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []ClusterConfigEncryptionConfig - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareClusterConfigEncryptionConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewClusterConfigEncryptionConfig(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewClusterConfigEncryptionConfigSlice(c *Client, des, nw []ClusterConfigEncryptionConfig) []ClusterConfigEncryptionConfig { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []ClusterConfigEncryptionConfig - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewClusterConfigEncryptionConfig(c, &d, &n)) - } - - return items -} - -func canonicalizeClusterConfigAutoscalingConfig(des, initial *ClusterConfigAutoscalingConfig, opts ...dcl.ApplyOption) *ClusterConfigAutoscalingConfig { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ClusterConfigAutoscalingConfig{} - - if dcl.IsZeroValue(des.Policy) || (dcl.IsEmptyValueIndirect(des.Policy) && dcl.IsEmptyValueIndirect(initial.Policy)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.Policy = initial.Policy - } else { - cDes.Policy = des.Policy - } - - return cDes -} - -func canonicalizeClusterConfigAutoscalingConfigSlice(des, initial []ClusterConfigAutoscalingConfig, opts ...dcl.ApplyOption) []ClusterConfigAutoscalingConfig { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ClusterConfigAutoscalingConfig, 0, len(des)) - for _, d := range des { - cd := canonicalizeClusterConfigAutoscalingConfig(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ClusterConfigAutoscalingConfig, 0, len(des)) - for i, d := range des { - cd := canonicalizeClusterConfigAutoscalingConfig(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewClusterConfigAutoscalingConfig(c *Client, des, nw *ClusterConfigAutoscalingConfig) *ClusterConfigAutoscalingConfig { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ClusterConfigAutoscalingConfig while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewClusterConfigAutoscalingConfigSet(c *Client, des, nw []ClusterConfigAutoscalingConfig) []ClusterConfigAutoscalingConfig { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []ClusterConfigAutoscalingConfig - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareClusterConfigAutoscalingConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewClusterConfigAutoscalingConfig(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewClusterConfigAutoscalingConfigSlice(c *Client, des, nw []ClusterConfigAutoscalingConfig) []ClusterConfigAutoscalingConfig { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []ClusterConfigAutoscalingConfig - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewClusterConfigAutoscalingConfig(c, &d, &n)) - } - - return items -} - -func canonicalizeClusterConfigSecurityConfig(des, initial *ClusterConfigSecurityConfig, opts ...dcl.ApplyOption) *ClusterConfigSecurityConfig { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ClusterConfigSecurityConfig{} - - cDes.KerberosConfig = canonicalizeClusterConfigSecurityConfigKerberosConfig(des.KerberosConfig, initial.KerberosConfig, opts...) 
- cDes.IdentityConfig = canonicalizeClusterConfigSecurityConfigIdentityConfig(des.IdentityConfig, initial.IdentityConfig, opts...) - - return cDes -} - -func canonicalizeClusterConfigSecurityConfigSlice(des, initial []ClusterConfigSecurityConfig, opts ...dcl.ApplyOption) []ClusterConfigSecurityConfig { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ClusterConfigSecurityConfig, 0, len(des)) - for _, d := range des { - cd := canonicalizeClusterConfigSecurityConfig(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ClusterConfigSecurityConfig, 0, len(des)) - for i, d := range des { - cd := canonicalizeClusterConfigSecurityConfig(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewClusterConfigSecurityConfig(c *Client, des, nw *ClusterConfigSecurityConfig) *ClusterConfigSecurityConfig { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ClusterConfigSecurityConfig while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - nw.KerberosConfig = canonicalizeNewClusterConfigSecurityConfigKerberosConfig(c, des.KerberosConfig, nw.KerberosConfig) - nw.IdentityConfig = canonicalizeNewClusterConfigSecurityConfigIdentityConfig(c, des.IdentityConfig, nw.IdentityConfig) - - return nw -} - -func canonicalizeNewClusterConfigSecurityConfigSet(c *Client, des, nw []ClusterConfigSecurityConfig) []ClusterConfigSecurityConfig { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []ClusterConfigSecurityConfig - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareClusterConfigSecurityConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewClusterConfigSecurityConfig(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewClusterConfigSecurityConfigSlice(c *Client, des, nw []ClusterConfigSecurityConfig) []ClusterConfigSecurityConfig { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []ClusterConfigSecurityConfig - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewClusterConfigSecurityConfig(c, &d, &n)) - } - - return items -} - -func canonicalizeClusterConfigSecurityConfigKerberosConfig(des, initial *ClusterConfigSecurityConfigKerberosConfig, opts ...dcl.ApplyOption) *ClusterConfigSecurityConfigKerberosConfig { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ClusterConfigSecurityConfigKerberosConfig{} - - if dcl.BoolCanonicalize(des.EnableKerberos, initial.EnableKerberos) || dcl.IsZeroValue(des.EnableKerberos) { - cDes.EnableKerberos = initial.EnableKerberos - } else { - cDes.EnableKerberos = des.EnableKerberos - } - if dcl.StringCanonicalize(des.RootPrincipalPassword, initial.RootPrincipalPassword) || dcl.IsZeroValue(des.RootPrincipalPassword) { - cDes.RootPrincipalPassword = initial.RootPrincipalPassword - } else { - cDes.RootPrincipalPassword = des.RootPrincipalPassword - } - if dcl.IsZeroValue(des.KmsKey) || (dcl.IsEmptyValueIndirect(des.KmsKey) && 
dcl.IsEmptyValueIndirect(initial.KmsKey)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.KmsKey = initial.KmsKey - } else { - cDes.KmsKey = des.KmsKey - } - if dcl.StringCanonicalize(des.Keystore, initial.Keystore) || dcl.IsZeroValue(des.Keystore) { - cDes.Keystore = initial.Keystore - } else { - cDes.Keystore = des.Keystore - } - if dcl.StringCanonicalize(des.Truststore, initial.Truststore) || dcl.IsZeroValue(des.Truststore) { - cDes.Truststore = initial.Truststore - } else { - cDes.Truststore = des.Truststore - } - if dcl.StringCanonicalize(des.KeystorePassword, initial.KeystorePassword) || dcl.IsZeroValue(des.KeystorePassword) { - cDes.KeystorePassword = initial.KeystorePassword - } else { - cDes.KeystorePassword = des.KeystorePassword - } - if dcl.StringCanonicalize(des.KeyPassword, initial.KeyPassword) || dcl.IsZeroValue(des.KeyPassword) { - cDes.KeyPassword = initial.KeyPassword - } else { - cDes.KeyPassword = des.KeyPassword - } - if dcl.StringCanonicalize(des.TruststorePassword, initial.TruststorePassword) || dcl.IsZeroValue(des.TruststorePassword) { - cDes.TruststorePassword = initial.TruststorePassword - } else { - cDes.TruststorePassword = des.TruststorePassword - } - if dcl.StringCanonicalize(des.CrossRealmTrustRealm, initial.CrossRealmTrustRealm) || dcl.IsZeroValue(des.CrossRealmTrustRealm) { - cDes.CrossRealmTrustRealm = initial.CrossRealmTrustRealm - } else { - cDes.CrossRealmTrustRealm = des.CrossRealmTrustRealm - } - if dcl.StringCanonicalize(des.CrossRealmTrustKdc, initial.CrossRealmTrustKdc) || dcl.IsZeroValue(des.CrossRealmTrustKdc) { - cDes.CrossRealmTrustKdc = initial.CrossRealmTrustKdc - } else { - cDes.CrossRealmTrustKdc = des.CrossRealmTrustKdc - } - if dcl.StringCanonicalize(des.CrossRealmTrustAdminServer, initial.CrossRealmTrustAdminServer) || dcl.IsZeroValue(des.CrossRealmTrustAdminServer) { - cDes.CrossRealmTrustAdminServer = initial.CrossRealmTrustAdminServer - } else { - 
cDes.CrossRealmTrustAdminServer = des.CrossRealmTrustAdminServer - } - if dcl.StringCanonicalize(des.CrossRealmTrustSharedPassword, initial.CrossRealmTrustSharedPassword) || dcl.IsZeroValue(des.CrossRealmTrustSharedPassword) { - cDes.CrossRealmTrustSharedPassword = initial.CrossRealmTrustSharedPassword - } else { - cDes.CrossRealmTrustSharedPassword = des.CrossRealmTrustSharedPassword - } - if dcl.StringCanonicalize(des.KdcDbKey, initial.KdcDbKey) || dcl.IsZeroValue(des.KdcDbKey) { - cDes.KdcDbKey = initial.KdcDbKey - } else { - cDes.KdcDbKey = des.KdcDbKey - } - if dcl.IsZeroValue(des.TgtLifetimeHours) || (dcl.IsEmptyValueIndirect(des.TgtLifetimeHours) && dcl.IsEmptyValueIndirect(initial.TgtLifetimeHours)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.TgtLifetimeHours = initial.TgtLifetimeHours - } else { - cDes.TgtLifetimeHours = des.TgtLifetimeHours - } - if dcl.StringCanonicalize(des.Realm, initial.Realm) || dcl.IsZeroValue(des.Realm) { - cDes.Realm = initial.Realm - } else { - cDes.Realm = des.Realm - } - - return cDes -} - -func canonicalizeClusterConfigSecurityConfigKerberosConfigSlice(des, initial []ClusterConfigSecurityConfigKerberosConfig, opts ...dcl.ApplyOption) []ClusterConfigSecurityConfigKerberosConfig { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ClusterConfigSecurityConfigKerberosConfig, 0, len(des)) - for _, d := range des { - cd := canonicalizeClusterConfigSecurityConfigKerberosConfig(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ClusterConfigSecurityConfigKerberosConfig, 0, len(des)) - for i, d := range des { - cd := canonicalizeClusterConfigSecurityConfigKerberosConfig(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewClusterConfigSecurityConfigKerberosConfig(c *Client, des, nw *ClusterConfigSecurityConfigKerberosConfig) *ClusterConfigSecurityConfigKerberosConfig { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ClusterConfigSecurityConfigKerberosConfig while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.BoolCanonicalize(des.EnableKerberos, nw.EnableKerberos) { - nw.EnableKerberos = des.EnableKerberos - } - if dcl.StringCanonicalize(des.RootPrincipalPassword, nw.RootPrincipalPassword) { - nw.RootPrincipalPassword = des.RootPrincipalPassword - } - if dcl.StringCanonicalize(des.Keystore, nw.Keystore) { - nw.Keystore = des.Keystore - } - if dcl.StringCanonicalize(des.Truststore, nw.Truststore) { - nw.Truststore = des.Truststore - } - if dcl.StringCanonicalize(des.KeystorePassword, nw.KeystorePassword) { - nw.KeystorePassword = des.KeystorePassword - } - if dcl.StringCanonicalize(des.KeyPassword, nw.KeyPassword) { - nw.KeyPassword = des.KeyPassword - } - if dcl.StringCanonicalize(des.TruststorePassword, nw.TruststorePassword) { - nw.TruststorePassword = des.TruststorePassword - } - if dcl.StringCanonicalize(des.CrossRealmTrustRealm, nw.CrossRealmTrustRealm) { - nw.CrossRealmTrustRealm = des.CrossRealmTrustRealm - } - if dcl.StringCanonicalize(des.CrossRealmTrustKdc, nw.CrossRealmTrustKdc) { - nw.CrossRealmTrustKdc = des.CrossRealmTrustKdc - } - if dcl.StringCanonicalize(des.CrossRealmTrustAdminServer, nw.CrossRealmTrustAdminServer) { - nw.CrossRealmTrustAdminServer = des.CrossRealmTrustAdminServer - } - if dcl.StringCanonicalize(des.CrossRealmTrustSharedPassword, nw.CrossRealmTrustSharedPassword) { - nw.CrossRealmTrustSharedPassword = des.CrossRealmTrustSharedPassword - } - if dcl.StringCanonicalize(des.KdcDbKey, 
nw.KdcDbKey) { - nw.KdcDbKey = des.KdcDbKey - } - if dcl.StringCanonicalize(des.Realm, nw.Realm) { - nw.Realm = des.Realm - } - - return nw -} - -func canonicalizeNewClusterConfigSecurityConfigKerberosConfigSet(c *Client, des, nw []ClusterConfigSecurityConfigKerberosConfig) []ClusterConfigSecurityConfigKerberosConfig { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []ClusterConfigSecurityConfigKerberosConfig - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareClusterConfigSecurityConfigKerberosConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewClusterConfigSecurityConfigKerberosConfig(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewClusterConfigSecurityConfigKerberosConfigSlice(c *Client, des, nw []ClusterConfigSecurityConfigKerberosConfig) []ClusterConfigSecurityConfigKerberosConfig { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []ClusterConfigSecurityConfigKerberosConfig - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewClusterConfigSecurityConfigKerberosConfig(c, &d, &n)) - } - - return items -} - -func canonicalizeClusterConfigSecurityConfigIdentityConfig(des, initial *ClusterConfigSecurityConfigIdentityConfig, opts ...dcl.ApplyOption) *ClusterConfigSecurityConfigIdentityConfig { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ClusterConfigSecurityConfigIdentityConfig{} - - if dcl.IsZeroValue(des.UserServiceAccountMapping) || (dcl.IsEmptyValueIndirect(des.UserServiceAccountMapping) && dcl.IsEmptyValueIndirect(initial.UserServiceAccountMapping)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.UserServiceAccountMapping = initial.UserServiceAccountMapping - } else { - cDes.UserServiceAccountMapping = des.UserServiceAccountMapping - } - - return cDes -} - -func canonicalizeClusterConfigSecurityConfigIdentityConfigSlice(des, initial []ClusterConfigSecurityConfigIdentityConfig, opts ...dcl.ApplyOption) []ClusterConfigSecurityConfigIdentityConfig { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ClusterConfigSecurityConfigIdentityConfig, 0, len(des)) - for _, d := range des { - cd := canonicalizeClusterConfigSecurityConfigIdentityConfig(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ClusterConfigSecurityConfigIdentityConfig, 0, len(des)) - for i, d := range des { - cd := canonicalizeClusterConfigSecurityConfigIdentityConfig(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewClusterConfigSecurityConfigIdentityConfig(c *Client, des, nw *ClusterConfigSecurityConfigIdentityConfig) *ClusterConfigSecurityConfigIdentityConfig { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ClusterConfigSecurityConfigIdentityConfig while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewClusterConfigSecurityConfigIdentityConfigSet(c *Client, des, nw []ClusterConfigSecurityConfigIdentityConfig) []ClusterConfigSecurityConfigIdentityConfig { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []ClusterConfigSecurityConfigIdentityConfig - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareClusterConfigSecurityConfigIdentityConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewClusterConfigSecurityConfigIdentityConfig(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewClusterConfigSecurityConfigIdentityConfigSlice(c *Client, des, nw []ClusterConfigSecurityConfigIdentityConfig) []ClusterConfigSecurityConfigIdentityConfig { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []ClusterConfigSecurityConfigIdentityConfig - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewClusterConfigSecurityConfigIdentityConfig(c, &d, &n)) - } - - return items -} - -func canonicalizeClusterConfigLifecycleConfig(des, initial *ClusterConfigLifecycleConfig, opts ...dcl.ApplyOption) *ClusterConfigLifecycleConfig { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ClusterConfigLifecycleConfig{} - - if dcl.StringCanonicalize(des.IdleDeleteTtl, initial.IdleDeleteTtl) || dcl.IsZeroValue(des.IdleDeleteTtl) { - cDes.IdleDeleteTtl = initial.IdleDeleteTtl - } else { - cDes.IdleDeleteTtl = des.IdleDeleteTtl - } - if dcl.IsZeroValue(des.AutoDeleteTime) || (dcl.IsEmptyValueIndirect(des.AutoDeleteTime) && dcl.IsEmptyValueIndirect(initial.AutoDeleteTime)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.AutoDeleteTime = initial.AutoDeleteTime - } else { - cDes.AutoDeleteTime = des.AutoDeleteTime - } - if dcl.StringCanonicalize(des.AutoDeleteTtl, initial.AutoDeleteTtl) || dcl.IsZeroValue(des.AutoDeleteTtl) { - cDes.AutoDeleteTtl = initial.AutoDeleteTtl - } else { - cDes.AutoDeleteTtl = des.AutoDeleteTtl - } - - return cDes -} - -func canonicalizeClusterConfigLifecycleConfigSlice(des, initial []ClusterConfigLifecycleConfig, opts ...dcl.ApplyOption) []ClusterConfigLifecycleConfig { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ClusterConfigLifecycleConfig, 0, len(des)) - for _, d := range des { - cd := canonicalizeClusterConfigLifecycleConfig(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ClusterConfigLifecycleConfig, 0, len(des)) - for i, d := range des { - cd := canonicalizeClusterConfigLifecycleConfig(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewClusterConfigLifecycleConfig(c *Client, des, nw *ClusterConfigLifecycleConfig) *ClusterConfigLifecycleConfig { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ClusterConfigLifecycleConfig while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.IdleDeleteTtl, nw.IdleDeleteTtl) { - nw.IdleDeleteTtl = des.IdleDeleteTtl - } - if dcl.StringCanonicalize(des.AutoDeleteTtl, nw.AutoDeleteTtl) { - nw.AutoDeleteTtl = des.AutoDeleteTtl - } - - return nw -} - -func canonicalizeNewClusterConfigLifecycleConfigSet(c *Client, des, nw []ClusterConfigLifecycleConfig) []ClusterConfigLifecycleConfig { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []ClusterConfigLifecycleConfig - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareClusterConfigLifecycleConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewClusterConfigLifecycleConfig(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewClusterConfigLifecycleConfigSlice(c *Client, des, nw []ClusterConfigLifecycleConfig) []ClusterConfigLifecycleConfig { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []ClusterConfigLifecycleConfig - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewClusterConfigLifecycleConfig(c, &d, &n)) - } - - return items -} - -func canonicalizeClusterConfigEndpointConfig(des, initial *ClusterConfigEndpointConfig, opts ...dcl.ApplyOption) *ClusterConfigEndpointConfig { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ClusterConfigEndpointConfig{} - - if dcl.BoolCanonicalize(des.EnableHttpPortAccess, initial.EnableHttpPortAccess) || dcl.IsZeroValue(des.EnableHttpPortAccess) { - cDes.EnableHttpPortAccess = initial.EnableHttpPortAccess - } else { - cDes.EnableHttpPortAccess = des.EnableHttpPortAccess - } - - return cDes -} - -func canonicalizeClusterConfigEndpointConfigSlice(des, initial []ClusterConfigEndpointConfig, opts ...dcl.ApplyOption) []ClusterConfigEndpointConfig { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ClusterConfigEndpointConfig, 0, len(des)) - for _, d := range des { - cd := canonicalizeClusterConfigEndpointConfig(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ClusterConfigEndpointConfig, 0, len(des)) - for i, d := range des { - cd := canonicalizeClusterConfigEndpointConfig(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewClusterConfigEndpointConfig(c *Client, des, nw *ClusterConfigEndpointConfig) *ClusterConfigEndpointConfig { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ClusterConfigEndpointConfig while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - if dcl.BoolCanonicalize(des.EnableHttpPortAccess, nw.EnableHttpPortAccess) { - nw.EnableHttpPortAccess = des.EnableHttpPortAccess - } - - return nw -} - -func canonicalizeNewClusterConfigEndpointConfigSet(c *Client, des, nw []ClusterConfigEndpointConfig) []ClusterConfigEndpointConfig { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []ClusterConfigEndpointConfig - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareClusterConfigEndpointConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewClusterConfigEndpointConfig(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewClusterConfigEndpointConfigSlice(c *Client, des, nw []ClusterConfigEndpointConfig) []ClusterConfigEndpointConfig { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []ClusterConfigEndpointConfig - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewClusterConfigEndpointConfig(c, &d, &n)) - } - - return items -} - -func canonicalizeClusterConfigGkeClusterConfig(des, initial *ClusterConfigGkeClusterConfig, opts ...dcl.ApplyOption) *ClusterConfigGkeClusterConfig { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ClusterConfigGkeClusterConfig{} - - cDes.NamespacedGkeDeploymentTarget = canonicalizeClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(des.NamespacedGkeDeploymentTarget, initial.NamespacedGkeDeploymentTarget, opts...) - - return cDes -} - -func canonicalizeClusterConfigGkeClusterConfigSlice(des, initial []ClusterConfigGkeClusterConfig, opts ...dcl.ApplyOption) []ClusterConfigGkeClusterConfig { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ClusterConfigGkeClusterConfig, 0, len(des)) - for _, d := range des { - cd := canonicalizeClusterConfigGkeClusterConfig(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ClusterConfigGkeClusterConfig, 0, len(des)) - for i, d := range des { - cd := canonicalizeClusterConfigGkeClusterConfig(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewClusterConfigGkeClusterConfig(c *Client, des, nw *ClusterConfigGkeClusterConfig) *ClusterConfigGkeClusterConfig { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ClusterConfigGkeClusterConfig while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - nw.NamespacedGkeDeploymentTarget = canonicalizeNewClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(c, des.NamespacedGkeDeploymentTarget, nw.NamespacedGkeDeploymentTarget) - - return nw -} - -func canonicalizeNewClusterConfigGkeClusterConfigSet(c *Client, des, nw []ClusterConfigGkeClusterConfig) []ClusterConfigGkeClusterConfig { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []ClusterConfigGkeClusterConfig - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareClusterConfigGkeClusterConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewClusterConfigGkeClusterConfig(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewClusterConfigGkeClusterConfigSlice(c *Client, des, nw []ClusterConfigGkeClusterConfig) []ClusterConfigGkeClusterConfig { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []ClusterConfigGkeClusterConfig - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewClusterConfigGkeClusterConfig(c, &d, &n)) - } - - return items -} - -func canonicalizeClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(des, initial *ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget, opts ...dcl.ApplyOption) *ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget{} - - if dcl.IsZeroValue(des.TargetGkeCluster) || (dcl.IsEmptyValueIndirect(des.TargetGkeCluster) && dcl.IsEmptyValueIndirect(initial.TargetGkeCluster)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.TargetGkeCluster = initial.TargetGkeCluster - } else { - cDes.TargetGkeCluster = des.TargetGkeCluster - } - if dcl.StringCanonicalize(des.ClusterNamespace, initial.ClusterNamespace) || dcl.IsZeroValue(des.ClusterNamespace) { - cDes.ClusterNamespace = initial.ClusterNamespace - } else { - cDes.ClusterNamespace = des.ClusterNamespace - } - - return cDes -} - -func canonicalizeClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetSlice(des, initial []ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget, opts ...dcl.ApplyOption) []ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget, 0, len(des)) - for _, d := range des { - cd := canonicalizeClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget, 0, len(des)) - for i, d := range des { - cd := canonicalizeClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(c *Client, des, nw *ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) *ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.ClusterNamespace, nw.ClusterNamespace) { - nw.ClusterNamespace = des.ClusterNamespace - } - - return nw -} - -func canonicalizeNewClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetSet(c *Client, des, nw []ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) []ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) 
- } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetSlice(c *Client, des, nw []ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) []ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(c, &d, &n)) - } - - return items -} - -func canonicalizeClusterConfigMetastoreConfig(des, initial *ClusterConfigMetastoreConfig, opts ...dcl.ApplyOption) *ClusterConfigMetastoreConfig { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ClusterConfigMetastoreConfig{} - - if dcl.IsZeroValue(des.DataprocMetastoreService) || (dcl.IsEmptyValueIndirect(des.DataprocMetastoreService) && dcl.IsEmptyValueIndirect(initial.DataprocMetastoreService)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.DataprocMetastoreService = initial.DataprocMetastoreService - } else { - cDes.DataprocMetastoreService = des.DataprocMetastoreService - } - - return cDes -} - -func canonicalizeClusterConfigMetastoreConfigSlice(des, initial []ClusterConfigMetastoreConfig, opts ...dcl.ApplyOption) []ClusterConfigMetastoreConfig { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ClusterConfigMetastoreConfig, 0, len(des)) - for _, d := range des { - cd := canonicalizeClusterConfigMetastoreConfig(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ClusterConfigMetastoreConfig, 0, len(des)) - for i, d := range des { - cd := canonicalizeClusterConfigMetastoreConfig(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewClusterConfigMetastoreConfig(c *Client, des, nw *ClusterConfigMetastoreConfig) *ClusterConfigMetastoreConfig { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ClusterConfigMetastoreConfig while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewClusterConfigMetastoreConfigSet(c *Client, des, nw []ClusterConfigMetastoreConfig) []ClusterConfigMetastoreConfig { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []ClusterConfigMetastoreConfig - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareClusterConfigMetastoreConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewClusterConfigMetastoreConfig(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewClusterConfigMetastoreConfigSlice(c *Client, des, nw []ClusterConfigMetastoreConfig) []ClusterConfigMetastoreConfig { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []ClusterConfigMetastoreConfig - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewClusterConfigMetastoreConfig(c, &d, &n)) - } - - return items -} - -func canonicalizeClusterConfigDataprocMetricConfig(des, initial *ClusterConfigDataprocMetricConfig, opts ...dcl.ApplyOption) *ClusterConfigDataprocMetricConfig { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ClusterConfigDataprocMetricConfig{} - - cDes.Metrics = canonicalizeClusterConfigDataprocMetricConfigMetricsSlice(des.Metrics, initial.Metrics, opts...) - - return cDes -} - -func canonicalizeClusterConfigDataprocMetricConfigSlice(des, initial []ClusterConfigDataprocMetricConfig, opts ...dcl.ApplyOption) []ClusterConfigDataprocMetricConfig { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ClusterConfigDataprocMetricConfig, 0, len(des)) - for _, d := range des { - cd := canonicalizeClusterConfigDataprocMetricConfig(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ClusterConfigDataprocMetricConfig, 0, len(des)) - for i, d := range des { - cd := canonicalizeClusterConfigDataprocMetricConfig(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewClusterConfigDataprocMetricConfig(c *Client, des, nw *ClusterConfigDataprocMetricConfig) *ClusterConfigDataprocMetricConfig { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ClusterConfigDataprocMetricConfig while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - nw.Metrics = canonicalizeNewClusterConfigDataprocMetricConfigMetricsSlice(c, des.Metrics, nw.Metrics) - - return nw -} - -func canonicalizeNewClusterConfigDataprocMetricConfigSet(c *Client, des, nw []ClusterConfigDataprocMetricConfig) []ClusterConfigDataprocMetricConfig { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []ClusterConfigDataprocMetricConfig - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareClusterConfigDataprocMetricConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewClusterConfigDataprocMetricConfig(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewClusterConfigDataprocMetricConfigSlice(c *Client, des, nw []ClusterConfigDataprocMetricConfig) []ClusterConfigDataprocMetricConfig { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []ClusterConfigDataprocMetricConfig - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewClusterConfigDataprocMetricConfig(c, &d, &n)) - } - - return items -} - -func canonicalizeClusterConfigDataprocMetricConfigMetrics(des, initial *ClusterConfigDataprocMetricConfigMetrics, opts ...dcl.ApplyOption) *ClusterConfigDataprocMetricConfigMetrics { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ClusterConfigDataprocMetricConfigMetrics{} - - if dcl.IsZeroValue(des.MetricSource) || (dcl.IsEmptyValueIndirect(des.MetricSource) && dcl.IsEmptyValueIndirect(initial.MetricSource)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.MetricSource = initial.MetricSource - } else { - cDes.MetricSource = des.MetricSource - } - if dcl.StringArrayCanonicalize(des.MetricOverrides, initial.MetricOverrides) { - cDes.MetricOverrides = initial.MetricOverrides - } else { - cDes.MetricOverrides = des.MetricOverrides - } - - return cDes -} - -func canonicalizeClusterConfigDataprocMetricConfigMetricsSlice(des, initial []ClusterConfigDataprocMetricConfigMetrics, opts ...dcl.ApplyOption) []ClusterConfigDataprocMetricConfigMetrics { - if des == nil { - return initial - } - - if len(des) != len(initial) { - - items := make([]ClusterConfigDataprocMetricConfigMetrics, 0, len(des)) - for _, d := range des { - cd := canonicalizeClusterConfigDataprocMetricConfigMetrics(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ClusterConfigDataprocMetricConfigMetrics, 0, len(des)) - for i, d := range des { - cd := canonicalizeClusterConfigDataprocMetricConfigMetrics(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewClusterConfigDataprocMetricConfigMetrics(c *Client, des, nw *ClusterConfigDataprocMetricConfigMetrics) *ClusterConfigDataprocMetricConfigMetrics { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ClusterConfigDataprocMetricConfigMetrics while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringArrayCanonicalize(des.MetricOverrides, nw.MetricOverrides) { - nw.MetricOverrides = des.MetricOverrides - } - - return nw -} - -func canonicalizeNewClusterConfigDataprocMetricConfigMetricsSet(c *Client, des, nw []ClusterConfigDataprocMetricConfigMetrics) []ClusterConfigDataprocMetricConfigMetrics { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []ClusterConfigDataprocMetricConfigMetrics - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareClusterConfigDataprocMetricConfigMetricsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewClusterConfigDataprocMetricConfigMetrics(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewClusterConfigDataprocMetricConfigMetricsSlice(c *Client, des, nw []ClusterConfigDataprocMetricConfigMetrics) []ClusterConfigDataprocMetricConfigMetrics { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []ClusterConfigDataprocMetricConfigMetrics - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewClusterConfigDataprocMetricConfigMetrics(c, &d, &n)) - } - - return items -} - -func canonicalizeClusterStatus(des, initial *ClusterStatus, opts ...dcl.ApplyOption) *ClusterStatus { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ClusterStatus{} - - return cDes -} - -func canonicalizeClusterStatusSlice(des, initial []ClusterStatus, opts ...dcl.ApplyOption) []ClusterStatus { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ClusterStatus, 0, len(des)) - for _, d := range des { - cd := canonicalizeClusterStatus(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ClusterStatus, 0, len(des)) - for i, d := range des { - cd := canonicalizeClusterStatus(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewClusterStatus(c *Client, des, nw *ClusterStatus) *ClusterStatus { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ClusterStatus while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Detail, nw.Detail) { - nw.Detail = des.Detail - } - - return nw -} - -func canonicalizeNewClusterStatusSet(c *Client, des, nw []ClusterStatus) []ClusterStatus { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []ClusterStatus - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareClusterStatusNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewClusterStatus(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewClusterStatusSlice(c *Client, des, nw []ClusterStatus) []ClusterStatus { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []ClusterStatus - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewClusterStatus(c, &d, &n)) - } - - return items -} - -func canonicalizeClusterStatusHistory(des, initial *ClusterStatusHistory, opts ...dcl.ApplyOption) *ClusterStatusHistory { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ClusterStatusHistory{} - - return cDes -} - -func canonicalizeClusterStatusHistorySlice(des, initial []ClusterStatusHistory, opts ...dcl.ApplyOption) []ClusterStatusHistory { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ClusterStatusHistory, 0, len(des)) - for _, d := range des { - cd := canonicalizeClusterStatusHistory(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ClusterStatusHistory, 0, len(des)) - for i, d := range des { - cd := canonicalizeClusterStatusHistory(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewClusterStatusHistory(c *Client, des, nw *ClusterStatusHistory) *ClusterStatusHistory { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ClusterStatusHistory while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Detail, nw.Detail) { - nw.Detail = des.Detail - } - - return nw -} - -func canonicalizeNewClusterStatusHistorySet(c *Client, des, nw []ClusterStatusHistory) []ClusterStatusHistory { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []ClusterStatusHistory - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareClusterStatusHistoryNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewClusterStatusHistory(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewClusterStatusHistorySlice(c *Client, des, nw []ClusterStatusHistory) []ClusterStatusHistory { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []ClusterStatusHistory - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewClusterStatusHistory(c, &d, &n)) - } - - return items -} - -func canonicalizeClusterMetrics(des, initial *ClusterMetrics, opts ...dcl.ApplyOption) *ClusterMetrics { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ClusterMetrics{} - - if dcl.IsZeroValue(des.HdfsMetrics) || (dcl.IsEmptyValueIndirect(des.HdfsMetrics) && dcl.IsEmptyValueIndirect(initial.HdfsMetrics)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.HdfsMetrics = initial.HdfsMetrics - } else { - cDes.HdfsMetrics = des.HdfsMetrics - } - if dcl.IsZeroValue(des.YarnMetrics) || (dcl.IsEmptyValueIndirect(des.YarnMetrics) && dcl.IsEmptyValueIndirect(initial.YarnMetrics)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.YarnMetrics = initial.YarnMetrics - } else { - cDes.YarnMetrics = des.YarnMetrics - } - - return cDes -} - -func canonicalizeClusterMetricsSlice(des, initial []ClusterMetrics, opts ...dcl.ApplyOption) []ClusterMetrics { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ClusterMetrics, 0, len(des)) - for _, d := range des { - cd := canonicalizeClusterMetrics(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ClusterMetrics, 0, len(des)) - for i, d := range des { - cd := canonicalizeClusterMetrics(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewClusterMetrics(c *Client, des, nw *ClusterMetrics) *ClusterMetrics { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ClusterMetrics while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewClusterMetricsSet(c *Client, des, nw []ClusterMetrics) []ClusterMetrics { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []ClusterMetrics - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareClusterMetricsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewClusterMetrics(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewClusterMetricsSlice(c *Client, des, nw []ClusterMetrics) []ClusterMetrics { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []ClusterMetrics - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewClusterMetrics(c, &d, &n)) - } - - return items -} - -func canonicalizeClusterVirtualClusterConfig(des, initial *ClusterVirtualClusterConfig, opts ...dcl.ApplyOption) *ClusterVirtualClusterConfig { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ClusterVirtualClusterConfig{} - - if dcl.IsZeroValue(des.StagingBucket) || (dcl.IsEmptyValueIndirect(des.StagingBucket) && dcl.IsEmptyValueIndirect(initial.StagingBucket)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.StagingBucket = initial.StagingBucket - } else { - cDes.StagingBucket = des.StagingBucket - } - cDes.KubernetesClusterConfig = canonicalizeClusterVirtualClusterConfigKubernetesClusterConfig(des.KubernetesClusterConfig, initial.KubernetesClusterConfig, opts...) - cDes.AuxiliaryServicesConfig = canonicalizeClusterVirtualClusterConfigAuxiliaryServicesConfig(des.AuxiliaryServicesConfig, initial.AuxiliaryServicesConfig, opts...) - - return cDes -} - -func canonicalizeClusterVirtualClusterConfigSlice(des, initial []ClusterVirtualClusterConfig, opts ...dcl.ApplyOption) []ClusterVirtualClusterConfig { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ClusterVirtualClusterConfig, 0, len(des)) - for _, d := range des { - cd := canonicalizeClusterVirtualClusterConfig(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ClusterVirtualClusterConfig, 0, len(des)) - for i, d := range des { - cd := canonicalizeClusterVirtualClusterConfig(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewClusterVirtualClusterConfig(c *Client, des, nw *ClusterVirtualClusterConfig) *ClusterVirtualClusterConfig { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ClusterVirtualClusterConfig while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - nw.KubernetesClusterConfig = canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfig(c, des.KubernetesClusterConfig, nw.KubernetesClusterConfig) - nw.AuxiliaryServicesConfig = canonicalizeNewClusterVirtualClusterConfigAuxiliaryServicesConfig(c, des.AuxiliaryServicesConfig, nw.AuxiliaryServicesConfig) - - return nw -} - -func canonicalizeNewClusterVirtualClusterConfigSet(c *Client, des, nw []ClusterVirtualClusterConfig) []ClusterVirtualClusterConfig { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []ClusterVirtualClusterConfig - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareClusterVirtualClusterConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewClusterVirtualClusterConfig(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewClusterVirtualClusterConfigSlice(c *Client, des, nw []ClusterVirtualClusterConfig) []ClusterVirtualClusterConfig { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []ClusterVirtualClusterConfig - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewClusterVirtualClusterConfig(c, &d, &n)) - } - - return items -} - -func canonicalizeClusterVirtualClusterConfigKubernetesClusterConfig(des, initial *ClusterVirtualClusterConfigKubernetesClusterConfig, opts ...dcl.ApplyOption) *ClusterVirtualClusterConfigKubernetesClusterConfig { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ClusterVirtualClusterConfigKubernetesClusterConfig{} - - if dcl.StringCanonicalize(des.KubernetesNamespace, initial.KubernetesNamespace) || dcl.IsZeroValue(des.KubernetesNamespace) { - cDes.KubernetesNamespace = initial.KubernetesNamespace - } else { - cDes.KubernetesNamespace = des.KubernetesNamespace - } - cDes.GkeClusterConfig = canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig(des.GkeClusterConfig, initial.GkeClusterConfig, opts...) - cDes.KubernetesSoftwareConfig = canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig(des.KubernetesSoftwareConfig, initial.KubernetesSoftwareConfig, opts...) - - return cDes -} - -func canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigSlice(des, initial []ClusterVirtualClusterConfigKubernetesClusterConfig, opts ...dcl.ApplyOption) []ClusterVirtualClusterConfigKubernetesClusterConfig { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ClusterVirtualClusterConfigKubernetesClusterConfig, 0, len(des)) - for _, d := range des { - cd := canonicalizeClusterVirtualClusterConfigKubernetesClusterConfig(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ClusterVirtualClusterConfigKubernetesClusterConfig, 0, len(des)) - for i, d := range des { - cd := canonicalizeClusterVirtualClusterConfigKubernetesClusterConfig(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfig(c *Client, des, nw *ClusterVirtualClusterConfigKubernetesClusterConfig) *ClusterVirtualClusterConfigKubernetesClusterConfig { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ClusterVirtualClusterConfigKubernetesClusterConfig while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.KubernetesNamespace, nw.KubernetesNamespace) { - nw.KubernetesNamespace = des.KubernetesNamespace - } - nw.GkeClusterConfig = canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig(c, des.GkeClusterConfig, nw.GkeClusterConfig) - nw.KubernetesSoftwareConfig = canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig(c, des.KubernetesSoftwareConfig, nw.KubernetesSoftwareConfig) - - return nw -} - -func canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigSet(c *Client, des, nw []ClusterVirtualClusterConfigKubernetesClusterConfig) []ClusterVirtualClusterConfigKubernetesClusterConfig { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []ClusterVirtualClusterConfigKubernetesClusterConfig - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareClusterVirtualClusterConfigKubernetesClusterConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfig(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigSlice(c *Client, des, nw []ClusterVirtualClusterConfigKubernetesClusterConfig) []ClusterVirtualClusterConfigKubernetesClusterConfig { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []ClusterVirtualClusterConfigKubernetesClusterConfig - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfig(c, &d, &n)) - } - - return items -} - -func canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig(des, initial *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig, opts ...dcl.ApplyOption) *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig{} - - if dcl.IsZeroValue(des.GkeClusterTarget) || (dcl.IsEmptyValueIndirect(des.GkeClusterTarget) && dcl.IsEmptyValueIndirect(initial.GkeClusterTarget)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.GkeClusterTarget = initial.GkeClusterTarget - } else { - cDes.GkeClusterTarget = des.GkeClusterTarget - } - cDes.NodePoolTarget = canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetSlice(des.NodePoolTarget, initial.NodePoolTarget, opts...) - - return cDes -} - -func canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigSlice(des, initial []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig, opts ...dcl.ApplyOption) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig, 0, len(des)) - for _, d := range des { - cd := canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig, 0, len(des)) - for i, d := range des { - cd := canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig(c *Client, des, nw *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig) *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - nw.NodePoolTarget = canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetSlice(c, des.NodePoolTarget, nw.NodePoolTarget) - - return nw -} - -func canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigSet(c *Client, des, nw []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigSlice(c *Client, des, nw []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig(c, &d, &n)) - } - - return items -} - -func canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget(des, initial *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget, opts ...dcl.ApplyOption) *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget{} - - if dcl.IsZeroValue(des.NodePool) || (dcl.IsEmptyValueIndirect(des.NodePool) && dcl.IsEmptyValueIndirect(initial.NodePool)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.NodePool = initial.NodePool - } else { - cDes.NodePool = des.NodePool - } - if dcl.IsZeroValue(des.Roles) || (dcl.IsEmptyValueIndirect(des.Roles) && dcl.IsEmptyValueIndirect(initial.Roles)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.Roles = initial.Roles - } else { - cDes.Roles = des.Roles - } - cDes.NodePoolConfig = canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig(des.NodePoolConfig, initial.NodePoolConfig, opts...) 
- - return cDes -} - -func canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetSlice(des, initial []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget, opts ...dcl.ApplyOption) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget { - if des == nil { - return initial - } - - if len(des) != len(initial) { - - items := make([]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget, 0, len(des)) - for _, d := range des { - cd := canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget, 0, len(des)) - for i, d := range des { - cd := canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget(c *Client, des, nw *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget) *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - nw.NodePoolConfig = des.NodePoolConfig - - return nw -} - -func canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetSet(c *Client, des, nw []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetSlice(c *Client, des, nw []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget(c, &d, &n)) - } - - return items -} - -func canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig(des, initial *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig, opts ...dcl.ApplyOption) *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig{} - - cDes.Config = canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig(des.Config, initial.Config, opts...) - if dcl.StringArrayCanonicalize(des.Locations, initial.Locations) { - cDes.Locations = initial.Locations - } else { - cDes.Locations = des.Locations - } - cDes.Autoscaling = canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling(des.Autoscaling, initial.Autoscaling, opts...) 
- - return cDes -} - -func canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigSlice(des, initial []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig, opts ...dcl.ApplyOption) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig, 0, len(des)) - for _, d := range des { - cd := canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig, 0, len(des)) - for i, d := range des { - cd := canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig(c *Client, des, nw *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig) *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - nw.Config = canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig(c, des.Config, nw.Config) - if dcl.StringArrayCanonicalize(des.Locations, nw.Locations) { - nw.Locations = des.Locations - } - nw.Autoscaling = canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling(c, des.Autoscaling, nw.Autoscaling) - - return nw -} - -func canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigSet(c *Client, des, nw []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) 
- - return items -} - -func canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigSlice(c *Client, des, nw []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig(c, &d, &n)) - } - - return items -} - -func canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig(des, initial *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig, opts ...dcl.ApplyOption) *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig{} - - if dcl.StringCanonicalize(des.MachineType, initial.MachineType) || dcl.IsZeroValue(des.MachineType) { - cDes.MachineType = initial.MachineType - } else { - cDes.MachineType = des.MachineType - } - if dcl.IsZeroValue(des.LocalSsdCount) || (dcl.IsEmptyValueIndirect(des.LocalSsdCount) && dcl.IsEmptyValueIndirect(initial.LocalSsdCount)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.LocalSsdCount = initial.LocalSsdCount - } else { - cDes.LocalSsdCount = des.LocalSsdCount - } - if dcl.BoolCanonicalize(des.Preemptible, initial.Preemptible) || dcl.IsZeroValue(des.Preemptible) { - cDes.Preemptible = initial.Preemptible - } else { - cDes.Preemptible = des.Preemptible - } - cDes.Accelerators = canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAcceleratorsSlice(des.Accelerators, initial.Accelerators, opts...) - if dcl.StringCanonicalize(des.MinCpuPlatform, initial.MinCpuPlatform) || dcl.IsZeroValue(des.MinCpuPlatform) { - cDes.MinCpuPlatform = initial.MinCpuPlatform - } else { - cDes.MinCpuPlatform = des.MinCpuPlatform - } - if dcl.StringCanonicalize(des.BootDiskKmsKey, initial.BootDiskKmsKey) || dcl.IsZeroValue(des.BootDiskKmsKey) { - cDes.BootDiskKmsKey = initial.BootDiskKmsKey - } else { - cDes.BootDiskKmsKey = des.BootDiskKmsKey - } - cDes.EphemeralStorageConfig = canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig(des.EphemeralStorageConfig, initial.EphemeralStorageConfig, opts...) 
- if dcl.BoolCanonicalize(des.Spot, initial.Spot) || dcl.IsZeroValue(des.Spot) { - cDes.Spot = initial.Spot - } else { - cDes.Spot = des.Spot - } - - return cDes -} - -func canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigSlice(des, initial []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig, opts ...dcl.ApplyOption) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig, 0, len(des)) - for _, d := range des { - cd := canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig, 0, len(des)) - for i, d := range des { - cd := canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig(c *Client, des, nw *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig) *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.MachineType, nw.MachineType) { - nw.MachineType = des.MachineType - } - if dcl.BoolCanonicalize(des.Preemptible, nw.Preemptible) { - nw.Preemptible = des.Preemptible - } - nw.Accelerators = canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAcceleratorsSlice(c, des.Accelerators, nw.Accelerators) - if dcl.StringCanonicalize(des.MinCpuPlatform, nw.MinCpuPlatform) { - nw.MinCpuPlatform = des.MinCpuPlatform - } - if dcl.StringCanonicalize(des.BootDiskKmsKey, nw.BootDiskKmsKey) { - nw.BootDiskKmsKey = des.BootDiskKmsKey - } - nw.EphemeralStorageConfig = canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig(c, des.EphemeralStorageConfig, nw.EphemeralStorageConfig) - if dcl.BoolCanonicalize(des.Spot, nw.Spot) { - nw.Spot = des.Spot - } - - return nw -} - -func canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigSet(c *Client, des, nw []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig) 
[]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigSlice(c *Client, des, nw []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig(c, &d, &n)) - } - - return items -} - -func canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators(des, initial *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators, opts ...dcl.ApplyOption) *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators{} - - if dcl.IsZeroValue(des.AcceleratorCount) || (dcl.IsEmptyValueIndirect(des.AcceleratorCount) && dcl.IsEmptyValueIndirect(initial.AcceleratorCount)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.AcceleratorCount = initial.AcceleratorCount - } else { - cDes.AcceleratorCount = des.AcceleratorCount - } - if dcl.StringCanonicalize(des.AcceleratorType, initial.AcceleratorType) || dcl.IsZeroValue(des.AcceleratorType) { - cDes.AcceleratorType = initial.AcceleratorType - } else { - cDes.AcceleratorType = des.AcceleratorType - } - if dcl.StringCanonicalize(des.GpuPartitionSize, initial.GpuPartitionSize) || dcl.IsZeroValue(des.GpuPartitionSize) { - cDes.GpuPartitionSize = initial.GpuPartitionSize - } else { - cDes.GpuPartitionSize = des.GpuPartitionSize - } - - return cDes -} - -func canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAcceleratorsSlice(des, initial []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators, opts ...dcl.ApplyOption) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators { - if des == nil { - return initial - } - - if len(des) != len(initial) { - - items := make([]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators, 0, len(des)) - for _, d := range des { - cd := canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators, 0, len(des)) - for i, d := range des { - cd := canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators(c *Client, des, nw *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators) *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.AcceleratorType, nw.AcceleratorType) { - nw.AcceleratorType = des.AcceleratorType - } - if dcl.StringCanonicalize(des.GpuPartitionSize, nw.GpuPartitionSize) { - nw.GpuPartitionSize = des.GpuPartitionSize - } - - return nw -} - -func canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAcceleratorsSet(c *Client, des, nw []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAcceleratorsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAcceleratorsSlice(c *Client, des, nw []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators(c, &d, &n)) - } - - return items -} - -func canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig(des, initial *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig, opts ...dcl.ApplyOption) *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig{} - - if dcl.IsZeroValue(des.LocalSsdCount) || (dcl.IsEmptyValueIndirect(des.LocalSsdCount) && dcl.IsEmptyValueIndirect(initial.LocalSsdCount)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.LocalSsdCount = initial.LocalSsdCount - } else { - cDes.LocalSsdCount = des.LocalSsdCount - } - - return cDes -} - -func canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfigSlice(des, initial []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig, opts ...dcl.ApplyOption) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig, 0, len(des)) - for _, d := range des { - cd := canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig, 0, len(des)) - for i, d := range des { - cd := canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig(c *Client, des, nw *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig) *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfigSet(c *Client, des, nw []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfigSlice(c *Client, des, nw []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig(c, &d, &n)) - } - - return items -} - -func canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling(des, initial *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling, opts ...dcl.ApplyOption) *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling{} - - if dcl.IsZeroValue(des.MinNodeCount) || (dcl.IsEmptyValueIndirect(des.MinNodeCount) && dcl.IsEmptyValueIndirect(initial.MinNodeCount)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.MinNodeCount = initial.MinNodeCount - } else { - cDes.MinNodeCount = des.MinNodeCount - } - if dcl.IsZeroValue(des.MaxNodeCount) || (dcl.IsEmptyValueIndirect(des.MaxNodeCount) && dcl.IsEmptyValueIndirect(initial.MaxNodeCount)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.MaxNodeCount = initial.MaxNodeCount - } else { - cDes.MaxNodeCount = des.MaxNodeCount - } - - return cDes -} - -func canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscalingSlice(des, initial []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling, opts ...dcl.ApplyOption) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling, 0, len(des)) - for _, d := range des { - cd := canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling, 0, len(des)) - for i, d := range des { - cd := canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling(c *Client, des, nw *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling) *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscalingSet(c *Client, des, nw []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscalingNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) 
- } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscalingSlice(c *Client, des, nw []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling(c, &d, &n)) - } - - return items -} - -func canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig(des, initial *ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig, opts ...dcl.ApplyOption) *ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig{} - - if dcl.IsZeroValue(des.ComponentVersion) || (dcl.IsEmptyValueIndirect(des.ComponentVersion) && dcl.IsEmptyValueIndirect(initial.ComponentVersion)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.ComponentVersion = initial.ComponentVersion - } else { - cDes.ComponentVersion = des.ComponentVersion - } - if dcl.IsZeroValue(des.Properties) || (dcl.IsEmptyValueIndirect(des.Properties) && dcl.IsEmptyValueIndirect(initial.Properties)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.Properties = initial.Properties - } else { - cDes.Properties = des.Properties - } - - return cDes -} - -func canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfigSlice(des, initial []ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig, opts ...dcl.ApplyOption) []ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig, 0, len(des)) - for _, d := range des { - cd := canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig, 0, len(des)) - for i, d := range des { - cd := canonicalizeClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig(c *Client, des, nw *ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig) *ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfigSet(c *Client, des, nw []ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig) []ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) 
	return items
}

// Canonicalizes each element of nw (actual) against the element of des (desired)
// at the same index; mismatched lengths are passed through unchanged so the
// differ can report them.
func canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfigSlice(c *Client, des, nw []ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig) []ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig {
	if des == nil {
		return nw
	}

	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
	// Return the original array.
	if len(des) != len(nw) {
		return nw
	}

	var items []ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig
	for i, d := range des {
		n := nw[i]
		items = append(items, *canonicalizeNewClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig(c, &d, &n))
	}

	return items
}

// Merges the desired (des) auxiliary-services config with the initial (prior)
// value, recursing into each child message, and returns the canonical desired
// object used for diffing.
// NOTE(review): `des.empty` is presumably the DCL marker for an explicitly
// empty user-specified block — confirm against the dcl library.
func canonicalizeClusterVirtualClusterConfigAuxiliaryServicesConfig(des, initial *ClusterVirtualClusterConfigAuxiliaryServicesConfig, opts ...dcl.ApplyOption) *ClusterVirtualClusterConfigAuxiliaryServicesConfig {
	if des == nil {
		return initial
	}
	if des.empty {
		return des
	}

	if initial == nil {
		return des
	}

	cDes := &ClusterVirtualClusterConfigAuxiliaryServicesConfig{}

	cDes.MetastoreConfig = canonicalizeClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig(des.MetastoreConfig, initial.MetastoreConfig, opts...)
	cDes.SparkHistoryServerConfig = canonicalizeClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig(des.SparkHistoryServerConfig, initial.SparkHistoryServerConfig, opts...)

	return cDes
}

// Slice form of the canonicalizer above: pairs des[i] with initial[i] when the
// lengths match, otherwise canonicalizes each desired element against a nil
// initial value.
func canonicalizeClusterVirtualClusterConfigAuxiliaryServicesConfigSlice(des, initial []ClusterVirtualClusterConfigAuxiliaryServicesConfig, opts ...dcl.ApplyOption) []ClusterVirtualClusterConfigAuxiliaryServicesConfig {
	if dcl.IsEmptyValueIndirect(des) {
		return initial
	}

	if len(des) != len(initial) {

		items := make([]ClusterVirtualClusterConfigAuxiliaryServicesConfig, 0, len(des))
		for _, d := range des {
			cd := canonicalizeClusterVirtualClusterConfigAuxiliaryServicesConfig(&d, nil, opts...)
			if cd != nil {
				items = append(items, *cd)
			}
		}
		return items
	}

	items := make([]ClusterVirtualClusterConfigAuxiliaryServicesConfig, 0, len(des))
	for i, d := range des {
		cd := canonicalizeClusterVirtualClusterConfigAuxiliaryServicesConfig(&d, &initial[i], opts...)
		if cd != nil {
			items = append(items, *cd)
		}
	}
	return items

}

// Reconciles the server-returned object (nw) with the desired one (des) after
// an apply, recursing into child messages. A nil nw paired with an explicitly
// empty des is treated as the server having accepted the empty object, so des
// is kept.
func canonicalizeNewClusterVirtualClusterConfigAuxiliaryServicesConfig(c *Client, des, nw *ClusterVirtualClusterConfigAuxiliaryServicesConfig) *ClusterVirtualClusterConfigAuxiliaryServicesConfig {

	if des == nil {
		return nw
	}

	if nw == nil {
		if dcl.IsEmptyValueIndirect(des) {
			c.Config.Logger.Info("Found explicitly empty value for ClusterVirtualClusterConfigAuxiliaryServicesConfig while comparing non-nil desired to nil actual. Returning desired object.")
			return des
		}
		return nil
	}

	nw.MetastoreConfig = canonicalizeNewClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig(c, des.MetastoreConfig, nw.MetastoreConfig)
	nw.SparkHistoryServerConfig = canonicalizeNewClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig(c, des.SparkHistoryServerConfig, nw.SparkHistoryServerConfig)

	return nw
}

// Set semantics: matches desired elements to actual ones by a zero-diff
// comparison (order-insensitive), canonicalizes the matches, and keeps any
// unmatched actual elements. Matched elements are removed from nw in place via
// re-slicing.
func canonicalizeNewClusterVirtualClusterConfigAuxiliaryServicesConfigSet(c *Client, des, nw []ClusterVirtualClusterConfigAuxiliaryServicesConfig) []ClusterVirtualClusterConfigAuxiliaryServicesConfig {
	if des == nil {
		return nw
	}

	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
	var items []ClusterVirtualClusterConfigAuxiliaryServicesConfig
	for _, d := range des {
		matchedIndex := -1
		for i, n := range nw {
			if diffs, _ := compareClusterVirtualClusterConfigAuxiliaryServicesConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
				matchedIndex = i
				break
			}
		}
		if matchedIndex != -1 {
			items = append(items, *canonicalizeNewClusterVirtualClusterConfigAuxiliaryServicesConfig(c, &d, &nw[matchedIndex]))
			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
		}
	}
	// Also include elements in nw that are not matched in des.
	items = append(items, nw...)

	return items
}

// Ordered-slice form: canonicalizes nw[i] against des[i]; unequal lengths are
// passed through so the differ reports them.
func canonicalizeNewClusterVirtualClusterConfigAuxiliaryServicesConfigSlice(c *Client, des, nw []ClusterVirtualClusterConfigAuxiliaryServicesConfig) []ClusterVirtualClusterConfigAuxiliaryServicesConfig {
	if des == nil {
		return nw
	}

	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
	// Return the original array.
	if len(des) != len(nw) {
		return nw
	}

	var items []ClusterVirtualClusterConfigAuxiliaryServicesConfig
	for i, d := range des {
		n := nw[i]
		items = append(items, *canonicalizeNewClusterVirtualClusterConfigAuxiliaryServicesConfig(c, &d, &n))
	}

	return items
}

// Field-level canonicalization for the metastore child message: keeps the
// initial DataprocMetastoreService when the desired value is zero or both are
// equivalently empty; otherwise the desired value wins.
func canonicalizeClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig(des, initial *ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig, opts ...dcl.ApplyOption) *ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig {
	if des == nil {
		return initial
	}
	if des.empty {
		return des
	}

	if initial == nil {
		return des
	}

	cDes := &ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig{}

	if dcl.IsZeroValue(des.DataprocMetastoreService) || (dcl.IsEmptyValueIndirect(des.DataprocMetastoreService) && dcl.IsEmptyValueIndirect(initial.DataprocMetastoreService)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		cDes.DataprocMetastoreService = initial.DataprocMetastoreService
	} else {
		cDes.DataprocMetastoreService = des.DataprocMetastoreService
	}

	return cDes
}

// Slice form of the metastore-config canonicalizer above.
func canonicalizeClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfigSlice(des, initial []ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig, opts ...dcl.ApplyOption) []ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig {
	if dcl.IsEmptyValueIndirect(des) {
		return initial
	}

	if len(des) != len(initial) {

		items := make([]ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig, 0, len(des))
		for _, d := range des {
			cd := canonicalizeClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig(&d, nil, opts...)
			if cd != nil {
				items = append(items, *cd)
			}
		}
		return items
	}

	items := make([]ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig, 0, len(des))
	for i, d := range des {
		cd := canonicalizeClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig(&d, &initial[i], opts...)
		if cd != nil {
			items = append(items, *cd)
		}
	}
	return items

}

// Post-apply reconciliation for the metastore child message. Leaf message: no
// child fields need recursive canonicalization, so the server value (nw) wins
// when both sides are non-nil.
func canonicalizeNewClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig(c *Client, des, nw *ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig) *ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig {

	if des == nil {
		return nw
	}

	if nw == nil {
		if dcl.IsEmptyValueIndirect(des) {
			c.Config.Logger.Info("Found explicitly empty value for ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig while comparing non-nil desired to nil actual. Returning desired object.")
			return des
		}
		return nil
	}

	return nw
}

// Set semantics for metastore configs: order-insensitive matching by zero-diff
// comparison, canonicalizing matches and keeping unmatched actual elements.
func canonicalizeNewClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfigSet(c *Client, des, nw []ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig) []ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig {
	if des == nil {
		return nw
	}

	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
	var items []ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig
	for _, d := range des {
		matchedIndex := -1
		for i, n := range nw {
			if diffs, _ := compareClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
				matchedIndex = i
				break
			}
		}
		if matchedIndex != -1 {
			items = append(items, *canonicalizeNewClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig(c, &d, &nw[matchedIndex]))
			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
		}
	}
	// Also include elements in nw that are not matched in des.
	items = append(items, nw...)

	return items
}

// Ordered-slice form: canonicalizes nw[i] against des[i]; unequal lengths pass
// through so the differ reports them.
func canonicalizeNewClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfigSlice(c *Client, des, nw []ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig) []ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig {
	if des == nil {
		return nw
	}

	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
	// Return the original array.
	if len(des) != len(nw) {
		return nw
	}

	var items []ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig
	for i, d := range des {
		n := nw[i]
		items = append(items, *canonicalizeNewClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig(c, &d, &n))
	}

	return items
}

// Field-level canonicalization for the Spark history server child message:
// keeps the initial DataprocCluster when the desired value is zero or both are
// equivalently empty; otherwise the desired value wins.
func canonicalizeClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig(des, initial *ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig, opts ...dcl.ApplyOption) *ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig {
	if des == nil {
		return initial
	}
	if des.empty {
		return des
	}

	if initial == nil {
		return des
	}

	cDes := &ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig{}

	if dcl.IsZeroValue(des.DataprocCluster) || (dcl.IsEmptyValueIndirect(des.DataprocCluster) && dcl.IsEmptyValueIndirect(initial.DataprocCluster)) {
		// Desired and initial values are equivalent, so set canonical desired value to initial value.
		cDes.DataprocCluster = initial.DataprocCluster
	} else {
		cDes.DataprocCluster = des.DataprocCluster
	}

	return cDes
}

// Slice form of the Spark-history-server canonicalizer above.
func canonicalizeClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfigSlice(des, initial []ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig, opts ...dcl.ApplyOption) []ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig {
	if dcl.IsEmptyValueIndirect(des) {
		return initial
	}

	if len(des) != len(initial) {

		items := make([]ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig, 0, len(des))
		for _, d := range des {
			cd := canonicalizeClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig(&d, nil, opts...)
			if cd != nil {
				items = append(items, *cd)
			}
		}
		return items
	}

	items := make([]ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig, 0, len(des))
	for i, d := range des {
		cd := canonicalizeClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig(&d, &initial[i], opts...)
		if cd != nil {
			items = append(items, *cd)
		}
	}
	return items

}

// Post-apply reconciliation for the Spark history server child message (leaf:
// server value wins when both sides are non-nil).
func canonicalizeNewClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig(c *Client, des, nw *ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig) *ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig {

	if des == nil {
		return nw
	}

	if nw == nil {
		if dcl.IsEmptyValueIndirect(des) {
			c.Config.Logger.Info("Found explicitly empty value for ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig while comparing non-nil desired to nil actual. Returning desired object.")
			return des
		}
		return nil
	}

	return nw
}

// Set semantics for Spark-history-server configs: order-insensitive matching
// by zero-diff comparison, canonicalizing matches and keeping unmatched actual
// elements.
func canonicalizeNewClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfigSet(c *Client, des, nw []ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig) []ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig {
	if des == nil {
		return nw
	}

	// Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
	var items []ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig
	for _, d := range des {
		matchedIndex := -1
		for i, n := range nw {
			if diffs, _ := compareClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
				matchedIndex = i
				break
			}
		}
		if matchedIndex != -1 {
			items = append(items, *canonicalizeNewClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig(c, &d, &nw[matchedIndex]))
			nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
		}
	}
	// Also include elements in nw that are not matched in des.
	items = append(items, nw...)

	return items
}

// Ordered-slice form: canonicalizes nw[i] against des[i]; unequal lengths pass
// through so the differ reports them.
func canonicalizeNewClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfigSlice(c *Client, des, nw []ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig) []ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig {
	if des == nil {
		return nw
	}

	// Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
	// Return the original array.
	if len(des) != len(nw) {
		return nw
	}

	var items []ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig
	for i, d := range des {
		n := nw[i]
		items = append(items, *canonicalizeNewClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig(c, &d, &n))
	}

	return items
}

// The differ returns a list of diffs, along with a list of operations that should be taken
// to remedy them. Right now, it does not attempt to consolidate operations - if several
// fields can be fixed with a patch update, it will perform the patch several times.
// Diffs on some fields will be ignored if the `desired` state has an empty (nil)
// value. This empty value indicates that the user does not care about the state for
// the field. Empty fields on the actual object will cause diffs.
// TODO(magic-modules-eng): for efficiency in some resources, add batching.
func diffCluster(c *Client, desired, actual *Cluster, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) {
	if desired == nil || actual == nil {
		return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual)
	}

	c.Config.Logger.Infof("Diff function called with desired state: %v", desired)
	c.Config.Logger.Infof("Diff function called with actual state: %v", actual)

	var fn dcl.FieldName
	var newDiffs []*dcl.FieldDiff
	// New style diffs.
	// Note the API field names differ from the struct field names here:
	// Project -> ProjectId, Name -> ClusterName.
	if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ProjectId")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ClusterName")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Config, actual.Config, dcl.DiffInfo{ObjectFunction: compareClusterConfigNewStyle, EmptyObject: EmptyClusterConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Config")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	// Labels is the only visible field updated in place (via the cluster update
	// operation); server-managed "goog-dataproc-" labels are ignored.
	if ds, err := dcl.Diff(desired.Labels, actual.Labels, dcl.DiffInfo{IgnoredPrefixes: []string{"goog-dataproc-"}, OperationSelector: dcl.TriggersOperation("updateClusterUpdateClusterOperation")}, fn.AddNest("Labels")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Status, actual.Status, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareClusterStatusNewStyle, EmptyObject: EmptyClusterStatus, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Status")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.StatusHistory, actual.StatusHistory, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareClusterStatusHistoryNewStyle, EmptyObject: EmptyClusterStatusHistory, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("StatusHistory")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.ClusterUuid, actual.ClusterUuid, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ClusterUuid")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Metrics, actual.Metrics, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareClusterMetricsNewStyle, EmptyObject: EmptyClusterMetrics, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Metrics")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Location, actual.Location, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Location")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if ds, err := dcl.Diff(desired.VirtualClusterConfig, actual.VirtualClusterConfig, dcl.DiffInfo{ObjectFunction: compareClusterVirtualClusterConfigNewStyle, EmptyObject: EmptyClusterVirtualClusterConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("VirtualClusterConfig")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		newDiffs = append(newDiffs, ds...)
	}

	if len(newDiffs) > 0 {
		c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs)
	}
	return newDiffs, nil
}

// compareClusterConfigNewStyle is the dcl.Diff ObjectFunction for ClusterConfig.
// It accepts either pointer or value operands (dcl.Diff may pass either) and
// diffs each field under its API name (e.g. StagingBucket -> ConfigBucket).
func compareClusterConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*ClusterConfig)
	if !ok {
		desiredNotPointer, ok := d.(ClusterConfig)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a ClusterConfig or *ClusterConfig", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*ClusterConfig)
	if !ok {
		actualNotPointer, ok := a.(ClusterConfig)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a ClusterConfig", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.StagingBucket, actual.StagingBucket, dcl.DiffInfo{ServerDefault: true, Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ConfigBucket")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.TempBucket, actual.TempBucket, dcl.DiffInfo{ServerDefault: true, Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("TempBucket")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.GceClusterConfig, actual.GceClusterConfig, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareClusterConfigGceClusterConfigNewStyle, EmptyObject: EmptyClusterConfigGceClusterConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("GceClusterConfig")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.MasterConfig, actual.MasterConfig, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareClusterConfigMasterConfigNewStyle, EmptyObject: EmptyClusterConfigMasterConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MasterConfig")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.WorkerConfig, actual.WorkerConfig, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareClusterConfigWorkerConfigNewStyle, EmptyObject: EmptyClusterConfigWorkerConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("WorkerConfig")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.SecondaryWorkerConfig, actual.SecondaryWorkerConfig, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareClusterConfigSecondaryWorkerConfigNewStyle, EmptyObject: EmptyClusterConfigSecondaryWorkerConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SecondaryWorkerConfig")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.SoftwareConfig, actual.SoftwareConfig, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareClusterConfigSoftwareConfigNewStyle, EmptyObject: EmptyClusterConfigSoftwareConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SoftwareConfig")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.InitializationActions, actual.InitializationActions, dcl.DiffInfo{ObjectFunction: compareClusterConfigInitializationActionsNewStyle, EmptyObject: EmptyClusterConfigInitializationActions, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InitializationActions")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.EncryptionConfig, actual.EncryptionConfig, dcl.DiffInfo{ObjectFunction: compareClusterConfigEncryptionConfigNewStyle, EmptyObject: EmptyClusterConfigEncryptionConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("EncryptionConfig")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.AutoscalingConfig, actual.AutoscalingConfig, dcl.DiffInfo{ObjectFunction: compareClusterConfigAutoscalingConfigNewStyle, EmptyObject: EmptyClusterConfigAutoscalingConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AutoscalingConfig")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.SecurityConfig, actual.SecurityConfig, dcl.DiffInfo{ObjectFunction: compareClusterConfigSecurityConfigNewStyle, EmptyObject: EmptyClusterConfigSecurityConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SecurityConfig")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.LifecycleConfig, actual.LifecycleConfig, dcl.DiffInfo{ObjectFunction: compareClusterConfigLifecycleConfigNewStyle, EmptyObject: EmptyClusterConfigLifecycleConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("LifecycleConfig")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.EndpointConfig, actual.EndpointConfig, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareClusterConfigEndpointConfigNewStyle, EmptyObject: EmptyClusterConfigEndpointConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("EndpointConfig")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.GkeClusterConfig, actual.GkeClusterConfig, dcl.DiffInfo{ObjectFunction: compareClusterConfigGkeClusterConfigNewStyle, EmptyObject: EmptyClusterConfigGkeClusterConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("GkeClusterConfig")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.MetastoreConfig, actual.MetastoreConfig, dcl.DiffInfo{ObjectFunction: compareClusterConfigMetastoreConfigNewStyle, EmptyObject: EmptyClusterConfigMetastoreConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MetastoreConfig")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.DataprocMetricConfig, actual.DataprocMetricConfig, dcl.DiffInfo{ObjectFunction: compareClusterConfigDataprocMetricConfigNewStyle, EmptyObject: EmptyClusterConfigDataprocMetricConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DataprocMetricConfig")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareClusterConfigGceClusterConfigNewStyle is the dcl.Diff ObjectFunction
// for the GCE cluster config; every field change requires recreate. Accepts
// pointer or value operands, like the other compare* functions.
func compareClusterConfigGceClusterConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*ClusterConfigGceClusterConfig)
	if !ok {
		desiredNotPointer, ok := d.(ClusterConfigGceClusterConfig)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a ClusterConfigGceClusterConfig or *ClusterConfigGceClusterConfig", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*ClusterConfigGceClusterConfig)
	if !ok {
		actualNotPointer, ok := a.(ClusterConfigGceClusterConfig)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a ClusterConfigGceClusterConfig", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.Zone, actual.Zone, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ZoneUri")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Network, actual.Network, dcl.DiffInfo{ServerDefault: true, Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NetworkUri")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Subnetwork, actual.Subnetwork, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SubnetworkUri")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.InternalIPOnly, actual.InternalIPOnly, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InternalIpOnly")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.PrivateIPv6GoogleAccess, actual.PrivateIPv6GoogleAccess, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PrivateIpv6GoogleAccess")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.ServiceAccount, actual.ServiceAccount, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ServiceAccount")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.ServiceAccountScopes, actual.ServiceAccountScopes, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ServiceAccountScopes")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	// Tags compare with set semantics (order-insensitive).
	if ds, err := dcl.Diff(desired.Tags, actual.Tags, dcl.DiffInfo{Type: "Set", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Tags")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Metadata, actual.Metadata, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Metadata")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.ReservationAffinity, actual.ReservationAffinity, dcl.DiffInfo{ObjectFunction: compareClusterConfigGceClusterConfigReservationAffinityNewStyle, EmptyObject: EmptyClusterConfigGceClusterConfigReservationAffinity, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ReservationAffinity")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.NodeGroupAffinity, actual.NodeGroupAffinity, dcl.DiffInfo{ObjectFunction: compareClusterConfigGceClusterConfigNodeGroupAffinityNewStyle, EmptyObject: EmptyClusterConfigGceClusterConfigNodeGroupAffinity, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NodeGroupAffinity")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.ShieldedInstanceConfig, actual.ShieldedInstanceConfig, dcl.DiffInfo{ObjectFunction: compareClusterConfigGceClusterConfigShieldedInstanceConfigNewStyle, EmptyObject: EmptyClusterConfigGceClusterConfigShieldedInstanceConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ShieldedInstanceConfig")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.ConfidentialInstanceConfig, actual.ConfidentialInstanceConfig, dcl.DiffInfo{ObjectFunction: compareClusterConfigGceClusterConfigConfidentialInstanceConfigNewStyle, EmptyObject: EmptyClusterConfigGceClusterConfigConfidentialInstanceConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ConfidentialInstanceConfig")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareClusterConfigGceClusterConfigReservationAffinityNewStyle is the
// dcl.Diff ObjectFunction for reservation affinity (all changes recreate).
func compareClusterConfigGceClusterConfigReservationAffinityNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*ClusterConfigGceClusterConfigReservationAffinity)
	if !ok {
		desiredNotPointer, ok := d.(ClusterConfigGceClusterConfigReservationAffinity)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a ClusterConfigGceClusterConfigReservationAffinity or *ClusterConfigGceClusterConfigReservationAffinity", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*ClusterConfigGceClusterConfigReservationAffinity)
	if !ok {
		actualNotPointer, ok := a.(ClusterConfigGceClusterConfigReservationAffinity)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a ClusterConfigGceClusterConfigReservationAffinity", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.ConsumeReservationType, actual.ConsumeReservationType, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ConsumeReservationType")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Key, actual.Key, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Key")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Values, actual.Values, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Values")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareClusterConfigGceClusterConfigNodeGroupAffinityNewStyle is the dcl.Diff
// ObjectFunction for node group affinity (NodeGroup -> API name NodeGroupUri).
func compareClusterConfigGceClusterConfigNodeGroupAffinityNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*ClusterConfigGceClusterConfigNodeGroupAffinity)
	if !ok {
		desiredNotPointer, ok := d.(ClusterConfigGceClusterConfigNodeGroupAffinity)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a ClusterConfigGceClusterConfigNodeGroupAffinity or *ClusterConfigGceClusterConfigNodeGroupAffinity", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*ClusterConfigGceClusterConfigNodeGroupAffinity)
	if !ok {
		actualNotPointer, ok := a.(ClusterConfigGceClusterConfigNodeGroupAffinity)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a ClusterConfigGceClusterConfigNodeGroupAffinity", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.NodeGroup, actual.NodeGroup, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NodeGroupUri")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareClusterConfigGceClusterConfigShieldedInstanceConfigNewStyle is the
// dcl.Diff ObjectFunction for shielded-VM settings (all changes recreate).
func compareClusterConfigGceClusterConfigShieldedInstanceConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*ClusterConfigGceClusterConfigShieldedInstanceConfig)
	if !ok {
		desiredNotPointer, ok := d.(ClusterConfigGceClusterConfigShieldedInstanceConfig)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a ClusterConfigGceClusterConfigShieldedInstanceConfig or *ClusterConfigGceClusterConfigShieldedInstanceConfig", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*ClusterConfigGceClusterConfigShieldedInstanceConfig)
	if !ok {
		actualNotPointer, ok := a.(ClusterConfigGceClusterConfigShieldedInstanceConfig)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a ClusterConfigGceClusterConfigShieldedInstanceConfig", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.EnableSecureBoot, actual.EnableSecureBoot, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("EnableSecureBoot")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.EnableVtpm, actual.EnableVtpm, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("EnableVtpm")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.EnableIntegrityMonitoring, actual.EnableIntegrityMonitoring, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("EnableIntegrityMonitoring")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareClusterConfigGceClusterConfigConfidentialInstanceConfigNewStyle is the
// dcl.Diff ObjectFunction for confidential-VM settings.
func compareClusterConfigGceClusterConfigConfidentialInstanceConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*ClusterConfigGceClusterConfigConfidentialInstanceConfig)
	if !ok {
		desiredNotPointer, ok := d.(ClusterConfigGceClusterConfigConfidentialInstanceConfig)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a ClusterConfigGceClusterConfigConfidentialInstanceConfig or *ClusterConfigGceClusterConfigConfidentialInstanceConfig", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*ClusterConfigGceClusterConfigConfidentialInstanceConfig)
	if !ok {
		actualNotPointer, ok := a.(ClusterConfigGceClusterConfigConfidentialInstanceConfig)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a ClusterConfigGceClusterConfigConfidentialInstanceConfig", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.EnableConfidentialCompute, actual.EnableConfidentialCompute, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("EnableConfidentialCompute")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}
	return diffs, nil
}

// compareClusterConfigMasterConfigNewStyle is the dcl.Diff ObjectFunction for
// the master instance group config (Image -> ImageUri, MachineType ->
// MachineTypeUri; InstanceNames/IsPreemptible/ManagedGroupConfig are
// output-only).
func compareClusterConfigMasterConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
	var diffs []*dcl.FieldDiff

	desired, ok := d.(*ClusterConfigMasterConfig)
	if !ok {
		desiredNotPointer, ok := d.(ClusterConfigMasterConfig)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a ClusterConfigMasterConfig or *ClusterConfigMasterConfig", d)
		}
		desired = &desiredNotPointer
	}
	actual, ok := a.(*ClusterConfigMasterConfig)
	if !ok {
		actualNotPointer, ok := a.(ClusterConfigMasterConfig)
		if !ok {
			return nil, fmt.Errorf("obj %v is not a ClusterConfigMasterConfig", a)
		}
		actual = &actualNotPointer
	}

	if ds, err := dcl.Diff(desired.NumInstances, actual.NumInstances, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NumInstances")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.InstanceNames, actual.InstanceNames, dcl.DiffInfo{OutputOnly: true, ServerDefault: true, Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstanceNames")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Image, actual.Image, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ImageUri")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.MachineType, actual.MachineType, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MachineTypeUri")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.DiskConfig, actual.DiskConfig, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareClusterConfigMasterConfigDiskConfigNewStyle, EmptyObject: EmptyClusterConfigMasterConfigDiskConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DiskConfig")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.IsPreemptible, actual.IsPreemptible, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("IsPreemptible")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Preemptibility, actual.Preemptibility, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Preemptibility")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.ManagedGroupConfig, actual.ManagedGroupConfig, dcl.DiffInfo{OutputOnly: true, ServerDefault: true, ObjectFunction: compareClusterConfigMasterConfigManagedGroupConfigNewStyle, EmptyObject: EmptyClusterConfigMasterConfigManagedGroupConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ManagedGroupConfig")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
	}

	if ds, err := dcl.Diff(desired.Accelerators, actual.Accelerators, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareClusterConfigMasterConfigAcceleratorsNewStyle, EmptyObject: EmptyClusterConfigMasterConfigAccelerators, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Accelerators")); len(ds) != 0 || err != nil {
		if err != nil {
			return nil, err
		}
		diffs = append(diffs, ds...)
- } - - if ds, err := dcl.Diff(desired.MinCpuPlatform, actual.MinCpuPlatform, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MinCpuPlatform")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.InstanceReferences, actual.InstanceReferences, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareClusterConfigMasterConfigInstanceReferencesNewStyle, EmptyObject: EmptyClusterConfigMasterConfigInstanceReferences, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstanceReferences")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareClusterConfigMasterConfigDiskConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ClusterConfigMasterConfigDiskConfig) - if !ok { - desiredNotPointer, ok := d.(ClusterConfigMasterConfigDiskConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigMasterConfigDiskConfig or *ClusterConfigMasterConfigDiskConfig", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ClusterConfigMasterConfigDiskConfig) - if !ok { - actualNotPointer, ok := a.(ClusterConfigMasterConfigDiskConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigMasterConfigDiskConfig", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.BootDiskType, actual.BootDiskType, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("BootDiskType")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.BootDiskSizeGb, actual.BootDiskSizeGb, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("BootDiskSizeGb")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.NumLocalSsds, actual.NumLocalSsds, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NumLocalSsds")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.LocalSsdInterface, actual.LocalSsdInterface, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("LocalSsdInterface")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareClusterConfigMasterConfigManagedGroupConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ClusterConfigMasterConfigManagedGroupConfig) - if !ok { - desiredNotPointer, ok := d.(ClusterConfigMasterConfigManagedGroupConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigMasterConfigManagedGroupConfig or *ClusterConfigMasterConfigManagedGroupConfig", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ClusterConfigMasterConfigManagedGroupConfig) - if !ok { - actualNotPointer, ok := a.(ClusterConfigMasterConfigManagedGroupConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigMasterConfigManagedGroupConfig", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.InstanceTemplateName, actual.InstanceTemplateName, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstanceTemplateName")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.InstanceGroupManagerName, actual.InstanceGroupManagerName, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstanceGroupManagerName")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareClusterConfigMasterConfigAcceleratorsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ClusterConfigMasterConfigAccelerators) - if !ok { - desiredNotPointer, ok := d.(ClusterConfigMasterConfigAccelerators) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigMasterConfigAccelerators or *ClusterConfigMasterConfigAccelerators", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ClusterConfigMasterConfigAccelerators) - if !ok { - actualNotPointer, ok := a.(ClusterConfigMasterConfigAccelerators) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigMasterConfigAccelerators", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.AcceleratorType, actual.AcceleratorType, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AcceleratorTypeUri")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.AcceleratorCount, actual.AcceleratorCount, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AcceleratorCount")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareClusterConfigMasterConfigInstanceReferencesNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ClusterConfigMasterConfigInstanceReferences) - if !ok { - desiredNotPointer, ok := d.(ClusterConfigMasterConfigInstanceReferences) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigMasterConfigInstanceReferences or *ClusterConfigMasterConfigInstanceReferences", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ClusterConfigMasterConfigInstanceReferences) - if !ok { - actualNotPointer, ok := a.(ClusterConfigMasterConfigInstanceReferences) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigMasterConfigInstanceReferences", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.InstanceName, actual.InstanceName, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstanceName")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.InstanceId, actual.InstanceId, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstanceId")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PublicKey, actual.PublicKey, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PublicKey")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PublicEciesKey, actual.PublicEciesKey, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PublicEciesKey")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareClusterConfigWorkerConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ClusterConfigWorkerConfig) - if !ok { - desiredNotPointer, ok := d.(ClusterConfigWorkerConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigWorkerConfig or *ClusterConfigWorkerConfig", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ClusterConfigWorkerConfig) - if !ok { - actualNotPointer, ok := a.(ClusterConfigWorkerConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigWorkerConfig", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.NumInstances, actual.NumInstances, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NumInstances")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.InstanceNames, actual.InstanceNames, dcl.DiffInfo{OutputOnly: true, ServerDefault: true, Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstanceNames")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Image, actual.Image, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ImageUri")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.MachineType, actual.MachineType, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MachineTypeUri")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.DiskConfig, actual.DiskConfig, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareClusterConfigWorkerConfigDiskConfigNewStyle, EmptyObject: EmptyClusterConfigWorkerConfigDiskConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DiskConfig")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.IsPreemptible, actual.IsPreemptible, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("IsPreemptible")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Preemptibility, actual.Preemptibility, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Preemptibility")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.ManagedGroupConfig, actual.ManagedGroupConfig, dcl.DiffInfo{OutputOnly: true, ServerDefault: true, ObjectFunction: compareClusterConfigWorkerConfigManagedGroupConfigNewStyle, EmptyObject: EmptyClusterConfigWorkerConfigManagedGroupConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ManagedGroupConfig")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Accelerators, actual.Accelerators, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareClusterConfigWorkerConfigAcceleratorsNewStyle, EmptyObject: EmptyClusterConfigWorkerConfigAccelerators, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Accelerators")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.MinCpuPlatform, actual.MinCpuPlatform, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MinCpuPlatform")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.InstanceReferences, actual.InstanceReferences, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareClusterConfigWorkerConfigInstanceReferencesNewStyle, EmptyObject: EmptyClusterConfigWorkerConfigInstanceReferences, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstanceReferences")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareClusterConfigWorkerConfigDiskConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ClusterConfigWorkerConfigDiskConfig) - if !ok { - desiredNotPointer, ok := d.(ClusterConfigWorkerConfigDiskConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigWorkerConfigDiskConfig or *ClusterConfigWorkerConfigDiskConfig", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ClusterConfigWorkerConfigDiskConfig) - if !ok { - actualNotPointer, ok := a.(ClusterConfigWorkerConfigDiskConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigWorkerConfigDiskConfig", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.BootDiskType, actual.BootDiskType, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("BootDiskType")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.BootDiskSizeGb, actual.BootDiskSizeGb, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("BootDiskSizeGb")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.NumLocalSsds, actual.NumLocalSsds, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NumLocalSsds")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.LocalSsdInterface, actual.LocalSsdInterface, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("LocalSsdInterface")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareClusterConfigWorkerConfigManagedGroupConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ClusterConfigWorkerConfigManagedGroupConfig) - if !ok { - desiredNotPointer, ok := d.(ClusterConfigWorkerConfigManagedGroupConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigWorkerConfigManagedGroupConfig or *ClusterConfigWorkerConfigManagedGroupConfig", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ClusterConfigWorkerConfigManagedGroupConfig) - if !ok { - actualNotPointer, ok := a.(ClusterConfigWorkerConfigManagedGroupConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigWorkerConfigManagedGroupConfig", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.InstanceTemplateName, actual.InstanceTemplateName, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstanceTemplateName")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.InstanceGroupManagerName, actual.InstanceGroupManagerName, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstanceGroupManagerName")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareClusterConfigWorkerConfigAcceleratorsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ClusterConfigWorkerConfigAccelerators) - if !ok { - desiredNotPointer, ok := d.(ClusterConfigWorkerConfigAccelerators) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigWorkerConfigAccelerators or *ClusterConfigWorkerConfigAccelerators", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ClusterConfigWorkerConfigAccelerators) - if !ok { - actualNotPointer, ok := a.(ClusterConfigWorkerConfigAccelerators) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigWorkerConfigAccelerators", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.AcceleratorType, actual.AcceleratorType, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AcceleratorTypeUri")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.AcceleratorCount, actual.AcceleratorCount, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AcceleratorCount")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareClusterConfigWorkerConfigInstanceReferencesNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ClusterConfigWorkerConfigInstanceReferences) - if !ok { - desiredNotPointer, ok := d.(ClusterConfigWorkerConfigInstanceReferences) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigWorkerConfigInstanceReferences or *ClusterConfigWorkerConfigInstanceReferences", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ClusterConfigWorkerConfigInstanceReferences) - if !ok { - actualNotPointer, ok := a.(ClusterConfigWorkerConfigInstanceReferences) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigWorkerConfigInstanceReferences", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.InstanceName, actual.InstanceName, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstanceName")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.InstanceId, actual.InstanceId, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstanceId")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PublicKey, actual.PublicKey, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PublicKey")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PublicEciesKey, actual.PublicEciesKey, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PublicEciesKey")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareClusterConfigSecondaryWorkerConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ClusterConfigSecondaryWorkerConfig) - if !ok { - desiredNotPointer, ok := d.(ClusterConfigSecondaryWorkerConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigSecondaryWorkerConfig or *ClusterConfigSecondaryWorkerConfig", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ClusterConfigSecondaryWorkerConfig) - if !ok { - actualNotPointer, ok := a.(ClusterConfigSecondaryWorkerConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigSecondaryWorkerConfig", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.NumInstances, actual.NumInstances, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NumInstances")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.InstanceNames, actual.InstanceNames, dcl.DiffInfo{OutputOnly: true, ServerDefault: true, Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstanceNames")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Image, actual.Image, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ImageUri")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.MachineType, actual.MachineType, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MachineTypeUri")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.DiskConfig, actual.DiskConfig, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareClusterConfigSecondaryWorkerConfigDiskConfigNewStyle, EmptyObject: EmptyClusterConfigSecondaryWorkerConfigDiskConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DiskConfig")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.IsPreemptible, actual.IsPreemptible, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("IsPreemptible")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Preemptibility, actual.Preemptibility, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Preemptibility")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.ManagedGroupConfig, actual.ManagedGroupConfig, dcl.DiffInfo{OutputOnly: true, ServerDefault: true, ObjectFunction: compareClusterConfigSecondaryWorkerConfigManagedGroupConfigNewStyle, EmptyObject: EmptyClusterConfigSecondaryWorkerConfigManagedGroupConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ManagedGroupConfig")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Accelerators, actual.Accelerators, dcl.DiffInfo{ServerDefault: true, ObjectFunction: compareClusterConfigSecondaryWorkerConfigAcceleratorsNewStyle, EmptyObject: EmptyClusterConfigSecondaryWorkerConfigAccelerators, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Accelerators")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.MinCpuPlatform, actual.MinCpuPlatform, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MinCpuPlatform")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.InstanceReferences, actual.InstanceReferences, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareClusterConfigSecondaryWorkerConfigInstanceReferencesNewStyle, EmptyObject: EmptyClusterConfigSecondaryWorkerConfigInstanceReferences, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstanceReferences")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareClusterConfigSecondaryWorkerConfigDiskConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ClusterConfigSecondaryWorkerConfigDiskConfig) - if !ok { - desiredNotPointer, ok := d.(ClusterConfigSecondaryWorkerConfigDiskConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigSecondaryWorkerConfigDiskConfig or *ClusterConfigSecondaryWorkerConfigDiskConfig", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ClusterConfigSecondaryWorkerConfigDiskConfig) - if !ok { - actualNotPointer, ok := a.(ClusterConfigSecondaryWorkerConfigDiskConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigSecondaryWorkerConfigDiskConfig", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.BootDiskType, actual.BootDiskType, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("BootDiskType")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.BootDiskSizeGb, actual.BootDiskSizeGb, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("BootDiskSizeGb")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.NumLocalSsds, actual.NumLocalSsds, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NumLocalSsds")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.LocalSsdInterface, actual.LocalSsdInterface, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("LocalSsdInterface")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareClusterConfigSecondaryWorkerConfigManagedGroupConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ClusterConfigSecondaryWorkerConfigManagedGroupConfig) - if !ok { - desiredNotPointer, ok := d.(ClusterConfigSecondaryWorkerConfigManagedGroupConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigSecondaryWorkerConfigManagedGroupConfig or *ClusterConfigSecondaryWorkerConfigManagedGroupConfig", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ClusterConfigSecondaryWorkerConfigManagedGroupConfig) - if !ok { - actualNotPointer, ok := a.(ClusterConfigSecondaryWorkerConfigManagedGroupConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigSecondaryWorkerConfigManagedGroupConfig", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.InstanceTemplateName, actual.InstanceTemplateName, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstanceTemplateName")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = 
append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.InstanceGroupManagerName, actual.InstanceGroupManagerName, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstanceGroupManagerName")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareClusterConfigSecondaryWorkerConfigAcceleratorsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ClusterConfigSecondaryWorkerConfigAccelerators) - if !ok { - desiredNotPointer, ok := d.(ClusterConfigSecondaryWorkerConfigAccelerators) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigSecondaryWorkerConfigAccelerators or *ClusterConfigSecondaryWorkerConfigAccelerators", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ClusterConfigSecondaryWorkerConfigAccelerators) - if !ok { - actualNotPointer, ok := a.(ClusterConfigSecondaryWorkerConfigAccelerators) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigSecondaryWorkerConfigAccelerators", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.AcceleratorType, actual.AcceleratorType, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AcceleratorTypeUri")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.AcceleratorCount, actual.AcceleratorCount, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AcceleratorCount")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareClusterConfigSecondaryWorkerConfigInstanceReferencesNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ClusterConfigSecondaryWorkerConfigInstanceReferences) - if !ok { - desiredNotPointer, ok := d.(ClusterConfigSecondaryWorkerConfigInstanceReferences) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigSecondaryWorkerConfigInstanceReferences or *ClusterConfigSecondaryWorkerConfigInstanceReferences", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ClusterConfigSecondaryWorkerConfigInstanceReferences) - if !ok { - actualNotPointer, ok := a.(ClusterConfigSecondaryWorkerConfigInstanceReferences) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigSecondaryWorkerConfigInstanceReferences", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.InstanceName, actual.InstanceName, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstanceName")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.InstanceId, actual.InstanceId, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstanceId")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PublicKey, actual.PublicKey, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PublicKey")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PublicEciesKey, actual.PublicEciesKey, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PublicEciesKey")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareClusterConfigSoftwareConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ClusterConfigSoftwareConfig) - if !ok { - desiredNotPointer, ok := d.(ClusterConfigSoftwareConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigSoftwareConfig or *ClusterConfigSoftwareConfig", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ClusterConfigSoftwareConfig) - if !ok { - actualNotPointer, ok := a.(ClusterConfigSoftwareConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigSoftwareConfig", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.ImageVersion, actual.ImageVersion, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ImageVersion")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Properties, actual.Properties, dcl.DiffInfo{CustomDiff: canonicalizeSoftwareConfigProperties, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Properties")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.OptionalComponents, actual.OptionalComponents, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("OptionalComponents")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareClusterConfigInitializationActionsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ClusterConfigInitializationActions) - if !ok { - desiredNotPointer, ok := d.(ClusterConfigInitializationActions) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigInitializationActions or *ClusterConfigInitializationActions", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ClusterConfigInitializationActions) - if !ok { - actualNotPointer, ok := a.(ClusterConfigInitializationActions) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigInitializationActions", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.ExecutableFile, actual.ExecutableFile, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ExecutableFile")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.ExecutionTimeout, actual.ExecutionTimeout, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ExecutionTimeout")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareClusterConfigEncryptionConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ClusterConfigEncryptionConfig) - if !ok { - desiredNotPointer, ok := d.(ClusterConfigEncryptionConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigEncryptionConfig or *ClusterConfigEncryptionConfig", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ClusterConfigEncryptionConfig) - if !ok { - actualNotPointer, ok := a.(ClusterConfigEncryptionConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigEncryptionConfig", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.GcePdKmsKeyName, actual.GcePdKmsKeyName, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("GcePdKmsKeyName")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareClusterConfigAutoscalingConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ClusterConfigAutoscalingConfig) - if !ok { - desiredNotPointer, ok := d.(ClusterConfigAutoscalingConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigAutoscalingConfig or *ClusterConfigAutoscalingConfig", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ClusterConfigAutoscalingConfig) - if !ok { - actualNotPointer, ok := a.(ClusterConfigAutoscalingConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigAutoscalingConfig", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Policy, actual.Policy, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("PolicyUri")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareClusterConfigSecurityConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ClusterConfigSecurityConfig) - if !ok { - desiredNotPointer, ok := d.(ClusterConfigSecurityConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigSecurityConfig or *ClusterConfigSecurityConfig", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ClusterConfigSecurityConfig) - if !ok { - actualNotPointer, ok := a.(ClusterConfigSecurityConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigSecurityConfig", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.KerberosConfig, actual.KerberosConfig, dcl.DiffInfo{ObjectFunction: compareClusterConfigSecurityConfigKerberosConfigNewStyle, EmptyObject: EmptyClusterConfigSecurityConfigKerberosConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KerberosConfig")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.IdentityConfig, actual.IdentityConfig, dcl.DiffInfo{ObjectFunction: compareClusterConfigSecurityConfigIdentityConfigNewStyle, EmptyObject: EmptyClusterConfigSecurityConfigIdentityConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("IdentityConfig")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareClusterConfigSecurityConfigKerberosConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ClusterConfigSecurityConfigKerberosConfig) - if !ok { - desiredNotPointer, ok := d.(ClusterConfigSecurityConfigKerberosConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigSecurityConfigKerberosConfig or *ClusterConfigSecurityConfigKerberosConfig", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ClusterConfigSecurityConfigKerberosConfig) - if !ok { - actualNotPointer, ok := a.(ClusterConfigSecurityConfigKerberosConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigSecurityConfigKerberosConfig", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.EnableKerberos, actual.EnableKerberos, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("EnableKerberos")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.RootPrincipalPassword, actual.RootPrincipalPassword, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("RootPrincipalPasswordUri")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.KmsKey, actual.KmsKey, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KmsKeyUri")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Keystore, actual.Keystore, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KeystoreUri")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Truststore, actual.Truststore, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("TruststoreUri")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.KeystorePassword, actual.KeystorePassword, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KeystorePasswordUri")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.KeyPassword, actual.KeyPassword, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KeyPasswordUri")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.TruststorePassword, actual.TruststorePassword, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("TruststorePasswordUri")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.CrossRealmTrustRealm, actual.CrossRealmTrustRealm, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CrossRealmTrustRealm")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.CrossRealmTrustKdc, actual.CrossRealmTrustKdc, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CrossRealmTrustKdc")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.CrossRealmTrustAdminServer, actual.CrossRealmTrustAdminServer, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CrossRealmTrustAdminServer")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.CrossRealmTrustSharedPassword, actual.CrossRealmTrustSharedPassword, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CrossRealmTrustSharedPasswordUri")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.KdcDbKey, actual.KdcDbKey, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KdcDbKeyUri")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.TgtLifetimeHours, actual.TgtLifetimeHours, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("TgtLifetimeHours")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Realm, actual.Realm, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Realm")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareClusterConfigSecurityConfigIdentityConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ClusterConfigSecurityConfigIdentityConfig) - if !ok { - desiredNotPointer, ok := d.(ClusterConfigSecurityConfigIdentityConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigSecurityConfigIdentityConfig or *ClusterConfigSecurityConfigIdentityConfig", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ClusterConfigSecurityConfigIdentityConfig) - if !ok { - actualNotPointer, ok := a.(ClusterConfigSecurityConfigIdentityConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigSecurityConfigIdentityConfig", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.UserServiceAccountMapping, actual.UserServiceAccountMapping, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("UserServiceAccountMapping")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareClusterConfigLifecycleConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ClusterConfigLifecycleConfig) - if !ok { - desiredNotPointer, ok := d.(ClusterConfigLifecycleConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigLifecycleConfig or *ClusterConfigLifecycleConfig", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ClusterConfigLifecycleConfig) - if !ok { - actualNotPointer, ok := a.(ClusterConfigLifecycleConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigLifecycleConfig", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.IdleDeleteTtl, actual.IdleDeleteTtl, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("IdleDeleteTtl")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.AutoDeleteTime, actual.AutoDeleteTime, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AutoDeleteTime")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.AutoDeleteTtl, actual.AutoDeleteTtl, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AutoDeleteTtl")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.IdleStartTime, actual.IdleStartTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("IdleStartTime")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareClusterConfigEndpointConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ClusterConfigEndpointConfig) - if !ok { - desiredNotPointer, ok := d.(ClusterConfigEndpointConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigEndpointConfig or *ClusterConfigEndpointConfig", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ClusterConfigEndpointConfig) - if !ok { - actualNotPointer, ok := a.(ClusterConfigEndpointConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigEndpointConfig", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.HttpPorts, actual.HttpPorts, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("HttpPorts")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.EnableHttpPortAccess, actual.EnableHttpPortAccess, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("EnableHttpPortAccess")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareClusterConfigGkeClusterConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ClusterConfigGkeClusterConfig) - if !ok { - desiredNotPointer, ok := d.(ClusterConfigGkeClusterConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigGkeClusterConfig or *ClusterConfigGkeClusterConfig", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ClusterConfigGkeClusterConfig) - if !ok { - actualNotPointer, ok := a.(ClusterConfigGkeClusterConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigGkeClusterConfig", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.NamespacedGkeDeploymentTarget, actual.NamespacedGkeDeploymentTarget, dcl.DiffInfo{ObjectFunction: compareClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetNewStyle, EmptyObject: EmptyClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NamespacedGkeDeploymentTarget")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) - if !ok { - desiredNotPointer, ok := d.(ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget or *ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) - if !ok { - actualNotPointer, ok := a.(ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.TargetGkeCluster, actual.TargetGkeCluster, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("TargetGkeCluster")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.ClusterNamespace, actual.ClusterNamespace, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ClusterNamespace")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareClusterConfigMetastoreConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ClusterConfigMetastoreConfig) - if !ok { - desiredNotPointer, ok := d.(ClusterConfigMetastoreConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigMetastoreConfig or *ClusterConfigMetastoreConfig", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ClusterConfigMetastoreConfig) - if !ok { - actualNotPointer, ok := a.(ClusterConfigMetastoreConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigMetastoreConfig", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.DataprocMetastoreService, actual.DataprocMetastoreService, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DataprocMetastoreService")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareClusterConfigDataprocMetricConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ClusterConfigDataprocMetricConfig) - if !ok { - desiredNotPointer, ok := d.(ClusterConfigDataprocMetricConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigDataprocMetricConfig or *ClusterConfigDataprocMetricConfig", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ClusterConfigDataprocMetricConfig) - if !ok { - actualNotPointer, ok := a.(ClusterConfigDataprocMetricConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigDataprocMetricConfig", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Metrics, actual.Metrics, dcl.DiffInfo{ObjectFunction: compareClusterConfigDataprocMetricConfigMetricsNewStyle, EmptyObject: EmptyClusterConfigDataprocMetricConfigMetrics, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Metrics")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareClusterConfigDataprocMetricConfigMetricsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ClusterConfigDataprocMetricConfigMetrics) - if !ok { - desiredNotPointer, ok := d.(ClusterConfigDataprocMetricConfigMetrics) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigDataprocMetricConfigMetrics or *ClusterConfigDataprocMetricConfigMetrics", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ClusterConfigDataprocMetricConfigMetrics) - if !ok { - actualNotPointer, ok := a.(ClusterConfigDataprocMetricConfigMetrics) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterConfigDataprocMetricConfigMetrics", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.MetricSource, actual.MetricSource, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MetricSource")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.MetricOverrides, actual.MetricOverrides, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MetricOverrides")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareClusterStatusNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ClusterStatus) - if !ok { - desiredNotPointer, ok := d.(ClusterStatus) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterStatus or *ClusterStatus", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ClusterStatus) - if !ok { - actualNotPointer, ok := a.(ClusterStatus) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterStatus", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.State, actual.State, dcl.DiffInfo{OutputOnly: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("State")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Detail, actual.Detail, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Detail")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.StateStartTime, actual.StateStartTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("StateStartTime")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Substate, actual.Substate, dcl.DiffInfo{OutputOnly: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Substate")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareClusterStatusHistoryNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ClusterStatusHistory) - if !ok { - desiredNotPointer, ok := d.(ClusterStatusHistory) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterStatusHistory or *ClusterStatusHistory", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ClusterStatusHistory) - if !ok { - actualNotPointer, ok := a.(ClusterStatusHistory) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterStatusHistory", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.State, actual.State, dcl.DiffInfo{OutputOnly: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("State")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Detail, actual.Detail, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Detail")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.StateStartTime, actual.StateStartTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("StateStartTime")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Substate, actual.Substate, dcl.DiffInfo{OutputOnly: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Substate")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareClusterMetricsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ClusterMetrics) - if !ok { - desiredNotPointer, ok := d.(ClusterMetrics) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterMetrics or *ClusterMetrics", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ClusterMetrics) - if !ok { - actualNotPointer, ok := a.(ClusterMetrics) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterMetrics", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.HdfsMetrics, actual.HdfsMetrics, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("HdfsMetrics")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.YarnMetrics, actual.YarnMetrics, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("YarnMetrics")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareClusterVirtualClusterConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ClusterVirtualClusterConfig) - if !ok { - desiredNotPointer, ok := d.(ClusterVirtualClusterConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterVirtualClusterConfig or *ClusterVirtualClusterConfig", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ClusterVirtualClusterConfig) - if !ok { - actualNotPointer, ok := a.(ClusterVirtualClusterConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterVirtualClusterConfig", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.StagingBucket, actual.StagingBucket, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("StagingBucket")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.KubernetesClusterConfig, actual.KubernetesClusterConfig, dcl.DiffInfo{ObjectFunction: compareClusterVirtualClusterConfigKubernetesClusterConfigNewStyle, EmptyObject: EmptyClusterVirtualClusterConfigKubernetesClusterConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KubernetesClusterConfig")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.AuxiliaryServicesConfig, actual.AuxiliaryServicesConfig, dcl.DiffInfo{ObjectFunction: compareClusterVirtualClusterConfigAuxiliaryServicesConfigNewStyle, EmptyObject: EmptyClusterVirtualClusterConfigAuxiliaryServicesConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AuxiliaryServicesConfig")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareClusterVirtualClusterConfigKubernetesClusterConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ClusterVirtualClusterConfigKubernetesClusterConfig) - if !ok { - desiredNotPointer, ok := d.(ClusterVirtualClusterConfigKubernetesClusterConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterVirtualClusterConfigKubernetesClusterConfig or *ClusterVirtualClusterConfigKubernetesClusterConfig", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ClusterVirtualClusterConfigKubernetesClusterConfig) - if !ok { - actualNotPointer, ok := a.(ClusterVirtualClusterConfigKubernetesClusterConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterVirtualClusterConfigKubernetesClusterConfig", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.KubernetesNamespace, actual.KubernetesNamespace, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KubernetesNamespace")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.GkeClusterConfig, actual.GkeClusterConfig, dcl.DiffInfo{ObjectFunction: compareClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNewStyle, EmptyObject: EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("GkeClusterConfig")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.KubernetesSoftwareConfig, actual.KubernetesSoftwareConfig, dcl.DiffInfo{ObjectFunction: compareClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfigNewStyle, EmptyObject: EmptyClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KubernetesSoftwareConfig")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig) - if !ok { - desiredNotPointer, ok := d.(ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig or *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig) - if !ok { - actualNotPointer, ok := a.(ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.GkeClusterTarget, actual.GkeClusterTarget, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("GkeClusterTarget")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.NodePoolTarget, actual.NodePoolTarget, dcl.DiffInfo{ObjectFunction: compareClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNewStyle, EmptyObject: EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NodePoolTarget")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget) - if !ok { - desiredNotPointer, ok := d.(ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget or *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget) - if !ok { - actualNotPointer, ok := a.(ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.NodePool, actual.NodePool, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NodePool")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Roles, actual.Roles, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Roles")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.NodePoolConfig, actual.NodePoolConfig, dcl.DiffInfo{ObjectFunction: compareClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigNewStyle, EmptyObject: EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NodePoolConfig")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig) - if !ok { - desiredNotPointer, ok := d.(ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig or *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig) - if !ok { - actualNotPointer, ok := a.(ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig", a) - } - actual = &actualNotPointer - } - - if 
ds, err := dcl.Diff(desired.Config, actual.Config, dcl.DiffInfo{ObjectFunction: compareClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigNewStyle, EmptyObject: EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Config")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Locations, actual.Locations, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Locations")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Autoscaling, actual.Autoscaling, dcl.DiffInfo{ObjectFunction: compareClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscalingNewStyle, EmptyObject: EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Autoscaling")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig) - if !ok { - desiredNotPointer, ok := d.(ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig or *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig) - if !ok { - actualNotPointer, ok := a.(ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.MachineType, actual.MachineType, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MachineType")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.LocalSsdCount, actual.LocalSsdCount, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("LocalSsdCount")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Preemptible, actual.Preemptible, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Preemptible")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Accelerators, actual.Accelerators, dcl.DiffInfo{ObjectFunction: compareClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAcceleratorsNewStyle, EmptyObject: EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Accelerators")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.MinCpuPlatform, actual.MinCpuPlatform, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MinCpuPlatform")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.BootDiskKmsKey, actual.BootDiskKmsKey, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("BootDiskKmsKey")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.EphemeralStorageConfig, actual.EphemeralStorageConfig, dcl.DiffInfo{ObjectFunction: compareClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfigNewStyle, EmptyObject: EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("EphemeralStorageConfig")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Spot, actual.Spot, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Spot")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAcceleratorsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators) - if !ok { - desiredNotPointer, ok := d.(ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators or *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators) - if !ok { - actualNotPointer, ok := a.(ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.AcceleratorCount, actual.AcceleratorCount, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AcceleratorCount")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.AcceleratorType, actual.AcceleratorType, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("AcceleratorType")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.GpuPartitionSize, actual.GpuPartitionSize, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("GpuPartitionSize")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig) - if !ok { - desiredNotPointer, ok := d.(ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig or *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig) - if !ok { - actualNotPointer, ok := a.(ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig", a) - } - actual = &actualNotPointer - } - - if 
ds, err := dcl.Diff(desired.LocalSsdCount, actual.LocalSsdCount, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("LocalSsdCount")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscalingNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling) - if !ok { - desiredNotPointer, ok := d.(ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling or *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling) - if !ok { - actualNotPointer, ok := a.(ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.MinNodeCount, actual.MinNodeCount, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MinNodeCount")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.MaxNodeCount, actual.MaxNodeCount, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MaxNodeCount")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig) - if !ok { - desiredNotPointer, ok := d.(ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig or *ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig) - if !ok { - actualNotPointer, ok := a.(ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.ComponentVersion, actual.ComponentVersion, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ComponentVersion")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Properties, actual.Properties, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Properties")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareClusterVirtualClusterConfigAuxiliaryServicesConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ClusterVirtualClusterConfigAuxiliaryServicesConfig) - if !ok { - desiredNotPointer, ok := d.(ClusterVirtualClusterConfigAuxiliaryServicesConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterVirtualClusterConfigAuxiliaryServicesConfig or *ClusterVirtualClusterConfigAuxiliaryServicesConfig", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ClusterVirtualClusterConfigAuxiliaryServicesConfig) - if !ok { - actualNotPointer, ok := a.(ClusterVirtualClusterConfigAuxiliaryServicesConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterVirtualClusterConfigAuxiliaryServicesConfig", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.MetastoreConfig, actual.MetastoreConfig, dcl.DiffInfo{ObjectFunction: compareClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfigNewStyle, EmptyObject: EmptyClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MetastoreConfig")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.SparkHistoryServerConfig, actual.SparkHistoryServerConfig, dcl.DiffInfo{ObjectFunction: compareClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfigNewStyle, EmptyObject: EmptyClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SparkHistoryServerConfig")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig) - if !ok { - desiredNotPointer, ok := d.(ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig or *ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig) - if !ok { - actualNotPointer, ok := a.(ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.DataprocMetastoreService, actual.DataprocMetastoreService, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DataprocMetastoreService")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig) - if !ok { - desiredNotPointer, ok := d.(ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig or *ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig) - if !ok { - actualNotPointer, ok := a.(ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.DataprocCluster, actual.DataprocCluster, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DataprocCluster")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -// urlNormalized returns a copy of the resource struct with values normalized -// for URL substitutions. For instance, it converts long-form self-links to -// short-form so they can be substituted in. 
-func (r *Cluster) urlNormalized() *Cluster { - normalized := dcl.Copy(*r).(Cluster) - normalized.Project = dcl.SelfLinkToName(r.Project) - normalized.Name = dcl.SelfLinkToName(r.Name) - normalized.ClusterUuid = dcl.SelfLinkToName(r.ClusterUuid) - normalized.Location = dcl.SelfLinkToName(r.Location) - return &normalized -} - -func (r *Cluster) updateURL(userBasePath, updateName string) (string, error) { - nr := r.urlNormalized() - if updateName == "UpdateCluster" { - fields := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "location": dcl.ValueOrEmptyString(nr.Location), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/regions/{{ "{{" }}location{{ "}}" }}/clusters/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, fields), nil - - } - - return "", fmt.Errorf("unknown update name: %s", updateName) -} - -// marshal encodes the Cluster resource into JSON for a Create request, and -// performs transformations from the resource schema to the API schema if -// necessary. -func (r *Cluster) marshal(c *Client) ([]byte, error) { - m, err := expandCluster(c, r) - if err != nil { - return nil, fmt.Errorf("error marshalling Cluster: %w", err) - } - - return json.Marshal(m) -} - -// unmarshalCluster decodes JSON responses into the Cluster resource schema. -func unmarshalCluster(b []byte, c *Client, res *Cluster) (*Cluster, error) { - var m map[string]interface{} - if err := json.Unmarshal(b, &m); err != nil { - return nil, err - } - return unmarshalMapCluster(m, c, res) -} - -func unmarshalMapCluster(m map[string]interface{}, c *Client, res *Cluster) (*Cluster, error) { - - flattened := flattenCluster(c, m, res) - if flattened == nil { - return nil, fmt.Errorf("attempted to flatten empty json object") - } - return flattened, nil -} - -// expandCluster expands Cluster into a JSON request object. 
-func expandCluster(c *Client, f *Cluster) (map[string]interface{}, error) { - m := make(map[string]interface{}) - res := f - _ = res - if v, err := dcl.EmptyValue(); err != nil { - return nil, fmt.Errorf("error expanding Project into projectId: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["projectId"] = v - } - if v := f.Name; dcl.ValueShouldBeSent(v) { - m["clusterName"] = v - } - if v, err := expandClusterConfig(c, f.Config, res); err != nil { - return nil, fmt.Errorf("error expanding Config into config: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["config"] = v - } - if v := f.Labels; dcl.ValueShouldBeSent(v) { - m["labels"] = v - } - if v, err := dcl.EmptyValue(); err != nil { - return nil, fmt.Errorf("error expanding Location into location: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["location"] = v - } - if v, err := expandClusterVirtualClusterConfig(c, f.VirtualClusterConfig, res); err != nil { - return nil, fmt.Errorf("error expanding VirtualClusterConfig into virtualClusterConfig: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["virtualClusterConfig"] = v - } - - return m, nil -} - -// flattenCluster flattens Cluster from a JSON request object into the -// Cluster type. 
-func flattenCluster(c *Client, i interface{}, res *Cluster) *Cluster { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - if len(m) == 0 { - return nil - } - - resultRes := &Cluster{} - resultRes.Project = dcl.FlattenString(m["projectId"]) - resultRes.Name = dcl.FlattenString(m["clusterName"]) - resultRes.Config = flattenClusterConfig(c, m["config"], res) - resultRes.Labels = dcl.FlattenKeyValuePairs(m["labels"]) - resultRes.Status = flattenClusterStatus(c, m["status"], res) - resultRes.StatusHistory = flattenClusterStatusHistorySlice(c, m["statusHistory"], res) - resultRes.ClusterUuid = dcl.FlattenString(m["clusterUuid"]) - resultRes.Metrics = flattenClusterMetrics(c, m["metrics"], res) - resultRes.Location = dcl.FlattenString(m["location"]) - resultRes.VirtualClusterConfig = flattenClusterVirtualClusterConfig(c, m["virtualClusterConfig"], res) - - return resultRes -} - -// expandClusterConfigMap expands the contents of ClusterConfig into a JSON -// request object. -func expandClusterConfigMap(c *Client, f map[string]ClusterConfig, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandClusterConfig(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandClusterConfigSlice expands the contents of ClusterConfig into a JSON -// request object. -func expandClusterConfigSlice(c *Client, f []ClusterConfig, res *Cluster) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandClusterConfig(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenClusterConfigMap flattens the contents of ClusterConfig from a JSON -// response object. 
-func flattenClusterConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfig { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterConfig{} - } - - if len(a) == 0 { - return map[string]ClusterConfig{} - } - - items := make(map[string]ClusterConfig) - for k, item := range a { - items[k] = *flattenClusterConfig(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenClusterConfigSlice flattens the contents of ClusterConfig from a JSON -// response object. -func flattenClusterConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterConfig { - a, ok := i.([]interface{}) - if !ok { - return []ClusterConfig{} - } - - if len(a) == 0 { - return []ClusterConfig{} - } - - items := make([]ClusterConfig, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterConfig(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandClusterConfig expands an instance of ClusterConfig into a JSON -// request object. 
-func expandClusterConfig(c *Client, f *ClusterConfig, res *Cluster) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.StagingBucket; !dcl.IsEmptyValueIndirect(v) { - m["configBucket"] = v - } - if v := f.TempBucket; !dcl.IsEmptyValueIndirect(v) { - m["tempBucket"] = v - } - if v, err := expandClusterConfigGceClusterConfig(c, f.GceClusterConfig, res); err != nil { - return nil, fmt.Errorf("error expanding GceClusterConfig into gceClusterConfig: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["gceClusterConfig"] = v - } - if v, err := expandClusterConfigMasterConfig(c, f.MasterConfig, res); err != nil { - return nil, fmt.Errorf("error expanding MasterConfig into masterConfig: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["masterConfig"] = v - } - if v, err := expandClusterConfigWorkerConfig(c, f.WorkerConfig, res); err != nil { - return nil, fmt.Errorf("error expanding WorkerConfig into workerConfig: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["workerConfig"] = v - } - if v, err := expandClusterConfigSecondaryWorkerConfig(c, f.SecondaryWorkerConfig, res); err != nil { - return nil, fmt.Errorf("error expanding SecondaryWorkerConfig into secondaryWorkerConfig: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["secondaryWorkerConfig"] = v - } - if v, err := expandClusterConfigSoftwareConfig(c, f.SoftwareConfig, res); err != nil { - return nil, fmt.Errorf("error expanding SoftwareConfig into softwareConfig: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["softwareConfig"] = v - } - if v, err := expandClusterConfigInitializationActionsSlice(c, f.InitializationActions, res); err != nil { - return nil, fmt.Errorf("error expanding InitializationActions into initializationActions: %w", err) - } else if v != nil { - m["initializationActions"] = v - } - if v, err := expandClusterConfigEncryptionConfig(c, f.EncryptionConfig, res); err != 
nil { - return nil, fmt.Errorf("error expanding EncryptionConfig into encryptionConfig: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["encryptionConfig"] = v - } - if v, err := expandClusterConfigAutoscalingConfig(c, f.AutoscalingConfig, res); err != nil { - return nil, fmt.Errorf("error expanding AutoscalingConfig into autoscalingConfig: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["autoscalingConfig"] = v - } - if v, err := expandClusterConfigSecurityConfig(c, f.SecurityConfig, res); err != nil { - return nil, fmt.Errorf("error expanding SecurityConfig into securityConfig: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["securityConfig"] = v - } - if v, err := expandClusterConfigLifecycleConfig(c, f.LifecycleConfig, res); err != nil { - return nil, fmt.Errorf("error expanding LifecycleConfig into lifecycleConfig: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["lifecycleConfig"] = v - } - if v, err := expandClusterConfigEndpointConfig(c, f.EndpointConfig, res); err != nil { - return nil, fmt.Errorf("error expanding EndpointConfig into endpointConfig: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["endpointConfig"] = v - } - if v, err := expandClusterConfigGkeClusterConfig(c, f.GkeClusterConfig, res); err != nil { - return nil, fmt.Errorf("error expanding GkeClusterConfig into gkeClusterConfig: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["gkeClusterConfig"] = v - } - if v, err := expandClusterConfigMetastoreConfig(c, f.MetastoreConfig, res); err != nil { - return nil, fmt.Errorf("error expanding MetastoreConfig into metastoreConfig: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["metastoreConfig"] = v - } - if v, err := expandClusterConfigDataprocMetricConfig(c, f.DataprocMetricConfig, res); err != nil { - return nil, fmt.Errorf("error expanding DataprocMetricConfig into dataprocMetricConfig: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["dataprocMetricConfig"] = v - } - - return 
m, nil -} - -// flattenClusterConfig flattens an instance of ClusterConfig from a JSON -// response object. -func flattenClusterConfig(c *Client, i interface{}, res *Cluster) *ClusterConfig { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ClusterConfig{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyClusterConfig - } - r.StagingBucket = dcl.FlattenString(m["configBucket"]) - r.TempBucket = dcl.FlattenString(m["tempBucket"]) - r.GceClusterConfig = flattenClusterConfigGceClusterConfig(c, m["gceClusterConfig"], res) - r.MasterConfig = flattenClusterConfigMasterConfig(c, m["masterConfig"], res) - r.WorkerConfig = flattenClusterConfigWorkerConfig(c, m["workerConfig"], res) - r.SecondaryWorkerConfig = flattenClusterConfigSecondaryWorkerConfig(c, m["secondaryWorkerConfig"], res) - r.SoftwareConfig = flattenClusterConfigSoftwareConfig(c, m["softwareConfig"], res) - r.InitializationActions = flattenClusterConfigInitializationActionsSlice(c, m["initializationActions"], res) - r.EncryptionConfig = flattenClusterConfigEncryptionConfig(c, m["encryptionConfig"], res) - r.AutoscalingConfig = flattenClusterConfigAutoscalingConfig(c, m["autoscalingConfig"], res) - r.SecurityConfig = flattenClusterConfigSecurityConfig(c, m["securityConfig"], res) - r.LifecycleConfig = flattenClusterConfigLifecycleConfig(c, m["lifecycleConfig"], res) - r.EndpointConfig = flattenClusterConfigEndpointConfig(c, m["endpointConfig"], res) - r.GkeClusterConfig = flattenClusterConfigGkeClusterConfig(c, m["gkeClusterConfig"], res) - r.MetastoreConfig = flattenClusterConfigMetastoreConfig(c, m["metastoreConfig"], res) - r.DataprocMetricConfig = flattenClusterConfigDataprocMetricConfig(c, m["dataprocMetricConfig"], res) - - return r -} - -// expandClusterConfigGceClusterConfigMap expands the contents of ClusterConfigGceClusterConfig into a JSON -// request object. 
-func expandClusterConfigGceClusterConfigMap(c *Client, f map[string]ClusterConfigGceClusterConfig, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandClusterConfigGceClusterConfig(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandClusterConfigGceClusterConfigSlice expands the contents of ClusterConfigGceClusterConfig into a JSON -// request object. -func expandClusterConfigGceClusterConfigSlice(c *Client, f []ClusterConfigGceClusterConfig, res *Cluster) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandClusterConfigGceClusterConfig(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenClusterConfigGceClusterConfigMap flattens the contents of ClusterConfigGceClusterConfig from a JSON -// response object. -func flattenClusterConfigGceClusterConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigGceClusterConfig { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterConfigGceClusterConfig{} - } - - if len(a) == 0 { - return map[string]ClusterConfigGceClusterConfig{} - } - - items := make(map[string]ClusterConfigGceClusterConfig) - for k, item := range a { - items[k] = *flattenClusterConfigGceClusterConfig(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenClusterConfigGceClusterConfigSlice flattens the contents of ClusterConfigGceClusterConfig from a JSON -// response object. 
-func flattenClusterConfigGceClusterConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigGceClusterConfig { - a, ok := i.([]interface{}) - if !ok { - return []ClusterConfigGceClusterConfig{} - } - - if len(a) == 0 { - return []ClusterConfigGceClusterConfig{} - } - - items := make([]ClusterConfigGceClusterConfig, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterConfigGceClusterConfig(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandClusterConfigGceClusterConfig expands an instance of ClusterConfigGceClusterConfig into a JSON -// request object. -func expandClusterConfigGceClusterConfig(c *Client, f *ClusterConfigGceClusterConfig, res *Cluster) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Zone; !dcl.IsEmptyValueIndirect(v) { - m["zoneUri"] = v - } - if v := f.Network; !dcl.IsEmptyValueIndirect(v) { - m["networkUri"] = v - } - if v := f.Subnetwork; !dcl.IsEmptyValueIndirect(v) { - m["subnetworkUri"] = v - } - if v := f.InternalIPOnly; !dcl.IsEmptyValueIndirect(v) { - m["internalIpOnly"] = v - } - if v := f.PrivateIPv6GoogleAccess; !dcl.IsEmptyValueIndirect(v) { - m["privateIpv6GoogleAccess"] = v - } - if v := f.ServiceAccount; !dcl.IsEmptyValueIndirect(v) { - m["serviceAccount"] = v - } - if v := f.ServiceAccountScopes; v != nil { - m["serviceAccountScopes"] = v - } - if v := f.Tags; v != nil { - m["tags"] = v - } - if v := f.Metadata; !dcl.IsEmptyValueIndirect(v) { - m["metadata"] = v - } - if v, err := expandClusterConfigGceClusterConfigReservationAffinity(c, f.ReservationAffinity, res); err != nil { - return nil, fmt.Errorf("error expanding ReservationAffinity into reservationAffinity: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["reservationAffinity"] = v - } - if v, err := expandClusterConfigGceClusterConfigNodeGroupAffinity(c, f.NodeGroupAffinity, res); err != nil { - return nil, 
fmt.Errorf("error expanding NodeGroupAffinity into nodeGroupAffinity: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["nodeGroupAffinity"] = v - } - if v, err := expandClusterConfigGceClusterConfigShieldedInstanceConfig(c, f.ShieldedInstanceConfig, res); err != nil { - return nil, fmt.Errorf("error expanding ShieldedInstanceConfig into shieldedInstanceConfig: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["shieldedInstanceConfig"] = v - } - if v, err := expandClusterConfigGceClusterConfigConfidentialInstanceConfig(c, f.ConfidentialInstanceConfig, res); err != nil { - return nil, fmt.Errorf("error expanding ConfidentialInstanceConfig into confidentialInstanceConfig: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["confidentialInstanceConfig"] = v - } - - return m, nil -} - -// flattenClusterConfigGceClusterConfig flattens an instance of ClusterConfigGceClusterConfig from a JSON -// response object. -func flattenClusterConfigGceClusterConfig(c *Client, i interface{}, res *Cluster) *ClusterConfigGceClusterConfig { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ClusterConfigGceClusterConfig{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyClusterConfigGceClusterConfig - } - r.Zone = dcl.FlattenString(m["zoneUri"]) - r.Network = dcl.FlattenString(m["networkUri"]) - r.Subnetwork = dcl.FlattenString(m["subnetworkUri"]) - r.InternalIPOnly = dcl.FlattenBool(m["internalIpOnly"]) - r.PrivateIPv6GoogleAccess = flattenClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum(m["privateIpv6GoogleAccess"]) - r.ServiceAccount = dcl.FlattenString(m["serviceAccount"]) - r.ServiceAccountScopes = dcl.FlattenStringSlice(m["serviceAccountScopes"]) - r.Tags = dcl.FlattenStringSlice(m["tags"]) - r.Metadata = dcl.FlattenKeyValuePairs(m["metadata"]) - r.ReservationAffinity = flattenClusterConfigGceClusterConfigReservationAffinity(c, m["reservationAffinity"], res) - r.NodeGroupAffinity = 
flattenClusterConfigGceClusterConfigNodeGroupAffinity(c, m["nodeGroupAffinity"], res) - r.ShieldedInstanceConfig = flattenClusterConfigGceClusterConfigShieldedInstanceConfig(c, m["shieldedInstanceConfig"], res) - r.ConfidentialInstanceConfig = flattenClusterConfigGceClusterConfigConfidentialInstanceConfig(c, m["confidentialInstanceConfig"], res) - - return r -} - -// expandClusterConfigGceClusterConfigReservationAffinityMap expands the contents of ClusterConfigGceClusterConfigReservationAffinity into a JSON -// request object. -func expandClusterConfigGceClusterConfigReservationAffinityMap(c *Client, f map[string]ClusterConfigGceClusterConfigReservationAffinity, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandClusterConfigGceClusterConfigReservationAffinity(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandClusterConfigGceClusterConfigReservationAffinitySlice expands the contents of ClusterConfigGceClusterConfigReservationAffinity into a JSON -// request object. -func expandClusterConfigGceClusterConfigReservationAffinitySlice(c *Client, f []ClusterConfigGceClusterConfigReservationAffinity, res *Cluster) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandClusterConfigGceClusterConfigReservationAffinity(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenClusterConfigGceClusterConfigReservationAffinityMap flattens the contents of ClusterConfigGceClusterConfigReservationAffinity from a JSON -// response object. 
-func flattenClusterConfigGceClusterConfigReservationAffinityMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigGceClusterConfigReservationAffinity { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterConfigGceClusterConfigReservationAffinity{} - } - - if len(a) == 0 { - return map[string]ClusterConfigGceClusterConfigReservationAffinity{} - } - - items := make(map[string]ClusterConfigGceClusterConfigReservationAffinity) - for k, item := range a { - items[k] = *flattenClusterConfigGceClusterConfigReservationAffinity(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenClusterConfigGceClusterConfigReservationAffinitySlice flattens the contents of ClusterConfigGceClusterConfigReservationAffinity from a JSON -// response object. -func flattenClusterConfigGceClusterConfigReservationAffinitySlice(c *Client, i interface{}, res *Cluster) []ClusterConfigGceClusterConfigReservationAffinity { - a, ok := i.([]interface{}) - if !ok { - return []ClusterConfigGceClusterConfigReservationAffinity{} - } - - if len(a) == 0 { - return []ClusterConfigGceClusterConfigReservationAffinity{} - } - - items := make([]ClusterConfigGceClusterConfigReservationAffinity, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterConfigGceClusterConfigReservationAffinity(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandClusterConfigGceClusterConfigReservationAffinity expands an instance of ClusterConfigGceClusterConfigReservationAffinity into a JSON -// request object. 
-func expandClusterConfigGceClusterConfigReservationAffinity(c *Client, f *ClusterConfigGceClusterConfigReservationAffinity, res *Cluster) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.ConsumeReservationType; !dcl.IsEmptyValueIndirect(v) { - m["consumeReservationType"] = v - } - if v := f.Key; !dcl.IsEmptyValueIndirect(v) { - m["key"] = v - } - if v := f.Values; v != nil { - m["values"] = v - } - - return m, nil -} - -// flattenClusterConfigGceClusterConfigReservationAffinity flattens an instance of ClusterConfigGceClusterConfigReservationAffinity from a JSON -// response object. -func flattenClusterConfigGceClusterConfigReservationAffinity(c *Client, i interface{}, res *Cluster) *ClusterConfigGceClusterConfigReservationAffinity { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ClusterConfigGceClusterConfigReservationAffinity{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyClusterConfigGceClusterConfigReservationAffinity - } - r.ConsumeReservationType = flattenClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum(m["consumeReservationType"]) - r.Key = dcl.FlattenString(m["key"]) - r.Values = dcl.FlattenStringSlice(m["values"]) - - return r -} - -// expandClusterConfigGceClusterConfigNodeGroupAffinityMap expands the contents of ClusterConfigGceClusterConfigNodeGroupAffinity into a JSON -// request object. 
-func expandClusterConfigGceClusterConfigNodeGroupAffinityMap(c *Client, f map[string]ClusterConfigGceClusterConfigNodeGroupAffinity, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandClusterConfigGceClusterConfigNodeGroupAffinity(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandClusterConfigGceClusterConfigNodeGroupAffinitySlice expands the contents of ClusterConfigGceClusterConfigNodeGroupAffinity into a JSON -// request object. -func expandClusterConfigGceClusterConfigNodeGroupAffinitySlice(c *Client, f []ClusterConfigGceClusterConfigNodeGroupAffinity, res *Cluster) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandClusterConfigGceClusterConfigNodeGroupAffinity(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenClusterConfigGceClusterConfigNodeGroupAffinityMap flattens the contents of ClusterConfigGceClusterConfigNodeGroupAffinity from a JSON -// response object. 
-func flattenClusterConfigGceClusterConfigNodeGroupAffinityMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigGceClusterConfigNodeGroupAffinity { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterConfigGceClusterConfigNodeGroupAffinity{} - } - - if len(a) == 0 { - return map[string]ClusterConfigGceClusterConfigNodeGroupAffinity{} - } - - items := make(map[string]ClusterConfigGceClusterConfigNodeGroupAffinity) - for k, item := range a { - items[k] = *flattenClusterConfigGceClusterConfigNodeGroupAffinity(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenClusterConfigGceClusterConfigNodeGroupAffinitySlice flattens the contents of ClusterConfigGceClusterConfigNodeGroupAffinity from a JSON -// response object. -func flattenClusterConfigGceClusterConfigNodeGroupAffinitySlice(c *Client, i interface{}, res *Cluster) []ClusterConfigGceClusterConfigNodeGroupAffinity { - a, ok := i.([]interface{}) - if !ok { - return []ClusterConfigGceClusterConfigNodeGroupAffinity{} - } - - if len(a) == 0 { - return []ClusterConfigGceClusterConfigNodeGroupAffinity{} - } - - items := make([]ClusterConfigGceClusterConfigNodeGroupAffinity, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterConfigGceClusterConfigNodeGroupAffinity(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandClusterConfigGceClusterConfigNodeGroupAffinity expands an instance of ClusterConfigGceClusterConfigNodeGroupAffinity into a JSON -// request object. 
-func expandClusterConfigGceClusterConfigNodeGroupAffinity(c *Client, f *ClusterConfigGceClusterConfigNodeGroupAffinity, res *Cluster) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.NodeGroup; !dcl.IsEmptyValueIndirect(v) { - m["nodeGroupUri"] = v - } - - return m, nil -} - -// flattenClusterConfigGceClusterConfigNodeGroupAffinity flattens an instance of ClusterConfigGceClusterConfigNodeGroupAffinity from a JSON -// response object. -func flattenClusterConfigGceClusterConfigNodeGroupAffinity(c *Client, i interface{}, res *Cluster) *ClusterConfigGceClusterConfigNodeGroupAffinity { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ClusterConfigGceClusterConfigNodeGroupAffinity{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyClusterConfigGceClusterConfigNodeGroupAffinity - } - r.NodeGroup = dcl.FlattenString(m["nodeGroupUri"]) - - return r -} - -// expandClusterConfigGceClusterConfigShieldedInstanceConfigMap expands the contents of ClusterConfigGceClusterConfigShieldedInstanceConfig into a JSON -// request object. -func expandClusterConfigGceClusterConfigShieldedInstanceConfigMap(c *Client, f map[string]ClusterConfigGceClusterConfigShieldedInstanceConfig, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandClusterConfigGceClusterConfigShieldedInstanceConfig(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandClusterConfigGceClusterConfigShieldedInstanceConfigSlice expands the contents of ClusterConfigGceClusterConfigShieldedInstanceConfig into a JSON -// request object. 
-func expandClusterConfigGceClusterConfigShieldedInstanceConfigSlice(c *Client, f []ClusterConfigGceClusterConfigShieldedInstanceConfig, res *Cluster) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandClusterConfigGceClusterConfigShieldedInstanceConfig(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenClusterConfigGceClusterConfigShieldedInstanceConfigMap flattens the contents of ClusterConfigGceClusterConfigShieldedInstanceConfig from a JSON -// response object. -func flattenClusterConfigGceClusterConfigShieldedInstanceConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigGceClusterConfigShieldedInstanceConfig { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterConfigGceClusterConfigShieldedInstanceConfig{} - } - - if len(a) == 0 { - return map[string]ClusterConfigGceClusterConfigShieldedInstanceConfig{} - } - - items := make(map[string]ClusterConfigGceClusterConfigShieldedInstanceConfig) - for k, item := range a { - items[k] = *flattenClusterConfigGceClusterConfigShieldedInstanceConfig(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenClusterConfigGceClusterConfigShieldedInstanceConfigSlice flattens the contents of ClusterConfigGceClusterConfigShieldedInstanceConfig from a JSON -// response object. 
-func flattenClusterConfigGceClusterConfigShieldedInstanceConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigGceClusterConfigShieldedInstanceConfig { - a, ok := i.([]interface{}) - if !ok { - return []ClusterConfigGceClusterConfigShieldedInstanceConfig{} - } - - if len(a) == 0 { - return []ClusterConfigGceClusterConfigShieldedInstanceConfig{} - } - - items := make([]ClusterConfigGceClusterConfigShieldedInstanceConfig, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterConfigGceClusterConfigShieldedInstanceConfig(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandClusterConfigGceClusterConfigShieldedInstanceConfig expands an instance of ClusterConfigGceClusterConfigShieldedInstanceConfig into a JSON -// request object. -func expandClusterConfigGceClusterConfigShieldedInstanceConfig(c *Client, f *ClusterConfigGceClusterConfigShieldedInstanceConfig, res *Cluster) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.EnableSecureBoot; !dcl.IsEmptyValueIndirect(v) { - m["enableSecureBoot"] = v - } - if v := f.EnableVtpm; !dcl.IsEmptyValueIndirect(v) { - m["enableVtpm"] = v - } - if v := f.EnableIntegrityMonitoring; !dcl.IsEmptyValueIndirect(v) { - m["enableIntegrityMonitoring"] = v - } - - return m, nil -} - -// flattenClusterConfigGceClusterConfigShieldedInstanceConfig flattens an instance of ClusterConfigGceClusterConfigShieldedInstanceConfig from a JSON -// response object. 
-func flattenClusterConfigGceClusterConfigShieldedInstanceConfig(c *Client, i interface{}, res *Cluster) *ClusterConfigGceClusterConfigShieldedInstanceConfig { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ClusterConfigGceClusterConfigShieldedInstanceConfig{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyClusterConfigGceClusterConfigShieldedInstanceConfig - } - r.EnableSecureBoot = dcl.FlattenBool(m["enableSecureBoot"]) - r.EnableVtpm = dcl.FlattenBool(m["enableVtpm"]) - r.EnableIntegrityMonitoring = dcl.FlattenBool(m["enableIntegrityMonitoring"]) - - return r -} - -// expandClusterConfigGceClusterConfigConfidentialInstanceConfigMap expands the contents of ClusterConfigGceClusterConfigConfidentialInstanceConfig into a JSON -// request object. -func expandClusterConfigGceClusterConfigConfidentialInstanceConfigMap(c *Client, f map[string]ClusterConfigGceClusterConfigConfidentialInstanceConfig, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandClusterConfigGceClusterConfigConfidentialInstanceConfig(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandClusterConfigGceClusterConfigConfidentialInstanceConfigSlice expands the contents of ClusterConfigGceClusterConfigConfidentialInstanceConfig into a JSON -// request object. 
-func expandClusterConfigGceClusterConfigConfidentialInstanceConfigSlice(c *Client, f []ClusterConfigGceClusterConfigConfidentialInstanceConfig, res *Cluster) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandClusterConfigGceClusterConfigConfidentialInstanceConfig(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenClusterConfigGceClusterConfigConfidentialInstanceConfigMap flattens the contents of ClusterConfigGceClusterConfigConfidentialInstanceConfig from a JSON -// response object. -func flattenClusterConfigGceClusterConfigConfidentialInstanceConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigGceClusterConfigConfidentialInstanceConfig { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterConfigGceClusterConfigConfidentialInstanceConfig{} - } - - if len(a) == 0 { - return map[string]ClusterConfigGceClusterConfigConfidentialInstanceConfig{} - } - - items := make(map[string]ClusterConfigGceClusterConfigConfidentialInstanceConfig) - for k, item := range a { - items[k] = *flattenClusterConfigGceClusterConfigConfidentialInstanceConfig(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenClusterConfigGceClusterConfigConfidentialInstanceConfigSlice flattens the contents of ClusterConfigGceClusterConfigConfidentialInstanceConfig from a JSON -// response object. 
-func flattenClusterConfigGceClusterConfigConfidentialInstanceConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigGceClusterConfigConfidentialInstanceConfig { - a, ok := i.([]interface{}) - if !ok { - return []ClusterConfigGceClusterConfigConfidentialInstanceConfig{} - } - - if len(a) == 0 { - return []ClusterConfigGceClusterConfigConfidentialInstanceConfig{} - } - - items := make([]ClusterConfigGceClusterConfigConfidentialInstanceConfig, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterConfigGceClusterConfigConfidentialInstanceConfig(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandClusterConfigGceClusterConfigConfidentialInstanceConfig expands an instance of ClusterConfigGceClusterConfigConfidentialInstanceConfig into a JSON -// request object. -func expandClusterConfigGceClusterConfigConfidentialInstanceConfig(c *Client, f *ClusterConfigGceClusterConfigConfidentialInstanceConfig, res *Cluster) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.EnableConfidentialCompute; !dcl.IsEmptyValueIndirect(v) { - m["enableConfidentialCompute"] = v - } - - return m, nil -} - -// flattenClusterConfigGceClusterConfigConfidentialInstanceConfig flattens an instance of ClusterConfigGceClusterConfigConfidentialInstanceConfig from a JSON -// response object. 
-func flattenClusterConfigGceClusterConfigConfidentialInstanceConfig(c *Client, i interface{}, res *Cluster) *ClusterConfigGceClusterConfigConfidentialInstanceConfig { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ClusterConfigGceClusterConfigConfidentialInstanceConfig{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyClusterConfigGceClusterConfigConfidentialInstanceConfig - } - r.EnableConfidentialCompute = dcl.FlattenBool(m["enableConfidentialCompute"]) - - return r -} - -// expandClusterConfigMasterConfigMap expands the contents of ClusterConfigMasterConfig into a JSON -// request object. -func expandClusterConfigMasterConfigMap(c *Client, f map[string]ClusterConfigMasterConfig, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandClusterConfigMasterConfig(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandClusterConfigMasterConfigSlice expands the contents of ClusterConfigMasterConfig into a JSON -// request object. -func expandClusterConfigMasterConfigSlice(c *Client, f []ClusterConfigMasterConfig, res *Cluster) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandClusterConfigMasterConfig(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenClusterConfigMasterConfigMap flattens the contents of ClusterConfigMasterConfig from a JSON -// response object. 
-func flattenClusterConfigMasterConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigMasterConfig { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterConfigMasterConfig{} - } - - if len(a) == 0 { - return map[string]ClusterConfigMasterConfig{} - } - - items := make(map[string]ClusterConfigMasterConfig) - for k, item := range a { - items[k] = *flattenClusterConfigMasterConfig(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenClusterConfigMasterConfigSlice flattens the contents of ClusterConfigMasterConfig from a JSON -// response object. -func flattenClusterConfigMasterConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigMasterConfig { - a, ok := i.([]interface{}) - if !ok { - return []ClusterConfigMasterConfig{} - } - - if len(a) == 0 { - return []ClusterConfigMasterConfig{} - } - - items := make([]ClusterConfigMasterConfig, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterConfigMasterConfig(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandClusterConfigMasterConfig expands an instance of ClusterConfigMasterConfig into a JSON -// request object. 
-func expandClusterConfigMasterConfig(c *Client, f *ClusterConfigMasterConfig, res *Cluster) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.NumInstances; !dcl.IsEmptyValueIndirect(v) { - m["numInstances"] = v - } - if v := f.Image; !dcl.IsEmptyValueIndirect(v) { - m["imageUri"] = v - } - if v := f.MachineType; !dcl.IsEmptyValueIndirect(v) { - m["machineTypeUri"] = v - } - if v, err := expandClusterConfigMasterConfigDiskConfig(c, f.DiskConfig, res); err != nil { - return nil, fmt.Errorf("error expanding DiskConfig into diskConfig: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["diskConfig"] = v - } - if v := f.Preemptibility; !dcl.IsEmptyValueIndirect(v) { - m["preemptibility"] = v - } - if v, err := expandClusterConfigMasterConfigAcceleratorsSlice(c, f.Accelerators, res); err != nil { - return nil, fmt.Errorf("error expanding Accelerators into accelerators: %w", err) - } else if v != nil { - m["accelerators"] = v - } - if v := f.MinCpuPlatform; !dcl.IsEmptyValueIndirect(v) { - m["minCpuPlatform"] = v - } - - return m, nil -} - -// flattenClusterConfigMasterConfig flattens an instance of ClusterConfigMasterConfig from a JSON -// response object. 
-func flattenClusterConfigMasterConfig(c *Client, i interface{}, res *Cluster) *ClusterConfigMasterConfig { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ClusterConfigMasterConfig{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyClusterConfigMasterConfig - } - r.NumInstances = dcl.FlattenInteger(m["numInstances"]) - r.InstanceNames = dcl.FlattenStringSlice(m["instanceNames"]) - r.Image = dcl.FlattenString(m["imageUri"]) - r.MachineType = dcl.FlattenString(m["machineTypeUri"]) - r.DiskConfig = flattenClusterConfigMasterConfigDiskConfig(c, m["diskConfig"], res) - r.IsPreemptible = dcl.FlattenBool(m["isPreemptible"]) - r.Preemptibility = flattenClusterConfigMasterConfigPreemptibilityEnum(m["preemptibility"]) - r.ManagedGroupConfig = flattenClusterConfigMasterConfigManagedGroupConfig(c, m["managedGroupConfig"], res) - r.Accelerators = flattenClusterConfigMasterConfigAcceleratorsSlice(c, m["accelerators"], res) - r.MinCpuPlatform = dcl.FlattenString(m["minCpuPlatform"]) - r.InstanceReferences = flattenClusterConfigMasterConfigInstanceReferencesSlice(c, m["instanceReferences"], res) - - return r -} - -// expandClusterConfigMasterConfigDiskConfigMap expands the contents of ClusterConfigMasterConfigDiskConfig into a JSON -// request object. -func expandClusterConfigMasterConfigDiskConfigMap(c *Client, f map[string]ClusterConfigMasterConfigDiskConfig, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandClusterConfigMasterConfigDiskConfig(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandClusterConfigMasterConfigDiskConfigSlice expands the contents of ClusterConfigMasterConfigDiskConfig into a JSON -// request object. 
-func expandClusterConfigMasterConfigDiskConfigSlice(c *Client, f []ClusterConfigMasterConfigDiskConfig, res *Cluster) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandClusterConfigMasterConfigDiskConfig(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenClusterConfigMasterConfigDiskConfigMap flattens the contents of ClusterConfigMasterConfigDiskConfig from a JSON -// response object. -func flattenClusterConfigMasterConfigDiskConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigMasterConfigDiskConfig { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterConfigMasterConfigDiskConfig{} - } - - if len(a) == 0 { - return map[string]ClusterConfigMasterConfigDiskConfig{} - } - - items := make(map[string]ClusterConfigMasterConfigDiskConfig) - for k, item := range a { - items[k] = *flattenClusterConfigMasterConfigDiskConfig(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenClusterConfigMasterConfigDiskConfigSlice flattens the contents of ClusterConfigMasterConfigDiskConfig from a JSON -// response object. -func flattenClusterConfigMasterConfigDiskConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigMasterConfigDiskConfig { - a, ok := i.([]interface{}) - if !ok { - return []ClusterConfigMasterConfigDiskConfig{} - } - - if len(a) == 0 { - return []ClusterConfigMasterConfigDiskConfig{} - } - - items := make([]ClusterConfigMasterConfigDiskConfig, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterConfigMasterConfigDiskConfig(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandClusterConfigMasterConfigDiskConfig expands an instance of ClusterConfigMasterConfigDiskConfig into a JSON -// request object. 
-func expandClusterConfigMasterConfigDiskConfig(c *Client, f *ClusterConfigMasterConfigDiskConfig, res *Cluster) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.BootDiskType; !dcl.IsEmptyValueIndirect(v) { - m["bootDiskType"] = v - } - if v := f.BootDiskSizeGb; !dcl.IsEmptyValueIndirect(v) { - m["bootDiskSizeGb"] = v - } - if v := f.NumLocalSsds; !dcl.IsEmptyValueIndirect(v) { - m["numLocalSsds"] = v - } - if v := f.LocalSsdInterface; !dcl.IsEmptyValueIndirect(v) { - m["localSsdInterface"] = v - } - - return m, nil -} - -// flattenClusterConfigMasterConfigDiskConfig flattens an instance of ClusterConfigMasterConfigDiskConfig from a JSON -// response object. -func flattenClusterConfigMasterConfigDiskConfig(c *Client, i interface{}, res *Cluster) *ClusterConfigMasterConfigDiskConfig { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ClusterConfigMasterConfigDiskConfig{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyClusterConfigMasterConfigDiskConfig - } - r.BootDiskType = dcl.FlattenString(m["bootDiskType"]) - r.BootDiskSizeGb = dcl.FlattenInteger(m["bootDiskSizeGb"]) - r.NumLocalSsds = dcl.FlattenInteger(m["numLocalSsds"]) - r.LocalSsdInterface = dcl.FlattenString(m["localSsdInterface"]) - - return r -} - -// expandClusterConfigMasterConfigManagedGroupConfigMap expands the contents of ClusterConfigMasterConfigManagedGroupConfig into a JSON -// request object. 
-func expandClusterConfigMasterConfigManagedGroupConfigMap(c *Client, f map[string]ClusterConfigMasterConfigManagedGroupConfig, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandClusterConfigMasterConfigManagedGroupConfig(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandClusterConfigMasterConfigManagedGroupConfigSlice expands the contents of ClusterConfigMasterConfigManagedGroupConfig into a JSON -// request object. -func expandClusterConfigMasterConfigManagedGroupConfigSlice(c *Client, f []ClusterConfigMasterConfigManagedGroupConfig, res *Cluster) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandClusterConfigMasterConfigManagedGroupConfig(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenClusterConfigMasterConfigManagedGroupConfigMap flattens the contents of ClusterConfigMasterConfigManagedGroupConfig from a JSON -// response object. -func flattenClusterConfigMasterConfigManagedGroupConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigMasterConfigManagedGroupConfig { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterConfigMasterConfigManagedGroupConfig{} - } - - if len(a) == 0 { - return map[string]ClusterConfigMasterConfigManagedGroupConfig{} - } - - items := make(map[string]ClusterConfigMasterConfigManagedGroupConfig) - for k, item := range a { - items[k] = *flattenClusterConfigMasterConfigManagedGroupConfig(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenClusterConfigMasterConfigManagedGroupConfigSlice flattens the contents of ClusterConfigMasterConfigManagedGroupConfig from a JSON -// response object. 
-func flattenClusterConfigMasterConfigManagedGroupConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigMasterConfigManagedGroupConfig { - a, ok := i.([]interface{}) - if !ok { - return []ClusterConfigMasterConfigManagedGroupConfig{} - } - - if len(a) == 0 { - return []ClusterConfigMasterConfigManagedGroupConfig{} - } - - items := make([]ClusterConfigMasterConfigManagedGroupConfig, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterConfigMasterConfigManagedGroupConfig(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandClusterConfigMasterConfigManagedGroupConfig expands an instance of ClusterConfigMasterConfigManagedGroupConfig into a JSON -// request object. -func expandClusterConfigMasterConfigManagedGroupConfig(c *Client, f *ClusterConfigMasterConfigManagedGroupConfig, res *Cluster) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - - return m, nil -} - -// flattenClusterConfigMasterConfigManagedGroupConfig flattens an instance of ClusterConfigMasterConfigManagedGroupConfig from a JSON -// response object. -func flattenClusterConfigMasterConfigManagedGroupConfig(c *Client, i interface{}, res *Cluster) *ClusterConfigMasterConfigManagedGroupConfig { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ClusterConfigMasterConfigManagedGroupConfig{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyClusterConfigMasterConfigManagedGroupConfig - } - r.InstanceTemplateName = dcl.FlattenString(m["instanceTemplateName"]) - r.InstanceGroupManagerName = dcl.FlattenString(m["instanceGroupManagerName"]) - - return r -} - -// expandClusterConfigMasterConfigAcceleratorsMap expands the contents of ClusterConfigMasterConfigAccelerators into a JSON -// request object. 
-func expandClusterConfigMasterConfigAcceleratorsMap(c *Client, f map[string]ClusterConfigMasterConfigAccelerators, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandClusterConfigMasterConfigAccelerators(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandClusterConfigMasterConfigAcceleratorsSlice expands the contents of ClusterConfigMasterConfigAccelerators into a JSON -// request object. -func expandClusterConfigMasterConfigAcceleratorsSlice(c *Client, f []ClusterConfigMasterConfigAccelerators, res *Cluster) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandClusterConfigMasterConfigAccelerators(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenClusterConfigMasterConfigAcceleratorsMap flattens the contents of ClusterConfigMasterConfigAccelerators from a JSON -// response object. -func flattenClusterConfigMasterConfigAcceleratorsMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigMasterConfigAccelerators { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterConfigMasterConfigAccelerators{} - } - - if len(a) == 0 { - return map[string]ClusterConfigMasterConfigAccelerators{} - } - - items := make(map[string]ClusterConfigMasterConfigAccelerators) - for k, item := range a { - items[k] = *flattenClusterConfigMasterConfigAccelerators(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenClusterConfigMasterConfigAcceleratorsSlice flattens the contents of ClusterConfigMasterConfigAccelerators from a JSON -// response object. 
-func flattenClusterConfigMasterConfigAcceleratorsSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigMasterConfigAccelerators { - a, ok := i.([]interface{}) - if !ok { - return []ClusterConfigMasterConfigAccelerators{} - } - - if len(a) == 0 { - return []ClusterConfigMasterConfigAccelerators{} - } - - items := make([]ClusterConfigMasterConfigAccelerators, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterConfigMasterConfigAccelerators(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandClusterConfigMasterConfigAccelerators expands an instance of ClusterConfigMasterConfigAccelerators into a JSON -// request object. -func expandClusterConfigMasterConfigAccelerators(c *Client, f *ClusterConfigMasterConfigAccelerators, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.AcceleratorType; !dcl.IsEmptyValueIndirect(v) { - m["acceleratorTypeUri"] = v - } - if v := f.AcceleratorCount; !dcl.IsEmptyValueIndirect(v) { - m["acceleratorCount"] = v - } - - return m, nil -} - -// flattenClusterConfigMasterConfigAccelerators flattens an instance of ClusterConfigMasterConfigAccelerators from a JSON -// response object. -func flattenClusterConfigMasterConfigAccelerators(c *Client, i interface{}, res *Cluster) *ClusterConfigMasterConfigAccelerators { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ClusterConfigMasterConfigAccelerators{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyClusterConfigMasterConfigAccelerators - } - r.AcceleratorType = dcl.FlattenString(m["acceleratorTypeUri"]) - r.AcceleratorCount = dcl.FlattenInteger(m["acceleratorCount"]) - - return r -} - -// expandClusterConfigMasterConfigInstanceReferencesMap expands the contents of ClusterConfigMasterConfigInstanceReferences into a JSON -// request object. 
-func expandClusterConfigMasterConfigInstanceReferencesMap(c *Client, f map[string]ClusterConfigMasterConfigInstanceReferences, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandClusterConfigMasterConfigInstanceReferences(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandClusterConfigMasterConfigInstanceReferencesSlice expands the contents of ClusterConfigMasterConfigInstanceReferences into a JSON -// request object. -func expandClusterConfigMasterConfigInstanceReferencesSlice(c *Client, f []ClusterConfigMasterConfigInstanceReferences, res *Cluster) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandClusterConfigMasterConfigInstanceReferences(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenClusterConfigMasterConfigInstanceReferencesMap flattens the contents of ClusterConfigMasterConfigInstanceReferences from a JSON -// response object. -func flattenClusterConfigMasterConfigInstanceReferencesMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigMasterConfigInstanceReferences { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterConfigMasterConfigInstanceReferences{} - } - - if len(a) == 0 { - return map[string]ClusterConfigMasterConfigInstanceReferences{} - } - - items := make(map[string]ClusterConfigMasterConfigInstanceReferences) - for k, item := range a { - items[k] = *flattenClusterConfigMasterConfigInstanceReferences(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenClusterConfigMasterConfigInstanceReferencesSlice flattens the contents of ClusterConfigMasterConfigInstanceReferences from a JSON -// response object. 
-func flattenClusterConfigMasterConfigInstanceReferencesSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigMasterConfigInstanceReferences { - a, ok := i.([]interface{}) - if !ok { - return []ClusterConfigMasterConfigInstanceReferences{} - } - - if len(a) == 0 { - return []ClusterConfigMasterConfigInstanceReferences{} - } - - items := make([]ClusterConfigMasterConfigInstanceReferences, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterConfigMasterConfigInstanceReferences(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandClusterConfigMasterConfigInstanceReferences expands an instance of ClusterConfigMasterConfigInstanceReferences into a JSON -// request object. -func expandClusterConfigMasterConfigInstanceReferences(c *Client, f *ClusterConfigMasterConfigInstanceReferences, res *Cluster) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.InstanceName; !dcl.IsEmptyValueIndirect(v) { - m["instanceName"] = v - } - if v := f.InstanceId; !dcl.IsEmptyValueIndirect(v) { - m["instanceId"] = v - } - if v := f.PublicKey; !dcl.IsEmptyValueIndirect(v) { - m["publicKey"] = v - } - if v := f.PublicEciesKey; !dcl.IsEmptyValueIndirect(v) { - m["publicEciesKey"] = v - } - - return m, nil -} - -// flattenClusterConfigMasterConfigInstanceReferences flattens an instance of ClusterConfigMasterConfigInstanceReferences from a JSON -// response object. 
-func flattenClusterConfigMasterConfigInstanceReferences(c *Client, i interface{}, res *Cluster) *ClusterConfigMasterConfigInstanceReferences { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ClusterConfigMasterConfigInstanceReferences{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyClusterConfigMasterConfigInstanceReferences - } - r.InstanceName = dcl.FlattenString(m["instanceName"]) - r.InstanceId = dcl.FlattenString(m["instanceId"]) - r.PublicKey = dcl.FlattenString(m["publicKey"]) - r.PublicEciesKey = dcl.FlattenString(m["publicEciesKey"]) - - return r -} - -// expandClusterConfigWorkerConfigMap expands the contents of ClusterConfigWorkerConfig into a JSON -// request object. -func expandClusterConfigWorkerConfigMap(c *Client, f map[string]ClusterConfigWorkerConfig, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandClusterConfigWorkerConfig(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandClusterConfigWorkerConfigSlice expands the contents of ClusterConfigWorkerConfig into a JSON -// request object. -func expandClusterConfigWorkerConfigSlice(c *Client, f []ClusterConfigWorkerConfig, res *Cluster) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandClusterConfigWorkerConfig(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenClusterConfigWorkerConfigMap flattens the contents of ClusterConfigWorkerConfig from a JSON -// response object. 
-func flattenClusterConfigWorkerConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigWorkerConfig { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterConfigWorkerConfig{} - } - - if len(a) == 0 { - return map[string]ClusterConfigWorkerConfig{} - } - - items := make(map[string]ClusterConfigWorkerConfig) - for k, item := range a { - items[k] = *flattenClusterConfigWorkerConfig(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenClusterConfigWorkerConfigSlice flattens the contents of ClusterConfigWorkerConfig from a JSON -// response object. -func flattenClusterConfigWorkerConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigWorkerConfig { - a, ok := i.([]interface{}) - if !ok { - return []ClusterConfigWorkerConfig{} - } - - if len(a) == 0 { - return []ClusterConfigWorkerConfig{} - } - - items := make([]ClusterConfigWorkerConfig, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterConfigWorkerConfig(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandClusterConfigWorkerConfig expands an instance of ClusterConfigWorkerConfig into a JSON -// request object. 
-func expandClusterConfigWorkerConfig(c *Client, f *ClusterConfigWorkerConfig, res *Cluster) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.NumInstances; !dcl.IsEmptyValueIndirect(v) { - m["numInstances"] = v - } - if v := f.Image; !dcl.IsEmptyValueIndirect(v) { - m["imageUri"] = v - } - if v := f.MachineType; !dcl.IsEmptyValueIndirect(v) { - m["machineTypeUri"] = v - } - if v, err := expandClusterConfigWorkerConfigDiskConfig(c, f.DiskConfig, res); err != nil { - return nil, fmt.Errorf("error expanding DiskConfig into diskConfig: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["diskConfig"] = v - } - if v := f.Preemptibility; !dcl.IsEmptyValueIndirect(v) { - m["preemptibility"] = v - } - if v, err := expandClusterConfigWorkerConfigAcceleratorsSlice(c, f.Accelerators, res); err != nil { - return nil, fmt.Errorf("error expanding Accelerators into accelerators: %w", err) - } else if v != nil { - m["accelerators"] = v - } - if v := f.MinCpuPlatform; !dcl.IsEmptyValueIndirect(v) { - m["minCpuPlatform"] = v - } - - return m, nil -} - -// flattenClusterConfigWorkerConfig flattens an instance of ClusterConfigWorkerConfig from a JSON -// response object. 
-func flattenClusterConfigWorkerConfig(c *Client, i interface{}, res *Cluster) *ClusterConfigWorkerConfig { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ClusterConfigWorkerConfig{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyClusterConfigWorkerConfig - } - r.NumInstances = dcl.FlattenInteger(m["numInstances"]) - r.InstanceNames = dcl.FlattenStringSlice(m["instanceNames"]) - r.Image = dcl.FlattenString(m["imageUri"]) - r.MachineType = dcl.FlattenString(m["machineTypeUri"]) - r.DiskConfig = flattenClusterConfigWorkerConfigDiskConfig(c, m["diskConfig"], res) - r.IsPreemptible = dcl.FlattenBool(m["isPreemptible"]) - r.Preemptibility = flattenClusterConfigWorkerConfigPreemptibilityEnum(m["preemptibility"]) - r.ManagedGroupConfig = flattenClusterConfigWorkerConfigManagedGroupConfig(c, m["managedGroupConfig"], res) - r.Accelerators = flattenClusterConfigWorkerConfigAcceleratorsSlice(c, m["accelerators"], res) - r.MinCpuPlatform = dcl.FlattenString(m["minCpuPlatform"]) - r.InstanceReferences = flattenClusterConfigWorkerConfigInstanceReferencesSlice(c, m["instanceReferences"], res) - - return r -} - -// expandClusterConfigWorkerConfigDiskConfigMap expands the contents of ClusterConfigWorkerConfigDiskConfig into a JSON -// request object. -func expandClusterConfigWorkerConfigDiskConfigMap(c *Client, f map[string]ClusterConfigWorkerConfigDiskConfig, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandClusterConfigWorkerConfigDiskConfig(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandClusterConfigWorkerConfigDiskConfigSlice expands the contents of ClusterConfigWorkerConfigDiskConfig into a JSON -// request object. 
-func expandClusterConfigWorkerConfigDiskConfigSlice(c *Client, f []ClusterConfigWorkerConfigDiskConfig, res *Cluster) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandClusterConfigWorkerConfigDiskConfig(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenClusterConfigWorkerConfigDiskConfigMap flattens the contents of ClusterConfigWorkerConfigDiskConfig from a JSON -// response object. -func flattenClusterConfigWorkerConfigDiskConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigWorkerConfigDiskConfig { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterConfigWorkerConfigDiskConfig{} - } - - if len(a) == 0 { - return map[string]ClusterConfigWorkerConfigDiskConfig{} - } - - items := make(map[string]ClusterConfigWorkerConfigDiskConfig) - for k, item := range a { - items[k] = *flattenClusterConfigWorkerConfigDiskConfig(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenClusterConfigWorkerConfigDiskConfigSlice flattens the contents of ClusterConfigWorkerConfigDiskConfig from a JSON -// response object. -func flattenClusterConfigWorkerConfigDiskConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigWorkerConfigDiskConfig { - a, ok := i.([]interface{}) - if !ok { - return []ClusterConfigWorkerConfigDiskConfig{} - } - - if len(a) == 0 { - return []ClusterConfigWorkerConfigDiskConfig{} - } - - items := make([]ClusterConfigWorkerConfigDiskConfig, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterConfigWorkerConfigDiskConfig(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandClusterConfigWorkerConfigDiskConfig expands an instance of ClusterConfigWorkerConfigDiskConfig into a JSON -// request object. 
-func expandClusterConfigWorkerConfigDiskConfig(c *Client, f *ClusterConfigWorkerConfigDiskConfig, res *Cluster) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.BootDiskType; !dcl.IsEmptyValueIndirect(v) { - m["bootDiskType"] = v - } - if v := f.BootDiskSizeGb; !dcl.IsEmptyValueIndirect(v) { - m["bootDiskSizeGb"] = v - } - if v := f.NumLocalSsds; !dcl.IsEmptyValueIndirect(v) { - m["numLocalSsds"] = v - } - if v := f.LocalSsdInterface; !dcl.IsEmptyValueIndirect(v) { - m["localSsdInterface"] = v - } - - return m, nil -} - -// flattenClusterConfigWorkerConfigDiskConfig flattens an instance of ClusterConfigWorkerConfigDiskConfig from a JSON -// response object. -func flattenClusterConfigWorkerConfigDiskConfig(c *Client, i interface{}, res *Cluster) *ClusterConfigWorkerConfigDiskConfig { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ClusterConfigWorkerConfigDiskConfig{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyClusterConfigWorkerConfigDiskConfig - } - r.BootDiskType = dcl.FlattenString(m["bootDiskType"]) - r.BootDiskSizeGb = dcl.FlattenInteger(m["bootDiskSizeGb"]) - r.NumLocalSsds = dcl.FlattenInteger(m["numLocalSsds"]) - r.LocalSsdInterface = dcl.FlattenString(m["localSsdInterface"]) - - return r -} - -// expandClusterConfigWorkerConfigManagedGroupConfigMap expands the contents of ClusterConfigWorkerConfigManagedGroupConfig into a JSON -// request object. 
-func expandClusterConfigWorkerConfigManagedGroupConfigMap(c *Client, f map[string]ClusterConfigWorkerConfigManagedGroupConfig, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandClusterConfigWorkerConfigManagedGroupConfig(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandClusterConfigWorkerConfigManagedGroupConfigSlice expands the contents of ClusterConfigWorkerConfigManagedGroupConfig into a JSON -// request object. -func expandClusterConfigWorkerConfigManagedGroupConfigSlice(c *Client, f []ClusterConfigWorkerConfigManagedGroupConfig, res *Cluster) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandClusterConfigWorkerConfigManagedGroupConfig(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenClusterConfigWorkerConfigManagedGroupConfigMap flattens the contents of ClusterConfigWorkerConfigManagedGroupConfig from a JSON -// response object. -func flattenClusterConfigWorkerConfigManagedGroupConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigWorkerConfigManagedGroupConfig { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterConfigWorkerConfigManagedGroupConfig{} - } - - if len(a) == 0 { - return map[string]ClusterConfigWorkerConfigManagedGroupConfig{} - } - - items := make(map[string]ClusterConfigWorkerConfigManagedGroupConfig) - for k, item := range a { - items[k] = *flattenClusterConfigWorkerConfigManagedGroupConfig(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenClusterConfigWorkerConfigManagedGroupConfigSlice flattens the contents of ClusterConfigWorkerConfigManagedGroupConfig from a JSON -// response object. 
-func flattenClusterConfigWorkerConfigManagedGroupConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigWorkerConfigManagedGroupConfig { - a, ok := i.([]interface{}) - if !ok { - return []ClusterConfigWorkerConfigManagedGroupConfig{} - } - - if len(a) == 0 { - return []ClusterConfigWorkerConfigManagedGroupConfig{} - } - - items := make([]ClusterConfigWorkerConfigManagedGroupConfig, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterConfigWorkerConfigManagedGroupConfig(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandClusterConfigWorkerConfigManagedGroupConfig expands an instance of ClusterConfigWorkerConfigManagedGroupConfig into a JSON -// request object. -func expandClusterConfigWorkerConfigManagedGroupConfig(c *Client, f *ClusterConfigWorkerConfigManagedGroupConfig, res *Cluster) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - - return m, nil -} - -// flattenClusterConfigWorkerConfigManagedGroupConfig flattens an instance of ClusterConfigWorkerConfigManagedGroupConfig from a JSON -// response object. -func flattenClusterConfigWorkerConfigManagedGroupConfig(c *Client, i interface{}, res *Cluster) *ClusterConfigWorkerConfigManagedGroupConfig { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ClusterConfigWorkerConfigManagedGroupConfig{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyClusterConfigWorkerConfigManagedGroupConfig - } - r.InstanceTemplateName = dcl.FlattenString(m["instanceTemplateName"]) - r.InstanceGroupManagerName = dcl.FlattenString(m["instanceGroupManagerName"]) - - return r -} - -// expandClusterConfigWorkerConfigAcceleratorsMap expands the contents of ClusterConfigWorkerConfigAccelerators into a JSON -// request object. 
-func expandClusterConfigWorkerConfigAcceleratorsMap(c *Client, f map[string]ClusterConfigWorkerConfigAccelerators, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandClusterConfigWorkerConfigAccelerators(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandClusterConfigWorkerConfigAcceleratorsSlice expands the contents of ClusterConfigWorkerConfigAccelerators into a JSON -// request object. -func expandClusterConfigWorkerConfigAcceleratorsSlice(c *Client, f []ClusterConfigWorkerConfigAccelerators, res *Cluster) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandClusterConfigWorkerConfigAccelerators(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenClusterConfigWorkerConfigAcceleratorsMap flattens the contents of ClusterConfigWorkerConfigAccelerators from a JSON -// response object. -func flattenClusterConfigWorkerConfigAcceleratorsMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigWorkerConfigAccelerators { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterConfigWorkerConfigAccelerators{} - } - - if len(a) == 0 { - return map[string]ClusterConfigWorkerConfigAccelerators{} - } - - items := make(map[string]ClusterConfigWorkerConfigAccelerators) - for k, item := range a { - items[k] = *flattenClusterConfigWorkerConfigAccelerators(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenClusterConfigWorkerConfigAcceleratorsSlice flattens the contents of ClusterConfigWorkerConfigAccelerators from a JSON -// response object. 
-func flattenClusterConfigWorkerConfigAcceleratorsSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigWorkerConfigAccelerators { - a, ok := i.([]interface{}) - if !ok { - return []ClusterConfigWorkerConfigAccelerators{} - } - - if len(a) == 0 { - return []ClusterConfigWorkerConfigAccelerators{} - } - - items := make([]ClusterConfigWorkerConfigAccelerators, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterConfigWorkerConfigAccelerators(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandClusterConfigWorkerConfigAccelerators expands an instance of ClusterConfigWorkerConfigAccelerators into a JSON -// request object. -func expandClusterConfigWorkerConfigAccelerators(c *Client, f *ClusterConfigWorkerConfigAccelerators, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.AcceleratorType; !dcl.IsEmptyValueIndirect(v) { - m["acceleratorTypeUri"] = v - } - if v := f.AcceleratorCount; !dcl.IsEmptyValueIndirect(v) { - m["acceleratorCount"] = v - } - - return m, nil -} - -// flattenClusterConfigWorkerConfigAccelerators flattens an instance of ClusterConfigWorkerConfigAccelerators from a JSON -// response object. -func flattenClusterConfigWorkerConfigAccelerators(c *Client, i interface{}, res *Cluster) *ClusterConfigWorkerConfigAccelerators { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ClusterConfigWorkerConfigAccelerators{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyClusterConfigWorkerConfigAccelerators - } - r.AcceleratorType = dcl.FlattenString(m["acceleratorTypeUri"]) - r.AcceleratorCount = dcl.FlattenInteger(m["acceleratorCount"]) - - return r -} - -// expandClusterConfigWorkerConfigInstanceReferencesMap expands the contents of ClusterConfigWorkerConfigInstanceReferences into a JSON -// request object. 
-func expandClusterConfigWorkerConfigInstanceReferencesMap(c *Client, f map[string]ClusterConfigWorkerConfigInstanceReferences, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandClusterConfigWorkerConfigInstanceReferences(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandClusterConfigWorkerConfigInstanceReferencesSlice expands the contents of ClusterConfigWorkerConfigInstanceReferences into a JSON -// request object. -func expandClusterConfigWorkerConfigInstanceReferencesSlice(c *Client, f []ClusterConfigWorkerConfigInstanceReferences, res *Cluster) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandClusterConfigWorkerConfigInstanceReferences(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenClusterConfigWorkerConfigInstanceReferencesMap flattens the contents of ClusterConfigWorkerConfigInstanceReferences from a JSON -// response object. -func flattenClusterConfigWorkerConfigInstanceReferencesMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigWorkerConfigInstanceReferences { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterConfigWorkerConfigInstanceReferences{} - } - - if len(a) == 0 { - return map[string]ClusterConfigWorkerConfigInstanceReferences{} - } - - items := make(map[string]ClusterConfigWorkerConfigInstanceReferences) - for k, item := range a { - items[k] = *flattenClusterConfigWorkerConfigInstanceReferences(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenClusterConfigWorkerConfigInstanceReferencesSlice flattens the contents of ClusterConfigWorkerConfigInstanceReferences from a JSON -// response object. 
-func flattenClusterConfigWorkerConfigInstanceReferencesSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigWorkerConfigInstanceReferences { - a, ok := i.([]interface{}) - if !ok { - return []ClusterConfigWorkerConfigInstanceReferences{} - } - - if len(a) == 0 { - return []ClusterConfigWorkerConfigInstanceReferences{} - } - - items := make([]ClusterConfigWorkerConfigInstanceReferences, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterConfigWorkerConfigInstanceReferences(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandClusterConfigWorkerConfigInstanceReferences expands an instance of ClusterConfigWorkerConfigInstanceReferences into a JSON -// request object. -func expandClusterConfigWorkerConfigInstanceReferences(c *Client, f *ClusterConfigWorkerConfigInstanceReferences, res *Cluster) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.InstanceName; !dcl.IsEmptyValueIndirect(v) { - m["instanceName"] = v - } - if v := f.InstanceId; !dcl.IsEmptyValueIndirect(v) { - m["instanceId"] = v - } - if v := f.PublicKey; !dcl.IsEmptyValueIndirect(v) { - m["publicKey"] = v - } - if v := f.PublicEciesKey; !dcl.IsEmptyValueIndirect(v) { - m["publicEciesKey"] = v - } - - return m, nil -} - -// flattenClusterConfigWorkerConfigInstanceReferences flattens an instance of ClusterConfigWorkerConfigInstanceReferences from a JSON -// response object. 
-func flattenClusterConfigWorkerConfigInstanceReferences(c *Client, i interface{}, res *Cluster) *ClusterConfigWorkerConfigInstanceReferences { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ClusterConfigWorkerConfigInstanceReferences{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyClusterConfigWorkerConfigInstanceReferences - } - r.InstanceName = dcl.FlattenString(m["instanceName"]) - r.InstanceId = dcl.FlattenString(m["instanceId"]) - r.PublicKey = dcl.FlattenString(m["publicKey"]) - r.PublicEciesKey = dcl.FlattenString(m["publicEciesKey"]) - - return r -} - -// expandClusterConfigSecondaryWorkerConfigMap expands the contents of ClusterConfigSecondaryWorkerConfig into a JSON -// request object. -func expandClusterConfigSecondaryWorkerConfigMap(c *Client, f map[string]ClusterConfigSecondaryWorkerConfig, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandClusterConfigSecondaryWorkerConfig(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandClusterConfigSecondaryWorkerConfigSlice expands the contents of ClusterConfigSecondaryWorkerConfig into a JSON -// request object. -func expandClusterConfigSecondaryWorkerConfigSlice(c *Client, f []ClusterConfigSecondaryWorkerConfig, res *Cluster) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandClusterConfigSecondaryWorkerConfig(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenClusterConfigSecondaryWorkerConfigMap flattens the contents of ClusterConfigSecondaryWorkerConfig from a JSON -// response object. 
-func flattenClusterConfigSecondaryWorkerConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigSecondaryWorkerConfig { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterConfigSecondaryWorkerConfig{} - } - - if len(a) == 0 { - return map[string]ClusterConfigSecondaryWorkerConfig{} - } - - items := make(map[string]ClusterConfigSecondaryWorkerConfig) - for k, item := range a { - items[k] = *flattenClusterConfigSecondaryWorkerConfig(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenClusterConfigSecondaryWorkerConfigSlice flattens the contents of ClusterConfigSecondaryWorkerConfig from a JSON -// response object. -func flattenClusterConfigSecondaryWorkerConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigSecondaryWorkerConfig { - a, ok := i.([]interface{}) - if !ok { - return []ClusterConfigSecondaryWorkerConfig{} - } - - if len(a) == 0 { - return []ClusterConfigSecondaryWorkerConfig{} - } - - items := make([]ClusterConfigSecondaryWorkerConfig, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterConfigSecondaryWorkerConfig(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandClusterConfigSecondaryWorkerConfig expands an instance of ClusterConfigSecondaryWorkerConfig into a JSON -// request object. 
-func expandClusterConfigSecondaryWorkerConfig(c *Client, f *ClusterConfigSecondaryWorkerConfig, res *Cluster) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.NumInstances; !dcl.IsEmptyValueIndirect(v) { - m["numInstances"] = v - } - if v := f.Image; !dcl.IsEmptyValueIndirect(v) { - m["imageUri"] = v - } - if v := f.MachineType; !dcl.IsEmptyValueIndirect(v) { - m["machineTypeUri"] = v - } - if v, err := expandClusterConfigSecondaryWorkerConfigDiskConfig(c, f.DiskConfig, res); err != nil { - return nil, fmt.Errorf("error expanding DiskConfig into diskConfig: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["diskConfig"] = v - } - if v := f.Preemptibility; !dcl.IsEmptyValueIndirect(v) { - m["preemptibility"] = v - } - if v, err := expandClusterConfigSecondaryWorkerConfigAcceleratorsSlice(c, f.Accelerators, res); err != nil { - return nil, fmt.Errorf("error expanding Accelerators into accelerators: %w", err) - } else if v != nil { - m["accelerators"] = v - } - if v := f.MinCpuPlatform; !dcl.IsEmptyValueIndirect(v) { - m["minCpuPlatform"] = v - } - - return m, nil -} - -// flattenClusterConfigSecondaryWorkerConfig flattens an instance of ClusterConfigSecondaryWorkerConfig from a JSON -// response object. 
-func flattenClusterConfigSecondaryWorkerConfig(c *Client, i interface{}, res *Cluster) *ClusterConfigSecondaryWorkerConfig { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ClusterConfigSecondaryWorkerConfig{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyClusterConfigSecondaryWorkerConfig - } - r.NumInstances = dcl.FlattenInteger(m["numInstances"]) - r.InstanceNames = dcl.FlattenStringSlice(m["instanceNames"]) - r.Image = dcl.FlattenString(m["imageUri"]) - r.MachineType = dcl.FlattenString(m["machineTypeUri"]) - r.DiskConfig = flattenClusterConfigSecondaryWorkerConfigDiskConfig(c, m["diskConfig"], res) - r.IsPreemptible = dcl.FlattenBool(m["isPreemptible"]) - r.Preemptibility = flattenClusterConfigSecondaryWorkerConfigPreemptibilityEnum(m["preemptibility"]) - r.ManagedGroupConfig = flattenClusterConfigSecondaryWorkerConfigManagedGroupConfig(c, m["managedGroupConfig"], res) - r.Accelerators = flattenClusterConfigSecondaryWorkerConfigAcceleratorsSlice(c, m["accelerators"], res) - r.MinCpuPlatform = dcl.FlattenString(m["minCpuPlatform"]) - r.InstanceReferences = flattenClusterConfigSecondaryWorkerConfigInstanceReferencesSlice(c, m["instanceReferences"], res) - - return r -} - -// expandClusterConfigSecondaryWorkerConfigDiskConfigMap expands the contents of ClusterConfigSecondaryWorkerConfigDiskConfig into a JSON -// request object. 
-func expandClusterConfigSecondaryWorkerConfigDiskConfigMap(c *Client, f map[string]ClusterConfigSecondaryWorkerConfigDiskConfig, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandClusterConfigSecondaryWorkerConfigDiskConfig(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandClusterConfigSecondaryWorkerConfigDiskConfigSlice expands the contents of ClusterConfigSecondaryWorkerConfigDiskConfig into a JSON -// request object. -func expandClusterConfigSecondaryWorkerConfigDiskConfigSlice(c *Client, f []ClusterConfigSecondaryWorkerConfigDiskConfig, res *Cluster) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandClusterConfigSecondaryWorkerConfigDiskConfig(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenClusterConfigSecondaryWorkerConfigDiskConfigMap flattens the contents of ClusterConfigSecondaryWorkerConfigDiskConfig from a JSON -// response object. 
-func flattenClusterConfigSecondaryWorkerConfigDiskConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigSecondaryWorkerConfigDiskConfig { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterConfigSecondaryWorkerConfigDiskConfig{} - } - - if len(a) == 0 { - return map[string]ClusterConfigSecondaryWorkerConfigDiskConfig{} - } - - items := make(map[string]ClusterConfigSecondaryWorkerConfigDiskConfig) - for k, item := range a { - items[k] = *flattenClusterConfigSecondaryWorkerConfigDiskConfig(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenClusterConfigSecondaryWorkerConfigDiskConfigSlice flattens the contents of ClusterConfigSecondaryWorkerConfigDiskConfig from a JSON -// response object. -func flattenClusterConfigSecondaryWorkerConfigDiskConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigSecondaryWorkerConfigDiskConfig { - a, ok := i.([]interface{}) - if !ok { - return []ClusterConfigSecondaryWorkerConfigDiskConfig{} - } - - if len(a) == 0 { - return []ClusterConfigSecondaryWorkerConfigDiskConfig{} - } - - items := make([]ClusterConfigSecondaryWorkerConfigDiskConfig, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterConfigSecondaryWorkerConfigDiskConfig(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandClusterConfigSecondaryWorkerConfigDiskConfig expands an instance of ClusterConfigSecondaryWorkerConfigDiskConfig into a JSON -// request object. 
-func expandClusterConfigSecondaryWorkerConfigDiskConfig(c *Client, f *ClusterConfigSecondaryWorkerConfigDiskConfig, res *Cluster) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.BootDiskType; !dcl.IsEmptyValueIndirect(v) { - m["bootDiskType"] = v - } - if v := f.BootDiskSizeGb; !dcl.IsEmptyValueIndirect(v) { - m["bootDiskSizeGb"] = v - } - if v := f.NumLocalSsds; !dcl.IsEmptyValueIndirect(v) { - m["numLocalSsds"] = v - } - if v := f.LocalSsdInterface; !dcl.IsEmptyValueIndirect(v) { - m["localSsdInterface"] = v - } - - return m, nil -} - -// flattenClusterConfigSecondaryWorkerConfigDiskConfig flattens an instance of ClusterConfigSecondaryWorkerConfigDiskConfig from a JSON -// response object. -func flattenClusterConfigSecondaryWorkerConfigDiskConfig(c *Client, i interface{}, res *Cluster) *ClusterConfigSecondaryWorkerConfigDiskConfig { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ClusterConfigSecondaryWorkerConfigDiskConfig{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyClusterConfigSecondaryWorkerConfigDiskConfig - } - r.BootDiskType = dcl.FlattenString(m["bootDiskType"]) - r.BootDiskSizeGb = dcl.FlattenInteger(m["bootDiskSizeGb"]) - r.NumLocalSsds = dcl.FlattenInteger(m["numLocalSsds"]) - r.LocalSsdInterface = dcl.FlattenString(m["localSsdInterface"]) - - return r -} - -// expandClusterConfigSecondaryWorkerConfigManagedGroupConfigMap expands the contents of ClusterConfigSecondaryWorkerConfigManagedGroupConfig into a JSON -// request object. 
-func expandClusterConfigSecondaryWorkerConfigManagedGroupConfigMap(c *Client, f map[string]ClusterConfigSecondaryWorkerConfigManagedGroupConfig, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandClusterConfigSecondaryWorkerConfigManagedGroupConfig(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandClusterConfigSecondaryWorkerConfigManagedGroupConfigSlice expands the contents of ClusterConfigSecondaryWorkerConfigManagedGroupConfig into a JSON -// request object. -func expandClusterConfigSecondaryWorkerConfigManagedGroupConfigSlice(c *Client, f []ClusterConfigSecondaryWorkerConfigManagedGroupConfig, res *Cluster) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandClusterConfigSecondaryWorkerConfigManagedGroupConfig(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenClusterConfigSecondaryWorkerConfigManagedGroupConfigMap flattens the contents of ClusterConfigSecondaryWorkerConfigManagedGroupConfig from a JSON -// response object. 
-func flattenClusterConfigSecondaryWorkerConfigManagedGroupConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigSecondaryWorkerConfigManagedGroupConfig { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterConfigSecondaryWorkerConfigManagedGroupConfig{} - } - - if len(a) == 0 { - return map[string]ClusterConfigSecondaryWorkerConfigManagedGroupConfig{} - } - - items := make(map[string]ClusterConfigSecondaryWorkerConfigManagedGroupConfig) - for k, item := range a { - items[k] = *flattenClusterConfigSecondaryWorkerConfigManagedGroupConfig(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenClusterConfigSecondaryWorkerConfigManagedGroupConfigSlice flattens the contents of ClusterConfigSecondaryWorkerConfigManagedGroupConfig from a JSON -// response object. -func flattenClusterConfigSecondaryWorkerConfigManagedGroupConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigSecondaryWorkerConfigManagedGroupConfig { - a, ok := i.([]interface{}) - if !ok { - return []ClusterConfigSecondaryWorkerConfigManagedGroupConfig{} - } - - if len(a) == 0 { - return []ClusterConfigSecondaryWorkerConfigManagedGroupConfig{} - } - - items := make([]ClusterConfigSecondaryWorkerConfigManagedGroupConfig, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterConfigSecondaryWorkerConfigManagedGroupConfig(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandClusterConfigSecondaryWorkerConfigManagedGroupConfig expands an instance of ClusterConfigSecondaryWorkerConfigManagedGroupConfig into a JSON -// request object. 
-func expandClusterConfigSecondaryWorkerConfigManagedGroupConfig(c *Client, f *ClusterConfigSecondaryWorkerConfigManagedGroupConfig, res *Cluster) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - - return m, nil -} - -// flattenClusterConfigSecondaryWorkerConfigManagedGroupConfig flattens an instance of ClusterConfigSecondaryWorkerConfigManagedGroupConfig from a JSON -// response object. -func flattenClusterConfigSecondaryWorkerConfigManagedGroupConfig(c *Client, i interface{}, res *Cluster) *ClusterConfigSecondaryWorkerConfigManagedGroupConfig { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ClusterConfigSecondaryWorkerConfigManagedGroupConfig{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyClusterConfigSecondaryWorkerConfigManagedGroupConfig - } - r.InstanceTemplateName = dcl.FlattenString(m["instanceTemplateName"]) - r.InstanceGroupManagerName = dcl.FlattenString(m["instanceGroupManagerName"]) - - return r -} - -// expandClusterConfigSecondaryWorkerConfigAcceleratorsMap expands the contents of ClusterConfigSecondaryWorkerConfigAccelerators into a JSON -// request object. -func expandClusterConfigSecondaryWorkerConfigAcceleratorsMap(c *Client, f map[string]ClusterConfigSecondaryWorkerConfigAccelerators, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandClusterConfigSecondaryWorkerConfigAccelerators(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandClusterConfigSecondaryWorkerConfigAcceleratorsSlice expands the contents of ClusterConfigSecondaryWorkerConfigAccelerators into a JSON -// request object. 
-func expandClusterConfigSecondaryWorkerConfigAcceleratorsSlice(c *Client, f []ClusterConfigSecondaryWorkerConfigAccelerators, res *Cluster) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandClusterConfigSecondaryWorkerConfigAccelerators(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenClusterConfigSecondaryWorkerConfigAcceleratorsMap flattens the contents of ClusterConfigSecondaryWorkerConfigAccelerators from a JSON -// response object. -func flattenClusterConfigSecondaryWorkerConfigAcceleratorsMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigSecondaryWorkerConfigAccelerators { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterConfigSecondaryWorkerConfigAccelerators{} - } - - if len(a) == 0 { - return map[string]ClusterConfigSecondaryWorkerConfigAccelerators{} - } - - items := make(map[string]ClusterConfigSecondaryWorkerConfigAccelerators) - for k, item := range a { - items[k] = *flattenClusterConfigSecondaryWorkerConfigAccelerators(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenClusterConfigSecondaryWorkerConfigAcceleratorsSlice flattens the contents of ClusterConfigSecondaryWorkerConfigAccelerators from a JSON -// response object. 
-func flattenClusterConfigSecondaryWorkerConfigAcceleratorsSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigSecondaryWorkerConfigAccelerators { - a, ok := i.([]interface{}) - if !ok { - return []ClusterConfigSecondaryWorkerConfigAccelerators{} - } - - if len(a) == 0 { - return []ClusterConfigSecondaryWorkerConfigAccelerators{} - } - - items := make([]ClusterConfigSecondaryWorkerConfigAccelerators, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterConfigSecondaryWorkerConfigAccelerators(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandClusterConfigSecondaryWorkerConfigAccelerators expands an instance of ClusterConfigSecondaryWorkerConfigAccelerators into a JSON -// request object. -func expandClusterConfigSecondaryWorkerConfigAccelerators(c *Client, f *ClusterConfigSecondaryWorkerConfigAccelerators, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.AcceleratorType; !dcl.IsEmptyValueIndirect(v) { - m["acceleratorTypeUri"] = v - } - if v := f.AcceleratorCount; !dcl.IsEmptyValueIndirect(v) { - m["acceleratorCount"] = v - } - - return m, nil -} - -// flattenClusterConfigSecondaryWorkerConfigAccelerators flattens an instance of ClusterConfigSecondaryWorkerConfigAccelerators from a JSON -// response object. 
-func flattenClusterConfigSecondaryWorkerConfigAccelerators(c *Client, i interface{}, res *Cluster) *ClusterConfigSecondaryWorkerConfigAccelerators { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ClusterConfigSecondaryWorkerConfigAccelerators{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyClusterConfigSecondaryWorkerConfigAccelerators - } - r.AcceleratorType = dcl.FlattenString(m["acceleratorTypeUri"]) - r.AcceleratorCount = dcl.FlattenInteger(m["acceleratorCount"]) - - return r -} - -// expandClusterConfigSecondaryWorkerConfigInstanceReferencesMap expands the contents of ClusterConfigSecondaryWorkerConfigInstanceReferences into a JSON -// request object. -func expandClusterConfigSecondaryWorkerConfigInstanceReferencesMap(c *Client, f map[string]ClusterConfigSecondaryWorkerConfigInstanceReferences, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandClusterConfigSecondaryWorkerConfigInstanceReferences(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandClusterConfigSecondaryWorkerConfigInstanceReferencesSlice expands the contents of ClusterConfigSecondaryWorkerConfigInstanceReferences into a JSON -// request object. 
-func expandClusterConfigSecondaryWorkerConfigInstanceReferencesSlice(c *Client, f []ClusterConfigSecondaryWorkerConfigInstanceReferences, res *Cluster) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandClusterConfigSecondaryWorkerConfigInstanceReferences(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenClusterConfigSecondaryWorkerConfigInstanceReferencesMap flattens the contents of ClusterConfigSecondaryWorkerConfigInstanceReferences from a JSON -// response object. -func flattenClusterConfigSecondaryWorkerConfigInstanceReferencesMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigSecondaryWorkerConfigInstanceReferences { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterConfigSecondaryWorkerConfigInstanceReferences{} - } - - if len(a) == 0 { - return map[string]ClusterConfigSecondaryWorkerConfigInstanceReferences{} - } - - items := make(map[string]ClusterConfigSecondaryWorkerConfigInstanceReferences) - for k, item := range a { - items[k] = *flattenClusterConfigSecondaryWorkerConfigInstanceReferences(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenClusterConfigSecondaryWorkerConfigInstanceReferencesSlice flattens the contents of ClusterConfigSecondaryWorkerConfigInstanceReferences from a JSON -// response object. 
-func flattenClusterConfigSecondaryWorkerConfigInstanceReferencesSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigSecondaryWorkerConfigInstanceReferences { - a, ok := i.([]interface{}) - if !ok { - return []ClusterConfigSecondaryWorkerConfigInstanceReferences{} - } - - if len(a) == 0 { - return []ClusterConfigSecondaryWorkerConfigInstanceReferences{} - } - - items := make([]ClusterConfigSecondaryWorkerConfigInstanceReferences, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterConfigSecondaryWorkerConfigInstanceReferences(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandClusterConfigSecondaryWorkerConfigInstanceReferences expands an instance of ClusterConfigSecondaryWorkerConfigInstanceReferences into a JSON -// request object. -func expandClusterConfigSecondaryWorkerConfigInstanceReferences(c *Client, f *ClusterConfigSecondaryWorkerConfigInstanceReferences, res *Cluster) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.InstanceName; !dcl.IsEmptyValueIndirect(v) { - m["instanceName"] = v - } - if v := f.InstanceId; !dcl.IsEmptyValueIndirect(v) { - m["instanceId"] = v - } - if v := f.PublicKey; !dcl.IsEmptyValueIndirect(v) { - m["publicKey"] = v - } - if v := f.PublicEciesKey; !dcl.IsEmptyValueIndirect(v) { - m["publicEciesKey"] = v - } - - return m, nil -} - -// flattenClusterConfigSecondaryWorkerConfigInstanceReferences flattens an instance of ClusterConfigSecondaryWorkerConfigInstanceReferences from a JSON -// response object. 
-func flattenClusterConfigSecondaryWorkerConfigInstanceReferences(c *Client, i interface{}, res *Cluster) *ClusterConfigSecondaryWorkerConfigInstanceReferences { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ClusterConfigSecondaryWorkerConfigInstanceReferences{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyClusterConfigSecondaryWorkerConfigInstanceReferences - } - r.InstanceName = dcl.FlattenString(m["instanceName"]) - r.InstanceId = dcl.FlattenString(m["instanceId"]) - r.PublicKey = dcl.FlattenString(m["publicKey"]) - r.PublicEciesKey = dcl.FlattenString(m["publicEciesKey"]) - - return r -} - -// expandClusterConfigSoftwareConfigMap expands the contents of ClusterConfigSoftwareConfig into a JSON -// request object. -func expandClusterConfigSoftwareConfigMap(c *Client, f map[string]ClusterConfigSoftwareConfig, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandClusterConfigSoftwareConfig(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandClusterConfigSoftwareConfigSlice expands the contents of ClusterConfigSoftwareConfig into a JSON -// request object. -func expandClusterConfigSoftwareConfigSlice(c *Client, f []ClusterConfigSoftwareConfig, res *Cluster) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandClusterConfigSoftwareConfig(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenClusterConfigSoftwareConfigMap flattens the contents of ClusterConfigSoftwareConfig from a JSON -// response object. 
-func flattenClusterConfigSoftwareConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigSoftwareConfig { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterConfigSoftwareConfig{} - } - - if len(a) == 0 { - return map[string]ClusterConfigSoftwareConfig{} - } - - items := make(map[string]ClusterConfigSoftwareConfig) - for k, item := range a { - items[k] = *flattenClusterConfigSoftwareConfig(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenClusterConfigSoftwareConfigSlice flattens the contents of ClusterConfigSoftwareConfig from a JSON -// response object. -func flattenClusterConfigSoftwareConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigSoftwareConfig { - a, ok := i.([]interface{}) - if !ok { - return []ClusterConfigSoftwareConfig{} - } - - if len(a) == 0 { - return []ClusterConfigSoftwareConfig{} - } - - items := make([]ClusterConfigSoftwareConfig, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterConfigSoftwareConfig(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandClusterConfigSoftwareConfig expands an instance of ClusterConfigSoftwareConfig into a JSON -// request object. -func expandClusterConfigSoftwareConfig(c *Client, f *ClusterConfigSoftwareConfig, res *Cluster) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.ImageVersion; !dcl.IsEmptyValueIndirect(v) { - m["imageVersion"] = v - } - if v := f.Properties; !dcl.IsEmptyValueIndirect(v) { - m["properties"] = v - } - if v := f.OptionalComponents; v != nil { - m["optionalComponents"] = v - } - - return m, nil -} - -// flattenClusterConfigSoftwareConfig flattens an instance of ClusterConfigSoftwareConfig from a JSON -// response object. 
-func flattenClusterConfigSoftwareConfig(c *Client, i interface{}, res *Cluster) *ClusterConfigSoftwareConfig { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ClusterConfigSoftwareConfig{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyClusterConfigSoftwareConfig - } - r.ImageVersion = dcl.FlattenString(m["imageVersion"]) - r.Properties = dcl.FlattenKeyValuePairs(m["properties"]) - r.OptionalComponents = flattenClusterConfigSoftwareConfigOptionalComponentsEnumSlice(c, m["optionalComponents"], res) - - return r -} - -// expandClusterConfigInitializationActionsMap expands the contents of ClusterConfigInitializationActions into a JSON -// request object. -func expandClusterConfigInitializationActionsMap(c *Client, f map[string]ClusterConfigInitializationActions, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandClusterConfigInitializationActions(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandClusterConfigInitializationActionsSlice expands the contents of ClusterConfigInitializationActions into a JSON -// request object. -func expandClusterConfigInitializationActionsSlice(c *Client, f []ClusterConfigInitializationActions, res *Cluster) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandClusterConfigInitializationActions(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenClusterConfigInitializationActionsMap flattens the contents of ClusterConfigInitializationActions from a JSON -// response object. 
-func flattenClusterConfigInitializationActionsMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigInitializationActions { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterConfigInitializationActions{} - } - - if len(a) == 0 { - return map[string]ClusterConfigInitializationActions{} - } - - items := make(map[string]ClusterConfigInitializationActions) - for k, item := range a { - items[k] = *flattenClusterConfigInitializationActions(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenClusterConfigInitializationActionsSlice flattens the contents of ClusterConfigInitializationActions from a JSON -// response object. -func flattenClusterConfigInitializationActionsSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigInitializationActions { - a, ok := i.([]interface{}) - if !ok { - return []ClusterConfigInitializationActions{} - } - - if len(a) == 0 { - return []ClusterConfigInitializationActions{} - } - - items := make([]ClusterConfigInitializationActions, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterConfigInitializationActions(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandClusterConfigInitializationActions expands an instance of ClusterConfigInitializationActions into a JSON -// request object. -func expandClusterConfigInitializationActions(c *Client, f *ClusterConfigInitializationActions, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.ExecutableFile; !dcl.IsEmptyValueIndirect(v) { - m["executableFile"] = v - } - if v := f.ExecutionTimeout; !dcl.IsEmptyValueIndirect(v) { - m["executionTimeout"] = v - } - - return m, nil -} - -// flattenClusterConfigInitializationActions flattens an instance of ClusterConfigInitializationActions from a JSON -// response object. 
-func flattenClusterConfigInitializationActions(c *Client, i interface{}, res *Cluster) *ClusterConfigInitializationActions { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ClusterConfigInitializationActions{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyClusterConfigInitializationActions - } - r.ExecutableFile = dcl.FlattenString(m["executableFile"]) - r.ExecutionTimeout = dcl.FlattenString(m["executionTimeout"]) - - return r -} - -// expandClusterConfigEncryptionConfigMap expands the contents of ClusterConfigEncryptionConfig into a JSON -// request object. -func expandClusterConfigEncryptionConfigMap(c *Client, f map[string]ClusterConfigEncryptionConfig, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandClusterConfigEncryptionConfig(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandClusterConfigEncryptionConfigSlice expands the contents of ClusterConfigEncryptionConfig into a JSON -// request object. -func expandClusterConfigEncryptionConfigSlice(c *Client, f []ClusterConfigEncryptionConfig, res *Cluster) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandClusterConfigEncryptionConfig(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenClusterConfigEncryptionConfigMap flattens the contents of ClusterConfigEncryptionConfig from a JSON -// response object. 
-func flattenClusterConfigEncryptionConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigEncryptionConfig { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterConfigEncryptionConfig{} - } - - if len(a) == 0 { - return map[string]ClusterConfigEncryptionConfig{} - } - - items := make(map[string]ClusterConfigEncryptionConfig) - for k, item := range a { - items[k] = *flattenClusterConfigEncryptionConfig(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenClusterConfigEncryptionConfigSlice flattens the contents of ClusterConfigEncryptionConfig from a JSON -// response object. -func flattenClusterConfigEncryptionConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigEncryptionConfig { - a, ok := i.([]interface{}) - if !ok { - return []ClusterConfigEncryptionConfig{} - } - - if len(a) == 0 { - return []ClusterConfigEncryptionConfig{} - } - - items := make([]ClusterConfigEncryptionConfig, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterConfigEncryptionConfig(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandClusterConfigEncryptionConfig expands an instance of ClusterConfigEncryptionConfig into a JSON -// request object. -func expandClusterConfigEncryptionConfig(c *Client, f *ClusterConfigEncryptionConfig, res *Cluster) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.GcePdKmsKeyName; !dcl.IsEmptyValueIndirect(v) { - m["gcePdKmsKeyName"] = v - } - - return m, nil -} - -// flattenClusterConfigEncryptionConfig flattens an instance of ClusterConfigEncryptionConfig from a JSON -// response object. 
-func flattenClusterConfigEncryptionConfig(c *Client, i interface{}, res *Cluster) *ClusterConfigEncryptionConfig { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ClusterConfigEncryptionConfig{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyClusterConfigEncryptionConfig - } - r.GcePdKmsKeyName = dcl.FlattenString(m["gcePdKmsKeyName"]) - - return r -} - -// expandClusterConfigAutoscalingConfigMap expands the contents of ClusterConfigAutoscalingConfig into a JSON -// request object. -func expandClusterConfigAutoscalingConfigMap(c *Client, f map[string]ClusterConfigAutoscalingConfig, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandClusterConfigAutoscalingConfig(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandClusterConfigAutoscalingConfigSlice expands the contents of ClusterConfigAutoscalingConfig into a JSON -// request object. -func expandClusterConfigAutoscalingConfigSlice(c *Client, f []ClusterConfigAutoscalingConfig, res *Cluster) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandClusterConfigAutoscalingConfig(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenClusterConfigAutoscalingConfigMap flattens the contents of ClusterConfigAutoscalingConfig from a JSON -// response object. 
-func flattenClusterConfigAutoscalingConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigAutoscalingConfig { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterConfigAutoscalingConfig{} - } - - if len(a) == 0 { - return map[string]ClusterConfigAutoscalingConfig{} - } - - items := make(map[string]ClusterConfigAutoscalingConfig) - for k, item := range a { - items[k] = *flattenClusterConfigAutoscalingConfig(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenClusterConfigAutoscalingConfigSlice flattens the contents of ClusterConfigAutoscalingConfig from a JSON -// response object. -func flattenClusterConfigAutoscalingConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigAutoscalingConfig { - a, ok := i.([]interface{}) - if !ok { - return []ClusterConfigAutoscalingConfig{} - } - - if len(a) == 0 { - return []ClusterConfigAutoscalingConfig{} - } - - items := make([]ClusterConfigAutoscalingConfig, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterConfigAutoscalingConfig(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandClusterConfigAutoscalingConfig expands an instance of ClusterConfigAutoscalingConfig into a JSON -// request object. -func expandClusterConfigAutoscalingConfig(c *Client, f *ClusterConfigAutoscalingConfig, res *Cluster) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Policy; !dcl.IsEmptyValueIndirect(v) { - m["policyUri"] = v - } - - return m, nil -} - -// flattenClusterConfigAutoscalingConfig flattens an instance of ClusterConfigAutoscalingConfig from a JSON -// response object. 
-func flattenClusterConfigAutoscalingConfig(c *Client, i interface{}, res *Cluster) *ClusterConfigAutoscalingConfig { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ClusterConfigAutoscalingConfig{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyClusterConfigAutoscalingConfig - } - r.Policy = dcl.FlattenString(m["policyUri"]) - - return r -} - -// expandClusterConfigSecurityConfigMap expands the contents of ClusterConfigSecurityConfig into a JSON -// request object. -func expandClusterConfigSecurityConfigMap(c *Client, f map[string]ClusterConfigSecurityConfig, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandClusterConfigSecurityConfig(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandClusterConfigSecurityConfigSlice expands the contents of ClusterConfigSecurityConfig into a JSON -// request object. -func expandClusterConfigSecurityConfigSlice(c *Client, f []ClusterConfigSecurityConfig, res *Cluster) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandClusterConfigSecurityConfig(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenClusterConfigSecurityConfigMap flattens the contents of ClusterConfigSecurityConfig from a JSON -// response object. 
-func flattenClusterConfigSecurityConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigSecurityConfig { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterConfigSecurityConfig{} - } - - if len(a) == 0 { - return map[string]ClusterConfigSecurityConfig{} - } - - items := make(map[string]ClusterConfigSecurityConfig) - for k, item := range a { - items[k] = *flattenClusterConfigSecurityConfig(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenClusterConfigSecurityConfigSlice flattens the contents of ClusterConfigSecurityConfig from a JSON -// response object. -func flattenClusterConfigSecurityConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigSecurityConfig { - a, ok := i.([]interface{}) - if !ok { - return []ClusterConfigSecurityConfig{} - } - - if len(a) == 0 { - return []ClusterConfigSecurityConfig{} - } - - items := make([]ClusterConfigSecurityConfig, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterConfigSecurityConfig(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandClusterConfigSecurityConfig expands an instance of ClusterConfigSecurityConfig into a JSON -// request object. 
-func expandClusterConfigSecurityConfig(c *Client, f *ClusterConfigSecurityConfig, res *Cluster) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandClusterConfigSecurityConfigKerberosConfig(c, f.KerberosConfig, res); err != nil { - return nil, fmt.Errorf("error expanding KerberosConfig into kerberosConfig: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["kerberosConfig"] = v - } - if v, err := expandClusterConfigSecurityConfigIdentityConfig(c, f.IdentityConfig, res); err != nil { - return nil, fmt.Errorf("error expanding IdentityConfig into identityConfig: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["identityConfig"] = v - } - - return m, nil -} - -// flattenClusterConfigSecurityConfig flattens an instance of ClusterConfigSecurityConfig from a JSON -// response object. -func flattenClusterConfigSecurityConfig(c *Client, i interface{}, res *Cluster) *ClusterConfigSecurityConfig { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ClusterConfigSecurityConfig{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyClusterConfigSecurityConfig - } - r.KerberosConfig = flattenClusterConfigSecurityConfigKerberosConfig(c, m["kerberosConfig"], res) - r.IdentityConfig = flattenClusterConfigSecurityConfigIdentityConfig(c, m["identityConfig"], res) - - return r -} - -// expandClusterConfigSecurityConfigKerberosConfigMap expands the contents of ClusterConfigSecurityConfigKerberosConfig into a JSON -// request object. 
-func expandClusterConfigSecurityConfigKerberosConfigMap(c *Client, f map[string]ClusterConfigSecurityConfigKerberosConfig, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandClusterConfigSecurityConfigKerberosConfig(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandClusterConfigSecurityConfigKerberosConfigSlice expands the contents of ClusterConfigSecurityConfigKerberosConfig into a JSON -// request object. -func expandClusterConfigSecurityConfigKerberosConfigSlice(c *Client, f []ClusterConfigSecurityConfigKerberosConfig, res *Cluster) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandClusterConfigSecurityConfigKerberosConfig(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenClusterConfigSecurityConfigKerberosConfigMap flattens the contents of ClusterConfigSecurityConfigKerberosConfig from a JSON -// response object. -func flattenClusterConfigSecurityConfigKerberosConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigSecurityConfigKerberosConfig { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterConfigSecurityConfigKerberosConfig{} - } - - if len(a) == 0 { - return map[string]ClusterConfigSecurityConfigKerberosConfig{} - } - - items := make(map[string]ClusterConfigSecurityConfigKerberosConfig) - for k, item := range a { - items[k] = *flattenClusterConfigSecurityConfigKerberosConfig(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenClusterConfigSecurityConfigKerberosConfigSlice flattens the contents of ClusterConfigSecurityConfigKerberosConfig from a JSON -// response object. 
-func flattenClusterConfigSecurityConfigKerberosConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigSecurityConfigKerberosConfig { - a, ok := i.([]interface{}) - if !ok { - return []ClusterConfigSecurityConfigKerberosConfig{} - } - - if len(a) == 0 { - return []ClusterConfigSecurityConfigKerberosConfig{} - } - - items := make([]ClusterConfigSecurityConfigKerberosConfig, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterConfigSecurityConfigKerberosConfig(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandClusterConfigSecurityConfigKerberosConfig expands an instance of ClusterConfigSecurityConfigKerberosConfig into a JSON -// request object. -func expandClusterConfigSecurityConfigKerberosConfig(c *Client, f *ClusterConfigSecurityConfigKerberosConfig, res *Cluster) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.EnableKerberos; !dcl.IsEmptyValueIndirect(v) { - m["enableKerberos"] = v - } - if v := f.RootPrincipalPassword; !dcl.IsEmptyValueIndirect(v) { - m["rootPrincipalPasswordUri"] = v - } - if v := f.KmsKey; !dcl.IsEmptyValueIndirect(v) { - m["kmsKeyUri"] = v - } - if v := f.Keystore; !dcl.IsEmptyValueIndirect(v) { - m["keystoreUri"] = v - } - if v := f.Truststore; !dcl.IsEmptyValueIndirect(v) { - m["truststoreUri"] = v - } - if v := f.KeystorePassword; !dcl.IsEmptyValueIndirect(v) { - m["keystorePasswordUri"] = v - } - if v := f.KeyPassword; !dcl.IsEmptyValueIndirect(v) { - m["keyPasswordUri"] = v - } - if v := f.TruststorePassword; !dcl.IsEmptyValueIndirect(v) { - m["truststorePasswordUri"] = v - } - if v := f.CrossRealmTrustRealm; !dcl.IsEmptyValueIndirect(v) { - m["crossRealmTrustRealm"] = v - } - if v := f.CrossRealmTrustKdc; !dcl.IsEmptyValueIndirect(v) { - m["crossRealmTrustKdc"] = v - } - if v := f.CrossRealmTrustAdminServer; !dcl.IsEmptyValueIndirect(v) { - m["crossRealmTrustAdminServer"] = v - } - 
if v := f.CrossRealmTrustSharedPassword; !dcl.IsEmptyValueIndirect(v) { - m["crossRealmTrustSharedPasswordUri"] = v - } - if v := f.KdcDbKey; !dcl.IsEmptyValueIndirect(v) { - m["kdcDbKeyUri"] = v - } - if v := f.TgtLifetimeHours; !dcl.IsEmptyValueIndirect(v) { - m["tgtLifetimeHours"] = v - } - if v := f.Realm; !dcl.IsEmptyValueIndirect(v) { - m["realm"] = v - } - - return m, nil -} - -// flattenClusterConfigSecurityConfigKerberosConfig flattens an instance of ClusterConfigSecurityConfigKerberosConfig from a JSON -// response object. -func flattenClusterConfigSecurityConfigKerberosConfig(c *Client, i interface{}, res *Cluster) *ClusterConfigSecurityConfigKerberosConfig { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ClusterConfigSecurityConfigKerberosConfig{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyClusterConfigSecurityConfigKerberosConfig - } - r.EnableKerberos = dcl.FlattenBool(m["enableKerberos"]) - r.RootPrincipalPassword = dcl.FlattenString(m["rootPrincipalPasswordUri"]) - r.KmsKey = dcl.FlattenString(m["kmsKeyUri"]) - r.Keystore = dcl.FlattenString(m["keystoreUri"]) - r.Truststore = dcl.FlattenString(m["truststoreUri"]) - r.KeystorePassword = dcl.FlattenString(m["keystorePasswordUri"]) - r.KeyPassword = dcl.FlattenString(m["keyPasswordUri"]) - r.TruststorePassword = dcl.FlattenString(m["truststorePasswordUri"]) - r.CrossRealmTrustRealm = dcl.FlattenString(m["crossRealmTrustRealm"]) - r.CrossRealmTrustKdc = dcl.FlattenString(m["crossRealmTrustKdc"]) - r.CrossRealmTrustAdminServer = dcl.FlattenString(m["crossRealmTrustAdminServer"]) - r.CrossRealmTrustSharedPassword = dcl.FlattenString(m["crossRealmTrustSharedPasswordUri"]) - r.KdcDbKey = dcl.FlattenString(m["kdcDbKeyUri"]) - r.TgtLifetimeHours = dcl.FlattenInteger(m["tgtLifetimeHours"]) - r.Realm = dcl.FlattenString(m["realm"]) - - return r -} - -// expandClusterConfigSecurityConfigIdentityConfigMap expands the contents of ClusterConfigSecurityConfigIdentityConfig into a 
JSON -// request object. -func expandClusterConfigSecurityConfigIdentityConfigMap(c *Client, f map[string]ClusterConfigSecurityConfigIdentityConfig, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandClusterConfigSecurityConfigIdentityConfig(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandClusterConfigSecurityConfigIdentityConfigSlice expands the contents of ClusterConfigSecurityConfigIdentityConfig into a JSON -// request object. -func expandClusterConfigSecurityConfigIdentityConfigSlice(c *Client, f []ClusterConfigSecurityConfigIdentityConfig, res *Cluster) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandClusterConfigSecurityConfigIdentityConfig(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenClusterConfigSecurityConfigIdentityConfigMap flattens the contents of ClusterConfigSecurityConfigIdentityConfig from a JSON -// response object. -func flattenClusterConfigSecurityConfigIdentityConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigSecurityConfigIdentityConfig { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterConfigSecurityConfigIdentityConfig{} - } - - if len(a) == 0 { - return map[string]ClusterConfigSecurityConfigIdentityConfig{} - } - - items := make(map[string]ClusterConfigSecurityConfigIdentityConfig) - for k, item := range a { - items[k] = *flattenClusterConfigSecurityConfigIdentityConfig(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenClusterConfigSecurityConfigIdentityConfigSlice flattens the contents of ClusterConfigSecurityConfigIdentityConfig from a JSON -// response object. 
-func flattenClusterConfigSecurityConfigIdentityConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigSecurityConfigIdentityConfig { - a, ok := i.([]interface{}) - if !ok { - return []ClusterConfigSecurityConfigIdentityConfig{} - } - - if len(a) == 0 { - return []ClusterConfigSecurityConfigIdentityConfig{} - } - - items := make([]ClusterConfigSecurityConfigIdentityConfig, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterConfigSecurityConfigIdentityConfig(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandClusterConfigSecurityConfigIdentityConfig expands an instance of ClusterConfigSecurityConfigIdentityConfig into a JSON -// request object. -func expandClusterConfigSecurityConfigIdentityConfig(c *Client, f *ClusterConfigSecurityConfigIdentityConfig, res *Cluster) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.UserServiceAccountMapping; !dcl.IsEmptyValueIndirect(v) { - m["userServiceAccountMapping"] = v - } - - return m, nil -} - -// flattenClusterConfigSecurityConfigIdentityConfig flattens an instance of ClusterConfigSecurityConfigIdentityConfig from a JSON -// response object. -func flattenClusterConfigSecurityConfigIdentityConfig(c *Client, i interface{}, res *Cluster) *ClusterConfigSecurityConfigIdentityConfig { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ClusterConfigSecurityConfigIdentityConfig{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyClusterConfigSecurityConfigIdentityConfig - } - r.UserServiceAccountMapping = dcl.FlattenKeyValuePairs(m["userServiceAccountMapping"]) - - return r -} - -// expandClusterConfigLifecycleConfigMap expands the contents of ClusterConfigLifecycleConfig into a JSON -// request object. 
-func expandClusterConfigLifecycleConfigMap(c *Client, f map[string]ClusterConfigLifecycleConfig, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandClusterConfigLifecycleConfig(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandClusterConfigLifecycleConfigSlice expands the contents of ClusterConfigLifecycleConfig into a JSON -// request object. -func expandClusterConfigLifecycleConfigSlice(c *Client, f []ClusterConfigLifecycleConfig, res *Cluster) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandClusterConfigLifecycleConfig(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenClusterConfigLifecycleConfigMap flattens the contents of ClusterConfigLifecycleConfig from a JSON -// response object. -func flattenClusterConfigLifecycleConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigLifecycleConfig { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterConfigLifecycleConfig{} - } - - if len(a) == 0 { - return map[string]ClusterConfigLifecycleConfig{} - } - - items := make(map[string]ClusterConfigLifecycleConfig) - for k, item := range a { - items[k] = *flattenClusterConfigLifecycleConfig(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenClusterConfigLifecycleConfigSlice flattens the contents of ClusterConfigLifecycleConfig from a JSON -// response object. 
-func flattenClusterConfigLifecycleConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigLifecycleConfig { - a, ok := i.([]interface{}) - if !ok { - return []ClusterConfigLifecycleConfig{} - } - - if len(a) == 0 { - return []ClusterConfigLifecycleConfig{} - } - - items := make([]ClusterConfigLifecycleConfig, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterConfigLifecycleConfig(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandClusterConfigLifecycleConfig expands an instance of ClusterConfigLifecycleConfig into a JSON -// request object. -func expandClusterConfigLifecycleConfig(c *Client, f *ClusterConfigLifecycleConfig, res *Cluster) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.IdleDeleteTtl; !dcl.IsEmptyValueIndirect(v) { - m["idleDeleteTtl"] = v - } - if v := f.AutoDeleteTime; !dcl.IsEmptyValueIndirect(v) { - m["autoDeleteTime"] = v - } - if v := f.AutoDeleteTtl; !dcl.IsEmptyValueIndirect(v) { - m["autoDeleteTtl"] = v - } - - return m, nil -} - -// flattenClusterConfigLifecycleConfig flattens an instance of ClusterConfigLifecycleConfig from a JSON -// response object. -func flattenClusterConfigLifecycleConfig(c *Client, i interface{}, res *Cluster) *ClusterConfigLifecycleConfig { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ClusterConfigLifecycleConfig{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyClusterConfigLifecycleConfig - } - r.IdleDeleteTtl = dcl.FlattenString(m["idleDeleteTtl"]) - r.AutoDeleteTime = dcl.FlattenString(m["autoDeleteTime"]) - r.AutoDeleteTtl = dcl.FlattenString(m["autoDeleteTtl"]) - r.IdleStartTime = dcl.FlattenString(m["idleStartTime"]) - - return r -} - -// expandClusterConfigEndpointConfigMap expands the contents of ClusterConfigEndpointConfig into a JSON -// request object. 
-func expandClusterConfigEndpointConfigMap(c *Client, f map[string]ClusterConfigEndpointConfig, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandClusterConfigEndpointConfig(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandClusterConfigEndpointConfigSlice expands the contents of ClusterConfigEndpointConfig into a JSON -// request object. -func expandClusterConfigEndpointConfigSlice(c *Client, f []ClusterConfigEndpointConfig, res *Cluster) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandClusterConfigEndpointConfig(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenClusterConfigEndpointConfigMap flattens the contents of ClusterConfigEndpointConfig from a JSON -// response object. -func flattenClusterConfigEndpointConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigEndpointConfig { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterConfigEndpointConfig{} - } - - if len(a) == 0 { - return map[string]ClusterConfigEndpointConfig{} - } - - items := make(map[string]ClusterConfigEndpointConfig) - for k, item := range a { - items[k] = *flattenClusterConfigEndpointConfig(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenClusterConfigEndpointConfigSlice flattens the contents of ClusterConfigEndpointConfig from a JSON -// response object. 
-func flattenClusterConfigEndpointConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigEndpointConfig { - a, ok := i.([]interface{}) - if !ok { - return []ClusterConfigEndpointConfig{} - } - - if len(a) == 0 { - return []ClusterConfigEndpointConfig{} - } - - items := make([]ClusterConfigEndpointConfig, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterConfigEndpointConfig(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandClusterConfigEndpointConfig expands an instance of ClusterConfigEndpointConfig into a JSON -// request object. -func expandClusterConfigEndpointConfig(c *Client, f *ClusterConfigEndpointConfig, res *Cluster) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.EnableHttpPortAccess; !dcl.IsEmptyValueIndirect(v) { - m["enableHttpPortAccess"] = v - } - - return m, nil -} - -// flattenClusterConfigEndpointConfig flattens an instance of ClusterConfigEndpointConfig from a JSON -// response object. -func flattenClusterConfigEndpointConfig(c *Client, i interface{}, res *Cluster) *ClusterConfigEndpointConfig { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ClusterConfigEndpointConfig{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyClusterConfigEndpointConfig - } - r.HttpPorts = dcl.FlattenKeyValuePairs(m["httpPorts"]) - r.EnableHttpPortAccess = dcl.FlattenBool(m["enableHttpPortAccess"]) - - return r -} - -// expandClusterConfigGkeClusterConfigMap expands the contents of ClusterConfigGkeClusterConfig into a JSON -// request object. 
-func expandClusterConfigGkeClusterConfigMap(c *Client, f map[string]ClusterConfigGkeClusterConfig, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandClusterConfigGkeClusterConfig(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandClusterConfigGkeClusterConfigSlice expands the contents of ClusterConfigGkeClusterConfig into a JSON -// request object. -func expandClusterConfigGkeClusterConfigSlice(c *Client, f []ClusterConfigGkeClusterConfig, res *Cluster) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandClusterConfigGkeClusterConfig(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenClusterConfigGkeClusterConfigMap flattens the contents of ClusterConfigGkeClusterConfig from a JSON -// response object. -func flattenClusterConfigGkeClusterConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigGkeClusterConfig { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterConfigGkeClusterConfig{} - } - - if len(a) == 0 { - return map[string]ClusterConfigGkeClusterConfig{} - } - - items := make(map[string]ClusterConfigGkeClusterConfig) - for k, item := range a { - items[k] = *flattenClusterConfigGkeClusterConfig(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenClusterConfigGkeClusterConfigSlice flattens the contents of ClusterConfigGkeClusterConfig from a JSON -// response object. 
-func flattenClusterConfigGkeClusterConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigGkeClusterConfig { - a, ok := i.([]interface{}) - if !ok { - return []ClusterConfigGkeClusterConfig{} - } - - if len(a) == 0 { - return []ClusterConfigGkeClusterConfig{} - } - - items := make([]ClusterConfigGkeClusterConfig, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterConfigGkeClusterConfig(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandClusterConfigGkeClusterConfig expands an instance of ClusterConfigGkeClusterConfig into a JSON -// request object. -func expandClusterConfigGkeClusterConfig(c *Client, f *ClusterConfigGkeClusterConfig, res *Cluster) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(c, f.NamespacedGkeDeploymentTarget, res); err != nil { - return nil, fmt.Errorf("error expanding NamespacedGkeDeploymentTarget into namespacedGkeDeploymentTarget: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["namespacedGkeDeploymentTarget"] = v - } - - return m, nil -} - -// flattenClusterConfigGkeClusterConfig flattens an instance of ClusterConfigGkeClusterConfig from a JSON -// response object. 
-func flattenClusterConfigGkeClusterConfig(c *Client, i interface{}, res *Cluster) *ClusterConfigGkeClusterConfig { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ClusterConfigGkeClusterConfig{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyClusterConfigGkeClusterConfig - } - r.NamespacedGkeDeploymentTarget = flattenClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(c, m["namespacedGkeDeploymentTarget"], res) - - return r -} - -// expandClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetMap expands the contents of ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget into a JSON -// request object. -func expandClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetMap(c *Client, f map[string]ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetSlice expands the contents of ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget into a JSON -// request object. 
-func expandClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetSlice(c *Client, f []ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget, res *Cluster) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetMap flattens the contents of ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget from a JSON -// response object. -func flattenClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget{} - } - - if len(a) == 0 { - return map[string]ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget{} - } - - items := make(map[string]ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) - for k, item := range a { - items[k] = *flattenClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetSlice flattens the contents of ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget from a JSON -// response object. 
-func flattenClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget { - a, ok := i.([]interface{}) - if !ok { - return []ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget{} - } - - if len(a) == 0 { - return []ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget{} - } - - items := make([]ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget expands an instance of ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget into a JSON -// request object. -func expandClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(c *Client, f *ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget, res *Cluster) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.TargetGkeCluster; !dcl.IsEmptyValueIndirect(v) { - m["targetGkeCluster"] = v - } - if v := f.ClusterNamespace; !dcl.IsEmptyValueIndirect(v) { - m["clusterNamespace"] = v - } - - return m, nil -} - -// flattenClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget flattens an instance of ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget from a JSON -// response object. 
-func flattenClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget(c *Client, i interface{}, res *Cluster) *ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget - } - r.TargetGkeCluster = dcl.FlattenString(m["targetGkeCluster"]) - r.ClusterNamespace = dcl.FlattenString(m["clusterNamespace"]) - - return r -} - -// expandClusterConfigMetastoreConfigMap expands the contents of ClusterConfigMetastoreConfig into a JSON -// request object. -func expandClusterConfigMetastoreConfigMap(c *Client, f map[string]ClusterConfigMetastoreConfig, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandClusterConfigMetastoreConfig(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandClusterConfigMetastoreConfigSlice expands the contents of ClusterConfigMetastoreConfig into a JSON -// request object. -func expandClusterConfigMetastoreConfigSlice(c *Client, f []ClusterConfigMetastoreConfig, res *Cluster) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandClusterConfigMetastoreConfig(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenClusterConfigMetastoreConfigMap flattens the contents of ClusterConfigMetastoreConfig from a JSON -// response object. 
-func flattenClusterConfigMetastoreConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigMetastoreConfig { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterConfigMetastoreConfig{} - } - - if len(a) == 0 { - return map[string]ClusterConfigMetastoreConfig{} - } - - items := make(map[string]ClusterConfigMetastoreConfig) - for k, item := range a { - items[k] = *flattenClusterConfigMetastoreConfig(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenClusterConfigMetastoreConfigSlice flattens the contents of ClusterConfigMetastoreConfig from a JSON -// response object. -func flattenClusterConfigMetastoreConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigMetastoreConfig { - a, ok := i.([]interface{}) - if !ok { - return []ClusterConfigMetastoreConfig{} - } - - if len(a) == 0 { - return []ClusterConfigMetastoreConfig{} - } - - items := make([]ClusterConfigMetastoreConfig, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterConfigMetastoreConfig(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandClusterConfigMetastoreConfig expands an instance of ClusterConfigMetastoreConfig into a JSON -// request object. -func expandClusterConfigMetastoreConfig(c *Client, f *ClusterConfigMetastoreConfig, res *Cluster) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.DataprocMetastoreService; !dcl.IsEmptyValueIndirect(v) { - m["dataprocMetastoreService"] = v - } - - return m, nil -} - -// flattenClusterConfigMetastoreConfig flattens an instance of ClusterConfigMetastoreConfig from a JSON -// response object. 
-func flattenClusterConfigMetastoreConfig(c *Client, i interface{}, res *Cluster) *ClusterConfigMetastoreConfig { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ClusterConfigMetastoreConfig{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyClusterConfigMetastoreConfig - } - r.DataprocMetastoreService = dcl.FlattenString(m["dataprocMetastoreService"]) - - return r -} - -// expandClusterConfigDataprocMetricConfigMap expands the contents of ClusterConfigDataprocMetricConfig into a JSON -// request object. -func expandClusterConfigDataprocMetricConfigMap(c *Client, f map[string]ClusterConfigDataprocMetricConfig, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandClusterConfigDataprocMetricConfig(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandClusterConfigDataprocMetricConfigSlice expands the contents of ClusterConfigDataprocMetricConfig into a JSON -// request object. -func expandClusterConfigDataprocMetricConfigSlice(c *Client, f []ClusterConfigDataprocMetricConfig, res *Cluster) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandClusterConfigDataprocMetricConfig(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenClusterConfigDataprocMetricConfigMap flattens the contents of ClusterConfigDataprocMetricConfig from a JSON -// response object. 
-func flattenClusterConfigDataprocMetricConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigDataprocMetricConfig { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterConfigDataprocMetricConfig{} - } - - if len(a) == 0 { - return map[string]ClusterConfigDataprocMetricConfig{} - } - - items := make(map[string]ClusterConfigDataprocMetricConfig) - for k, item := range a { - items[k] = *flattenClusterConfigDataprocMetricConfig(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenClusterConfigDataprocMetricConfigSlice flattens the contents of ClusterConfigDataprocMetricConfig from a JSON -// response object. -func flattenClusterConfigDataprocMetricConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigDataprocMetricConfig { - a, ok := i.([]interface{}) - if !ok { - return []ClusterConfigDataprocMetricConfig{} - } - - if len(a) == 0 { - return []ClusterConfigDataprocMetricConfig{} - } - - items := make([]ClusterConfigDataprocMetricConfig, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterConfigDataprocMetricConfig(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandClusterConfigDataprocMetricConfig expands an instance of ClusterConfigDataprocMetricConfig into a JSON -// request object. -func expandClusterConfigDataprocMetricConfig(c *Client, f *ClusterConfigDataprocMetricConfig, res *Cluster) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandClusterConfigDataprocMetricConfigMetricsSlice(c, f.Metrics, res); err != nil { - return nil, fmt.Errorf("error expanding Metrics into metrics: %w", err) - } else if v != nil { - m["metrics"] = v - } - - return m, nil -} - -// flattenClusterConfigDataprocMetricConfig flattens an instance of ClusterConfigDataprocMetricConfig from a JSON -// response object. 
-func flattenClusterConfigDataprocMetricConfig(c *Client, i interface{}, res *Cluster) *ClusterConfigDataprocMetricConfig { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ClusterConfigDataprocMetricConfig{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyClusterConfigDataprocMetricConfig - } - r.Metrics = flattenClusterConfigDataprocMetricConfigMetricsSlice(c, m["metrics"], res) - - return r -} - -// expandClusterConfigDataprocMetricConfigMetricsMap expands the contents of ClusterConfigDataprocMetricConfigMetrics into a JSON -// request object. -func expandClusterConfigDataprocMetricConfigMetricsMap(c *Client, f map[string]ClusterConfigDataprocMetricConfigMetrics, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandClusterConfigDataprocMetricConfigMetrics(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandClusterConfigDataprocMetricConfigMetricsSlice expands the contents of ClusterConfigDataprocMetricConfigMetrics into a JSON -// request object. -func expandClusterConfigDataprocMetricConfigMetricsSlice(c *Client, f []ClusterConfigDataprocMetricConfigMetrics, res *Cluster) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandClusterConfigDataprocMetricConfigMetrics(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenClusterConfigDataprocMetricConfigMetricsMap flattens the contents of ClusterConfigDataprocMetricConfigMetrics from a JSON -// response object. 
-func flattenClusterConfigDataprocMetricConfigMetricsMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigDataprocMetricConfigMetrics { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterConfigDataprocMetricConfigMetrics{} - } - - if len(a) == 0 { - return map[string]ClusterConfigDataprocMetricConfigMetrics{} - } - - items := make(map[string]ClusterConfigDataprocMetricConfigMetrics) - for k, item := range a { - items[k] = *flattenClusterConfigDataprocMetricConfigMetrics(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenClusterConfigDataprocMetricConfigMetricsSlice flattens the contents of ClusterConfigDataprocMetricConfigMetrics from a JSON -// response object. -func flattenClusterConfigDataprocMetricConfigMetricsSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigDataprocMetricConfigMetrics { - a, ok := i.([]interface{}) - if !ok { - return []ClusterConfigDataprocMetricConfigMetrics{} - } - - if len(a) == 0 { - return []ClusterConfigDataprocMetricConfigMetrics{} - } - - items := make([]ClusterConfigDataprocMetricConfigMetrics, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterConfigDataprocMetricConfigMetrics(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandClusterConfigDataprocMetricConfigMetrics expands an instance of ClusterConfigDataprocMetricConfigMetrics into a JSON -// request object. 
-func expandClusterConfigDataprocMetricConfigMetrics(c *Client, f *ClusterConfigDataprocMetricConfigMetrics, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.MetricSource; !dcl.IsEmptyValueIndirect(v) { - m["metricSource"] = v - } - if v := f.MetricOverrides; v != nil { - m["metricOverrides"] = v - } - - return m, nil -} - -// flattenClusterConfigDataprocMetricConfigMetrics flattens an instance of ClusterConfigDataprocMetricConfigMetrics from a JSON -// response object. -func flattenClusterConfigDataprocMetricConfigMetrics(c *Client, i interface{}, res *Cluster) *ClusterConfigDataprocMetricConfigMetrics { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ClusterConfigDataprocMetricConfigMetrics{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyClusterConfigDataprocMetricConfigMetrics - } - r.MetricSource = flattenClusterConfigDataprocMetricConfigMetricsMetricSourceEnum(m["metricSource"]) - r.MetricOverrides = dcl.FlattenStringSlice(m["metricOverrides"]) - - return r -} - -// expandClusterStatusMap expands the contents of ClusterStatus into a JSON -// request object. -func expandClusterStatusMap(c *Client, f map[string]ClusterStatus, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandClusterStatus(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandClusterStatusSlice expands the contents of ClusterStatus into a JSON -// request object. 
-func expandClusterStatusSlice(c *Client, f []ClusterStatus, res *Cluster) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandClusterStatus(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenClusterStatusMap flattens the contents of ClusterStatus from a JSON -// response object. -func flattenClusterStatusMap(c *Client, i interface{}, res *Cluster) map[string]ClusterStatus { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterStatus{} - } - - if len(a) == 0 { - return map[string]ClusterStatus{} - } - - items := make(map[string]ClusterStatus) - for k, item := range a { - items[k] = *flattenClusterStatus(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenClusterStatusSlice flattens the contents of ClusterStatus from a JSON -// response object. -func flattenClusterStatusSlice(c *Client, i interface{}, res *Cluster) []ClusterStatus { - a, ok := i.([]interface{}) - if !ok { - return []ClusterStatus{} - } - - if len(a) == 0 { - return []ClusterStatus{} - } - - items := make([]ClusterStatus, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterStatus(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandClusterStatus expands an instance of ClusterStatus into a JSON -// request object. -func expandClusterStatus(c *Client, f *ClusterStatus, res *Cluster) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - - return m, nil -} - -// flattenClusterStatus flattens an instance of ClusterStatus from a JSON -// response object. 
-func flattenClusterStatus(c *Client, i interface{}, res *Cluster) *ClusterStatus { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ClusterStatus{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyClusterStatus - } - r.State = flattenClusterStatusStateEnum(m["state"]) - r.Detail = dcl.FlattenString(m["detail"]) - r.StateStartTime = dcl.FlattenString(m["stateStartTime"]) - r.Substate = flattenClusterStatusSubstateEnum(m["substate"]) - - return r -} - -// expandClusterStatusHistoryMap expands the contents of ClusterStatusHistory into a JSON -// request object. -func expandClusterStatusHistoryMap(c *Client, f map[string]ClusterStatusHistory, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandClusterStatusHistory(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandClusterStatusHistorySlice expands the contents of ClusterStatusHistory into a JSON -// request object. -func expandClusterStatusHistorySlice(c *Client, f []ClusterStatusHistory, res *Cluster) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandClusterStatusHistory(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenClusterStatusHistoryMap flattens the contents of ClusterStatusHistory from a JSON -// response object. 
-func flattenClusterStatusHistoryMap(c *Client, i interface{}, res *Cluster) map[string]ClusterStatusHistory { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterStatusHistory{} - } - - if len(a) == 0 { - return map[string]ClusterStatusHistory{} - } - - items := make(map[string]ClusterStatusHistory) - for k, item := range a { - items[k] = *flattenClusterStatusHistory(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenClusterStatusHistorySlice flattens the contents of ClusterStatusHistory from a JSON -// response object. -func flattenClusterStatusHistorySlice(c *Client, i interface{}, res *Cluster) []ClusterStatusHistory { - a, ok := i.([]interface{}) - if !ok { - return []ClusterStatusHistory{} - } - - if len(a) == 0 { - return []ClusterStatusHistory{} - } - - items := make([]ClusterStatusHistory, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterStatusHistory(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandClusterStatusHistory expands an instance of ClusterStatusHistory into a JSON -// request object. -func expandClusterStatusHistory(c *Client, f *ClusterStatusHistory, res *Cluster) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - - return m, nil -} - -// flattenClusterStatusHistory flattens an instance of ClusterStatusHistory from a JSON -// response object. 
-func flattenClusterStatusHistory(c *Client, i interface{}, res *Cluster) *ClusterStatusHistory { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ClusterStatusHistory{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyClusterStatusHistory - } - r.State = flattenClusterStatusHistoryStateEnum(m["state"]) - r.Detail = dcl.FlattenString(m["detail"]) - r.StateStartTime = dcl.FlattenString(m["stateStartTime"]) - r.Substate = flattenClusterStatusHistorySubstateEnum(m["substate"]) - - return r -} - -// expandClusterMetricsMap expands the contents of ClusterMetrics into a JSON -// request object. -func expandClusterMetricsMap(c *Client, f map[string]ClusterMetrics, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandClusterMetrics(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandClusterMetricsSlice expands the contents of ClusterMetrics into a JSON -// request object. -func expandClusterMetricsSlice(c *Client, f []ClusterMetrics, res *Cluster) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandClusterMetrics(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenClusterMetricsMap flattens the contents of ClusterMetrics from a JSON -// response object. 
-func flattenClusterMetricsMap(c *Client, i interface{}, res *Cluster) map[string]ClusterMetrics { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterMetrics{} - } - - if len(a) == 0 { - return map[string]ClusterMetrics{} - } - - items := make(map[string]ClusterMetrics) - for k, item := range a { - items[k] = *flattenClusterMetrics(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenClusterMetricsSlice flattens the contents of ClusterMetrics from a JSON -// response object. -func flattenClusterMetricsSlice(c *Client, i interface{}, res *Cluster) []ClusterMetrics { - a, ok := i.([]interface{}) - if !ok { - return []ClusterMetrics{} - } - - if len(a) == 0 { - return []ClusterMetrics{} - } - - items := make([]ClusterMetrics, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterMetrics(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandClusterMetrics expands an instance of ClusterMetrics into a JSON -// request object. -func expandClusterMetrics(c *Client, f *ClusterMetrics, res *Cluster) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.HdfsMetrics; !dcl.IsEmptyValueIndirect(v) { - m["hdfsMetrics"] = v - } - if v := f.YarnMetrics; !dcl.IsEmptyValueIndirect(v) { - m["yarnMetrics"] = v - } - - return m, nil -} - -// flattenClusterMetrics flattens an instance of ClusterMetrics from a JSON -// response object. 
-func flattenClusterMetrics(c *Client, i interface{}, res *Cluster) *ClusterMetrics { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ClusterMetrics{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyClusterMetrics - } - r.HdfsMetrics = dcl.FlattenKeyValuePairs(m["hdfsMetrics"]) - r.YarnMetrics = dcl.FlattenKeyValuePairs(m["yarnMetrics"]) - - return r -} - -// expandClusterVirtualClusterConfigMap expands the contents of ClusterVirtualClusterConfig into a JSON -// request object. -func expandClusterVirtualClusterConfigMap(c *Client, f map[string]ClusterVirtualClusterConfig, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandClusterVirtualClusterConfig(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandClusterVirtualClusterConfigSlice expands the contents of ClusterVirtualClusterConfig into a JSON -// request object. -func expandClusterVirtualClusterConfigSlice(c *Client, f []ClusterVirtualClusterConfig, res *Cluster) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandClusterVirtualClusterConfig(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenClusterVirtualClusterConfigMap flattens the contents of ClusterVirtualClusterConfig from a JSON -// response object. 
-func flattenClusterVirtualClusterConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterVirtualClusterConfig { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterVirtualClusterConfig{} - } - - if len(a) == 0 { - return map[string]ClusterVirtualClusterConfig{} - } - - items := make(map[string]ClusterVirtualClusterConfig) - for k, item := range a { - items[k] = *flattenClusterVirtualClusterConfig(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenClusterVirtualClusterConfigSlice flattens the contents of ClusterVirtualClusterConfig from a JSON -// response object. -func flattenClusterVirtualClusterConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterVirtualClusterConfig { - a, ok := i.([]interface{}) - if !ok { - return []ClusterVirtualClusterConfig{} - } - - if len(a) == 0 { - return []ClusterVirtualClusterConfig{} - } - - items := make([]ClusterVirtualClusterConfig, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterVirtualClusterConfig(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandClusterVirtualClusterConfig expands an instance of ClusterVirtualClusterConfig into a JSON -// request object. 
-func expandClusterVirtualClusterConfig(c *Client, f *ClusterVirtualClusterConfig, res *Cluster) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.StagingBucket; !dcl.IsEmptyValueIndirect(v) { - m["stagingBucket"] = v - } - if v, err := expandClusterVirtualClusterConfigKubernetesClusterConfig(c, f.KubernetesClusterConfig, res); err != nil { - return nil, fmt.Errorf("error expanding KubernetesClusterConfig into kubernetesClusterConfig: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["kubernetesClusterConfig"] = v - } - if v, err := expandClusterVirtualClusterConfigAuxiliaryServicesConfig(c, f.AuxiliaryServicesConfig, res); err != nil { - return nil, fmt.Errorf("error expanding AuxiliaryServicesConfig into auxiliaryServicesConfig: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["auxiliaryServicesConfig"] = v - } - - return m, nil -} - -// flattenClusterVirtualClusterConfig flattens an instance of ClusterVirtualClusterConfig from a JSON -// response object. -func flattenClusterVirtualClusterConfig(c *Client, i interface{}, res *Cluster) *ClusterVirtualClusterConfig { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ClusterVirtualClusterConfig{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyClusterVirtualClusterConfig - } - r.StagingBucket = dcl.FlattenString(m["stagingBucket"]) - r.KubernetesClusterConfig = flattenClusterVirtualClusterConfigKubernetesClusterConfig(c, m["kubernetesClusterConfig"], res) - r.AuxiliaryServicesConfig = flattenClusterVirtualClusterConfigAuxiliaryServicesConfig(c, m["auxiliaryServicesConfig"], res) - - return r -} - -// expandClusterVirtualClusterConfigKubernetesClusterConfigMap expands the contents of ClusterVirtualClusterConfigKubernetesClusterConfig into a JSON -// request object. 
-func expandClusterVirtualClusterConfigKubernetesClusterConfigMap(c *Client, f map[string]ClusterVirtualClusterConfigKubernetesClusterConfig, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandClusterVirtualClusterConfigKubernetesClusterConfig(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandClusterVirtualClusterConfigKubernetesClusterConfigSlice expands the contents of ClusterVirtualClusterConfigKubernetesClusterConfig into a JSON -// request object. -func expandClusterVirtualClusterConfigKubernetesClusterConfigSlice(c *Client, f []ClusterVirtualClusterConfigKubernetesClusterConfig, res *Cluster) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandClusterVirtualClusterConfigKubernetesClusterConfig(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenClusterVirtualClusterConfigKubernetesClusterConfigMap flattens the contents of ClusterVirtualClusterConfigKubernetesClusterConfig from a JSON -// response object. 
-func flattenClusterVirtualClusterConfigKubernetesClusterConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterVirtualClusterConfigKubernetesClusterConfig { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterVirtualClusterConfigKubernetesClusterConfig{} - } - - if len(a) == 0 { - return map[string]ClusterVirtualClusterConfigKubernetesClusterConfig{} - } - - items := make(map[string]ClusterVirtualClusterConfigKubernetesClusterConfig) - for k, item := range a { - items[k] = *flattenClusterVirtualClusterConfigKubernetesClusterConfig(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenClusterVirtualClusterConfigKubernetesClusterConfigSlice flattens the contents of ClusterVirtualClusterConfigKubernetesClusterConfig from a JSON -// response object. -func flattenClusterVirtualClusterConfigKubernetesClusterConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterVirtualClusterConfigKubernetesClusterConfig { - a, ok := i.([]interface{}) - if !ok { - return []ClusterVirtualClusterConfigKubernetesClusterConfig{} - } - - if len(a) == 0 { - return []ClusterVirtualClusterConfigKubernetesClusterConfig{} - } - - items := make([]ClusterVirtualClusterConfigKubernetesClusterConfig, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterVirtualClusterConfigKubernetesClusterConfig(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandClusterVirtualClusterConfigKubernetesClusterConfig expands an instance of ClusterVirtualClusterConfigKubernetesClusterConfig into a JSON -// request object. 
-func expandClusterVirtualClusterConfigKubernetesClusterConfig(c *Client, f *ClusterVirtualClusterConfigKubernetesClusterConfig, res *Cluster) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.KubernetesNamespace; !dcl.IsEmptyValueIndirect(v) { - m["kubernetesNamespace"] = v - } - if v, err := expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig(c, f.GkeClusterConfig, res); err != nil { - return nil, fmt.Errorf("error expanding GkeClusterConfig into gkeClusterConfig: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["gkeClusterConfig"] = v - } - if v, err := expandClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig(c, f.KubernetesSoftwareConfig, res); err != nil { - return nil, fmt.Errorf("error expanding KubernetesSoftwareConfig into kubernetesSoftwareConfig: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["kubernetesSoftwareConfig"] = v - } - - return m, nil -} - -// flattenClusterVirtualClusterConfigKubernetesClusterConfig flattens an instance of ClusterVirtualClusterConfigKubernetesClusterConfig from a JSON -// response object. 
-func flattenClusterVirtualClusterConfigKubernetesClusterConfig(c *Client, i interface{}, res *Cluster) *ClusterVirtualClusterConfigKubernetesClusterConfig { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ClusterVirtualClusterConfigKubernetesClusterConfig{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyClusterVirtualClusterConfigKubernetesClusterConfig - } - r.KubernetesNamespace = dcl.FlattenString(m["kubernetesNamespace"]) - r.GkeClusterConfig = flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig(c, m["gkeClusterConfig"], res) - r.KubernetesSoftwareConfig = flattenClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig(c, m["kubernetesSoftwareConfig"], res) - - return r -} - -// expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigMap expands the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig into a JSON -// request object. -func expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigMap(c *Client, f map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigSlice expands the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig into a JSON -// request object. 
-func expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigSlice(c *Client, f []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig, res *Cluster) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigMap flattens the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig from a JSON -// response object. -func flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig{} - } - - if len(a) == 0 { - return map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig{} - } - - items := make(map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig) - for k, item := range a { - items[k] = *flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigSlice flattens the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig from a JSON -// response object. 
-func flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig { - a, ok := i.([]interface{}) - if !ok { - return []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig{} - } - - if len(a) == 0 { - return []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig{} - } - - items := make([]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig expands an instance of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig into a JSON -// request object. -func expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig(c *Client, f *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig, res *Cluster) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.GkeClusterTarget; !dcl.IsEmptyValueIndirect(v) { - m["gkeClusterTarget"] = v - } - if v, err := expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetSlice(c, f.NodePoolTarget, res); err != nil { - return nil, fmt.Errorf("error expanding NodePoolTarget into nodePoolTarget: %w", err) - } else if v != nil { - m["nodePoolTarget"] = v - } - - return m, nil -} - -// flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig flattens an instance of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig from a JSON -// response object. 
-func flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig(c *Client, i interface{}, res *Cluster) *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig - } - r.GkeClusterTarget = dcl.FlattenString(m["gkeClusterTarget"]) - r.NodePoolTarget = flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetSlice(c, m["nodePoolTarget"], res) - - return r -} - -// expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetMap expands the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget into a JSON -// request object. -func expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetMap(c *Client, f map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetSlice expands the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget into a JSON -// request object. 
-func expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetSlice(c *Client, f []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget, res *Cluster) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetMap flattens the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget from a JSON -// response object. -func flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetMap(c *Client, i interface{}, res *Cluster) map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget{} - } - - if len(a) == 0 { - return map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget{} - } - - items := make(map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget) - for k, item := range a { - items[k] = *flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetSlice flattens the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget from a JSON -// response object. 
-func flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetSlice(c *Client, i interface{}, res *Cluster) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget { - a, ok := i.([]interface{}) - if !ok { - return []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget{} - } - - if len(a) == 0 { - return []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget{} - } - - items := make([]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget expands an instance of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget into a JSON -// request object. 
-func expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget(c *Client, f *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.NodePool; !dcl.IsEmptyValueIndirect(v) { - m["nodePool"] = v - } - if v := f.Roles; v != nil { - m["roles"] = v - } - if v, err := expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig(c, f.NodePoolConfig, res); err != nil { - return nil, fmt.Errorf("error expanding NodePoolConfig into nodePoolConfig: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["nodePoolConfig"] = v - } - - return m, nil -} - -// flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget flattens an instance of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget from a JSON -// response object. 
-func flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget(c *Client, i interface{}, res *Cluster) *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget - } - r.NodePool = dcl.FlattenString(m["nodePool"]) - r.Roles = flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnumSlice(c, m["roles"], res) - r.NodePoolConfig = flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig(c, m["nodePoolConfig"], res) - - return r -} - -// expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigMap expands the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig into a JSON -// request object. 
-func expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigMap(c *Client, f map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigSlice expands the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig into a JSON -// request object. -func expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigSlice(c *Client, f []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig, res *Cluster) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigMap flattens the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig from a JSON -// response object. 
-func flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig{} - } - - if len(a) == 0 { - return map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig{} - } - - items := make(map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig) - for k, item := range a { - items[k] = *flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigSlice flattens the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig from a JSON -// response object. 
-func flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig { - a, ok := i.([]interface{}) - if !ok { - return []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig{} - } - - if len(a) == 0 { - return []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig{} - } - - items := make([]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig expands an instance of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig into a JSON -// request object. 
-func expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig(c *Client, f *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig, res *Cluster) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig(c, f.Config, res); err != nil { - return nil, fmt.Errorf("error expanding Config into config: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["config"] = v - } - if v := f.Locations; v != nil { - m["locations"] = v - } - if v, err := expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling(c, f.Autoscaling, res); err != nil { - return nil, fmt.Errorf("error expanding Autoscaling into autoscaling: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["autoscaling"] = v - } - - return m, nil -} - -// flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig flattens an instance of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig from a JSON -// response object. 
-func flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig(c *Client, i interface{}, res *Cluster) *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig - } - r.Config = flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig(c, m["config"], res) - r.Locations = dcl.FlattenStringSlice(m["locations"]) - r.Autoscaling = flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling(c, m["autoscaling"], res) - - return r -} - -// expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigMap expands the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig into a JSON -// request object. 
-func expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigMap(c *Client, f map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigSlice expands the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig into a JSON -// request object. -func expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigSlice(c *Client, f []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig, res *Cluster) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigMap flattens the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig from a JSON -// response object. 
-func flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig{} - } - - if len(a) == 0 { - return map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig{} - } - - items := make(map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig) - for k, item := range a { - items[k] = *flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigSlice flattens the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig from a JSON -// response object. 
-func flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig { - a, ok := i.([]interface{}) - if !ok { - return []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig{} - } - - if len(a) == 0 { - return []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig{} - } - - items := make([]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig expands an instance of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig into a JSON -// request object. 
-func expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig(c *Client, f *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig, res *Cluster) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.MachineType; !dcl.IsEmptyValueIndirect(v) { - m["machineType"] = v - } - if v := f.LocalSsdCount; !dcl.IsEmptyValueIndirect(v) { - m["localSsdCount"] = v - } - if v := f.Preemptible; !dcl.IsEmptyValueIndirect(v) { - m["preemptible"] = v - } - if v, err := expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAcceleratorsSlice(c, f.Accelerators, res); err != nil { - return nil, fmt.Errorf("error expanding Accelerators into accelerators: %w", err) - } else if v != nil { - m["accelerators"] = v - } - if v := f.MinCpuPlatform; !dcl.IsEmptyValueIndirect(v) { - m["minCpuPlatform"] = v - } - if v := f.BootDiskKmsKey; !dcl.IsEmptyValueIndirect(v) { - m["bootDiskKmsKey"] = v - } - if v, err := expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig(c, f.EphemeralStorageConfig, res); err != nil { - return nil, fmt.Errorf("error expanding EphemeralStorageConfig into ephemeralStorageConfig: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["ephemeralStorageConfig"] = v - } - if v := f.Spot; !dcl.IsEmptyValueIndirect(v) { - m["spot"] = v - } - - return m, nil -} - -// flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig flattens an instance of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig from a JSON -// response object. 
-func flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig(c *Client, i interface{}, res *Cluster) *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig - } - r.MachineType = dcl.FlattenString(m["machineType"]) - r.LocalSsdCount = dcl.FlattenInteger(m["localSsdCount"]) - r.Preemptible = dcl.FlattenBool(m["preemptible"]) - r.Accelerators = flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAcceleratorsSlice(c, m["accelerators"], res) - r.MinCpuPlatform = dcl.FlattenString(m["minCpuPlatform"]) - r.BootDiskKmsKey = dcl.FlattenString(m["bootDiskKmsKey"]) - r.EphemeralStorageConfig = flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig(c, m["ephemeralStorageConfig"], res) - r.Spot = dcl.FlattenBool(m["spot"]) - - return r -} - -// expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAcceleratorsMap expands the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators into a JSON -// request object. 
-func expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAcceleratorsMap(c *Client, f map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAcceleratorsSlice expands the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators into a JSON -// request object. -func expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAcceleratorsSlice(c *Client, f []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators, res *Cluster) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAcceleratorsMap flattens the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators from a JSON -// response object. 
-func flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAcceleratorsMap(c *Client, i interface{}, res *Cluster) map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators{} - } - - if len(a) == 0 { - return map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators{} - } - - items := make(map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators) - for k, item := range a { - items[k] = *flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAcceleratorsSlice flattens the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators from a JSON -// response object. 
-func flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAcceleratorsSlice(c *Client, i interface{}, res *Cluster) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators { - a, ok := i.([]interface{}) - if !ok { - return []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators{} - } - - if len(a) == 0 { - return []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators{} - } - - items := make([]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators expands an instance of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators into a JSON -// request object. 
-func expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators(c *Client, f *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.AcceleratorCount; !dcl.IsEmptyValueIndirect(v) { - m["acceleratorCount"] = v - } - if v := f.AcceleratorType; !dcl.IsEmptyValueIndirect(v) { - m["acceleratorType"] = v - } - if v := f.GpuPartitionSize; !dcl.IsEmptyValueIndirect(v) { - m["gpuPartitionSize"] = v - } - - return m, nil -} - -// flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators flattens an instance of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators from a JSON -// response object. -func flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators(c *Client, i interface{}, res *Cluster) *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators - } - r.AcceleratorCount = dcl.FlattenInteger(m["acceleratorCount"]) - r.AcceleratorType = dcl.FlattenString(m["acceleratorType"]) - r.GpuPartitionSize = dcl.FlattenString(m["gpuPartitionSize"]) - - return r -} - -// expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfigMap expands the contents of 
ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig into a JSON -// request object. -func expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfigMap(c *Client, f map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfigSlice expands the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig into a JSON -// request object. 
-func expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfigSlice(c *Client, f []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig, res *Cluster) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfigMap flattens the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig from a JSON -// response object. 
-func flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig{} - } - - if len(a) == 0 { - return map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig{} - } - - items := make(map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig) - for k, item := range a { - items[k] = *flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfigSlice flattens the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig from a JSON -// response object. 
-func flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig { - a, ok := i.([]interface{}) - if !ok { - return []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig{} - } - - if len(a) == 0 { - return []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig{} - } - - items := make([]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig expands an instance of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig into a JSON -// request object. 
-func expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig(c *Client, f *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig, res *Cluster) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.LocalSsdCount; !dcl.IsEmptyValueIndirect(v) { - m["localSsdCount"] = v - } - - return m, nil -} - -// flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig flattens an instance of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig from a JSON -// response object. -func flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig(c *Client, i interface{}, res *Cluster) *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig - } - r.LocalSsdCount = dcl.FlattenInteger(m["localSsdCount"]) - - return r -} - -// expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscalingMap expands the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling into a JSON -// request object. 
-func expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscalingMap(c *Client, f map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscalingSlice expands the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling into a JSON -// request object. -func expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscalingSlice(c *Client, f []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling, res *Cluster) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscalingMap flattens the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling from a JSON -// response object. 
-func flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscalingMap(c *Client, i interface{}, res *Cluster) map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling{} - } - - if len(a) == 0 { - return map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling{} - } - - items := make(map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling) - for k, item := range a { - items[k] = *flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscalingSlice flattens the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling from a JSON -// response object. 
-func flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscalingSlice(c *Client, i interface{}, res *Cluster) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling { - a, ok := i.([]interface{}) - if !ok { - return []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling{} - } - - if len(a) == 0 { - return []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling{} - } - - items := make([]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling expands an instance of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling into a JSON -// request object. 
-func expandClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling(c *Client, f *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling, res *Cluster) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.MinNodeCount; !dcl.IsEmptyValueIndirect(v) { - m["minNodeCount"] = v - } - if v := f.MaxNodeCount; !dcl.IsEmptyValueIndirect(v) { - m["maxNodeCount"] = v - } - - return m, nil -} - -// flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling flattens an instance of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling from a JSON -// response object. -func flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling(c *Client, i interface{}, res *Cluster) *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling - } - r.MinNodeCount = dcl.FlattenInteger(m["minNodeCount"]) - r.MaxNodeCount = dcl.FlattenInteger(m["maxNodeCount"]) - - return r -} - -// expandClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfigMap expands the contents of ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig into a JSON -// request object. 
-func expandClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfigMap(c *Client, f map[string]ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfigSlice expands the contents of ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig into a JSON -// request object. -func expandClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfigSlice(c *Client, f []ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig, res *Cluster) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfigMap flattens the contents of ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig from a JSON -// response object. 
-func flattenClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig{} - } - - if len(a) == 0 { - return map[string]ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig{} - } - - items := make(map[string]ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig) - for k, item := range a { - items[k] = *flattenClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfigSlice flattens the contents of ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig from a JSON -// response object. -func flattenClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig { - a, ok := i.([]interface{}) - if !ok { - return []ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig{} - } - - if len(a) == 0 { - return []ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig{} - } - - items := make([]ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig expands an instance of ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig into a JSON -// request object. 
-func expandClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig(c *Client, f *ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig, res *Cluster) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.ComponentVersion; !dcl.IsEmptyValueIndirect(v) { - m["componentVersion"] = v - } - if v := f.Properties; !dcl.IsEmptyValueIndirect(v) { - m["properties"] = v - } - - return m, nil -} - -// flattenClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig flattens an instance of ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig from a JSON -// response object. -func flattenClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig(c *Client, i interface{}, res *Cluster) *ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig - } - r.ComponentVersion = dcl.FlattenKeyValuePairs(m["componentVersion"]) - r.Properties = dcl.FlattenKeyValuePairs(m["properties"]) - - return r -} - -// expandClusterVirtualClusterConfigAuxiliaryServicesConfigMap expands the contents of ClusterVirtualClusterConfigAuxiliaryServicesConfig into a JSON -// request object. 
-func expandClusterVirtualClusterConfigAuxiliaryServicesConfigMap(c *Client, f map[string]ClusterVirtualClusterConfigAuxiliaryServicesConfig, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandClusterVirtualClusterConfigAuxiliaryServicesConfig(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandClusterVirtualClusterConfigAuxiliaryServicesConfigSlice expands the contents of ClusterVirtualClusterConfigAuxiliaryServicesConfig into a JSON -// request object. -func expandClusterVirtualClusterConfigAuxiliaryServicesConfigSlice(c *Client, f []ClusterVirtualClusterConfigAuxiliaryServicesConfig, res *Cluster) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandClusterVirtualClusterConfigAuxiliaryServicesConfig(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenClusterVirtualClusterConfigAuxiliaryServicesConfigMap flattens the contents of ClusterVirtualClusterConfigAuxiliaryServicesConfig from a JSON -// response object. 
-func flattenClusterVirtualClusterConfigAuxiliaryServicesConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterVirtualClusterConfigAuxiliaryServicesConfig { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterVirtualClusterConfigAuxiliaryServicesConfig{} - } - - if len(a) == 0 { - return map[string]ClusterVirtualClusterConfigAuxiliaryServicesConfig{} - } - - items := make(map[string]ClusterVirtualClusterConfigAuxiliaryServicesConfig) - for k, item := range a { - items[k] = *flattenClusterVirtualClusterConfigAuxiliaryServicesConfig(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenClusterVirtualClusterConfigAuxiliaryServicesConfigSlice flattens the contents of ClusterVirtualClusterConfigAuxiliaryServicesConfig from a JSON -// response object. -func flattenClusterVirtualClusterConfigAuxiliaryServicesConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterVirtualClusterConfigAuxiliaryServicesConfig { - a, ok := i.([]interface{}) - if !ok { - return []ClusterVirtualClusterConfigAuxiliaryServicesConfig{} - } - - if len(a) == 0 { - return []ClusterVirtualClusterConfigAuxiliaryServicesConfig{} - } - - items := make([]ClusterVirtualClusterConfigAuxiliaryServicesConfig, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterVirtualClusterConfigAuxiliaryServicesConfig(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandClusterVirtualClusterConfigAuxiliaryServicesConfig expands an instance of ClusterVirtualClusterConfigAuxiliaryServicesConfig into a JSON -// request object. 
-func expandClusterVirtualClusterConfigAuxiliaryServicesConfig(c *Client, f *ClusterVirtualClusterConfigAuxiliaryServicesConfig, res *Cluster) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig(c, f.MetastoreConfig, res); err != nil { - return nil, fmt.Errorf("error expanding MetastoreConfig into metastoreConfig: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["metastoreConfig"] = v - } - if v, err := expandClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig(c, f.SparkHistoryServerConfig, res); err != nil { - return nil, fmt.Errorf("error expanding SparkHistoryServerConfig into sparkHistoryServerConfig: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["sparkHistoryServerConfig"] = v - } - - return m, nil -} - -// flattenClusterVirtualClusterConfigAuxiliaryServicesConfig flattens an instance of ClusterVirtualClusterConfigAuxiliaryServicesConfig from a JSON -// response object. -func flattenClusterVirtualClusterConfigAuxiliaryServicesConfig(c *Client, i interface{}, res *Cluster) *ClusterVirtualClusterConfigAuxiliaryServicesConfig { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ClusterVirtualClusterConfigAuxiliaryServicesConfig{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyClusterVirtualClusterConfigAuxiliaryServicesConfig - } - r.MetastoreConfig = flattenClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig(c, m["metastoreConfig"], res) - r.SparkHistoryServerConfig = flattenClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig(c, m["sparkHistoryServerConfig"], res) - - return r -} - -// expandClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfigMap expands the contents of ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig into a JSON -// request object. 
-func expandClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfigMap(c *Client, f map[string]ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfigSlice expands the contents of ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig into a JSON -// request object. -func expandClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfigSlice(c *Client, f []ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig, res *Cluster) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfigMap flattens the contents of ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig from a JSON -// response object. 
-func flattenClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig{} - } - - if len(a) == 0 { - return map[string]ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig{} - } - - items := make(map[string]ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig) - for k, item := range a { - items[k] = *flattenClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfigSlice flattens the contents of ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig from a JSON -// response object. -func flattenClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig { - a, ok := i.([]interface{}) - if !ok { - return []ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig{} - } - - if len(a) == 0 { - return []ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig{} - } - - items := make([]ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig expands an instance of ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig into a JSON -// request object. 
-func expandClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig(c *Client, f *ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig, res *Cluster) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.DataprocMetastoreService; !dcl.IsEmptyValueIndirect(v) { - m["dataprocMetastoreService"] = v - } - - return m, nil -} - -// flattenClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig flattens an instance of ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig from a JSON -// response object. -func flattenClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig(c *Client, i interface{}, res *Cluster) *ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig - } - r.DataprocMetastoreService = dcl.FlattenString(m["dataprocMetastoreService"]) - - return r -} - -// expandClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfigMap expands the contents of ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig into a JSON -// request object. 
-func expandClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfigMap(c *Client, f map[string]ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig, res *Cluster) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfigSlice expands the contents of ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig into a JSON -// request object. -func expandClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfigSlice(c *Client, f []ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig, res *Cluster) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfigMap flattens the contents of ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig from a JSON -// response object. 
-func flattenClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfigMap(c *Client, i interface{}, res *Cluster) map[string]ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig{} - } - - if len(a) == 0 { - return map[string]ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig{} - } - - items := make(map[string]ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig) - for k, item := range a { - items[k] = *flattenClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfigSlice flattens the contents of ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig from a JSON -// response object. -func flattenClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfigSlice(c *Client, i interface{}, res *Cluster) []ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig { - a, ok := i.([]interface{}) - if !ok { - return []ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig{} - } - - if len(a) == 0 { - return []ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig{} - } - - items := make([]ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig expands an instance of ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig into a JSON -// request object. 
-func expandClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig(c *Client, f *ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig, res *Cluster) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.DataprocCluster; !dcl.IsEmptyValueIndirect(v) { - m["dataprocCluster"] = v - } - - return m, nil -} - -// flattenClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig flattens an instance of ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig from a JSON -// response object. -func flattenClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig(c *Client, i interface{}, res *Cluster) *ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig - } - r.DataprocCluster = dcl.FlattenString(m["dataprocCluster"]) - - return r -} - -// flattenClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnumMap flattens the contents of ClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum from a JSON -// response object. 
-func flattenClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnumMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum{} - } - - if len(a) == 0 { - return map[string]ClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum{} - } - - items := make(map[string]ClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum) - for k, item := range a { - items[k] = *flattenClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum(item.(interface{})) - } - - return items -} - -// flattenClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnumSlice flattens the contents of ClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum from a JSON -// response object. -func flattenClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnumSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum { - a, ok := i.([]interface{}) - if !ok { - return []ClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum{} - } - - if len(a) == 0 { - return []ClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum{} - } - - items := make([]ClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum(item.(interface{}))) - } - - return items -} - -// flattenClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum asserts that an interface is a string, and returns a -// pointer to a *ClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum with the same value as that string. 
-func flattenClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum(i interface{}) *ClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return ClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnumRef(s) -} - -// flattenClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnumMap flattens the contents of ClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum from a JSON -// response object. -func flattenClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnumMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum{} - } - - if len(a) == 0 { - return map[string]ClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum{} - } - - items := make(map[string]ClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum) - for k, item := range a { - items[k] = *flattenClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum(item.(interface{})) - } - - return items -} - -// flattenClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnumSlice flattens the contents of ClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum from a JSON -// response object. 
-func flattenClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnumSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum { - a, ok := i.([]interface{}) - if !ok { - return []ClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum{} - } - - if len(a) == 0 { - return []ClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum{} - } - - items := make([]ClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum(item.(interface{}))) - } - - return items -} - -// flattenClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum asserts that an interface is a string, and returns a -// pointer to a *ClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum with the same value as that string. -func flattenClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum(i interface{}) *ClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return ClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnumRef(s) -} - -// flattenClusterConfigMasterConfigPreemptibilityEnumMap flattens the contents of ClusterConfigMasterConfigPreemptibilityEnum from a JSON -// response object. 
-func flattenClusterConfigMasterConfigPreemptibilityEnumMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigMasterConfigPreemptibilityEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterConfigMasterConfigPreemptibilityEnum{} - } - - if len(a) == 0 { - return map[string]ClusterConfigMasterConfigPreemptibilityEnum{} - } - - items := make(map[string]ClusterConfigMasterConfigPreemptibilityEnum) - for k, item := range a { - items[k] = *flattenClusterConfigMasterConfigPreemptibilityEnum(item.(interface{})) - } - - return items -} - -// flattenClusterConfigMasterConfigPreemptibilityEnumSlice flattens the contents of ClusterConfigMasterConfigPreemptibilityEnum from a JSON -// response object. -func flattenClusterConfigMasterConfigPreemptibilityEnumSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigMasterConfigPreemptibilityEnum { - a, ok := i.([]interface{}) - if !ok { - return []ClusterConfigMasterConfigPreemptibilityEnum{} - } - - if len(a) == 0 { - return []ClusterConfigMasterConfigPreemptibilityEnum{} - } - - items := make([]ClusterConfigMasterConfigPreemptibilityEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterConfigMasterConfigPreemptibilityEnum(item.(interface{}))) - } - - return items -} - -// flattenClusterConfigMasterConfigPreemptibilityEnum asserts that an interface is a string, and returns a -// pointer to a *ClusterConfigMasterConfigPreemptibilityEnum with the same value as that string. -func flattenClusterConfigMasterConfigPreemptibilityEnum(i interface{}) *ClusterConfigMasterConfigPreemptibilityEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return ClusterConfigMasterConfigPreemptibilityEnumRef(s) -} - -// flattenClusterConfigWorkerConfigPreemptibilityEnumMap flattens the contents of ClusterConfigWorkerConfigPreemptibilityEnum from a JSON -// response object. 
-func flattenClusterConfigWorkerConfigPreemptibilityEnumMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigWorkerConfigPreemptibilityEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterConfigWorkerConfigPreemptibilityEnum{} - } - - if len(a) == 0 { - return map[string]ClusterConfigWorkerConfigPreemptibilityEnum{} - } - - items := make(map[string]ClusterConfigWorkerConfigPreemptibilityEnum) - for k, item := range a { - items[k] = *flattenClusterConfigWorkerConfigPreemptibilityEnum(item.(interface{})) - } - - return items -} - -// flattenClusterConfigWorkerConfigPreemptibilityEnumSlice flattens the contents of ClusterConfigWorkerConfigPreemptibilityEnum from a JSON -// response object. -func flattenClusterConfigWorkerConfigPreemptibilityEnumSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigWorkerConfigPreemptibilityEnum { - a, ok := i.([]interface{}) - if !ok { - return []ClusterConfigWorkerConfigPreemptibilityEnum{} - } - - if len(a) == 0 { - return []ClusterConfigWorkerConfigPreemptibilityEnum{} - } - - items := make([]ClusterConfigWorkerConfigPreemptibilityEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterConfigWorkerConfigPreemptibilityEnum(item.(interface{}))) - } - - return items -} - -// flattenClusterConfigWorkerConfigPreemptibilityEnum asserts that an interface is a string, and returns a -// pointer to a *ClusterConfigWorkerConfigPreemptibilityEnum with the same value as that string. -func flattenClusterConfigWorkerConfigPreemptibilityEnum(i interface{}) *ClusterConfigWorkerConfigPreemptibilityEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return ClusterConfigWorkerConfigPreemptibilityEnumRef(s) -} - -// flattenClusterConfigSecondaryWorkerConfigPreemptibilityEnumMap flattens the contents of ClusterConfigSecondaryWorkerConfigPreemptibilityEnum from a JSON -// response object. 
-func flattenClusterConfigSecondaryWorkerConfigPreemptibilityEnumMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigSecondaryWorkerConfigPreemptibilityEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterConfigSecondaryWorkerConfigPreemptibilityEnum{} - } - - if len(a) == 0 { - return map[string]ClusterConfigSecondaryWorkerConfigPreemptibilityEnum{} - } - - items := make(map[string]ClusterConfigSecondaryWorkerConfigPreemptibilityEnum) - for k, item := range a { - items[k] = *flattenClusterConfigSecondaryWorkerConfigPreemptibilityEnum(item.(interface{})) - } - - return items -} - -// flattenClusterConfigSecondaryWorkerConfigPreemptibilityEnumSlice flattens the contents of ClusterConfigSecondaryWorkerConfigPreemptibilityEnum from a JSON -// response object. -func flattenClusterConfigSecondaryWorkerConfigPreemptibilityEnumSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigSecondaryWorkerConfigPreemptibilityEnum { - a, ok := i.([]interface{}) - if !ok { - return []ClusterConfigSecondaryWorkerConfigPreemptibilityEnum{} - } - - if len(a) == 0 { - return []ClusterConfigSecondaryWorkerConfigPreemptibilityEnum{} - } - - items := make([]ClusterConfigSecondaryWorkerConfigPreemptibilityEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterConfigSecondaryWorkerConfigPreemptibilityEnum(item.(interface{}))) - } - - return items -} - -// flattenClusterConfigSecondaryWorkerConfigPreemptibilityEnum asserts that an interface is a string, and returns a -// pointer to a *ClusterConfigSecondaryWorkerConfigPreemptibilityEnum with the same value as that string. 
-func flattenClusterConfigSecondaryWorkerConfigPreemptibilityEnum(i interface{}) *ClusterConfigSecondaryWorkerConfigPreemptibilityEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return ClusterConfigSecondaryWorkerConfigPreemptibilityEnumRef(s) -} - -// flattenClusterConfigSoftwareConfigOptionalComponentsEnumMap flattens the contents of ClusterConfigSoftwareConfigOptionalComponentsEnum from a JSON -// response object. -func flattenClusterConfigSoftwareConfigOptionalComponentsEnumMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigSoftwareConfigOptionalComponentsEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterConfigSoftwareConfigOptionalComponentsEnum{} - } - - if len(a) == 0 { - return map[string]ClusterConfigSoftwareConfigOptionalComponentsEnum{} - } - - items := make(map[string]ClusterConfigSoftwareConfigOptionalComponentsEnum) - for k, item := range a { - items[k] = *flattenClusterConfigSoftwareConfigOptionalComponentsEnum(item.(interface{})) - } - - return items -} - -// flattenClusterConfigSoftwareConfigOptionalComponentsEnumSlice flattens the contents of ClusterConfigSoftwareConfigOptionalComponentsEnum from a JSON -// response object. 
-func flattenClusterConfigSoftwareConfigOptionalComponentsEnumSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigSoftwareConfigOptionalComponentsEnum { - a, ok := i.([]interface{}) - if !ok { - return []ClusterConfigSoftwareConfigOptionalComponentsEnum{} - } - - if len(a) == 0 { - return []ClusterConfigSoftwareConfigOptionalComponentsEnum{} - } - - items := make([]ClusterConfigSoftwareConfigOptionalComponentsEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterConfigSoftwareConfigOptionalComponentsEnum(item.(interface{}))) - } - - return items -} - -// flattenClusterConfigSoftwareConfigOptionalComponentsEnum asserts that an interface is a string, and returns a -// pointer to a *ClusterConfigSoftwareConfigOptionalComponentsEnum with the same value as that string. -func flattenClusterConfigSoftwareConfigOptionalComponentsEnum(i interface{}) *ClusterConfigSoftwareConfigOptionalComponentsEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return ClusterConfigSoftwareConfigOptionalComponentsEnumRef(s) -} - -// flattenClusterConfigDataprocMetricConfigMetricsMetricSourceEnumMap flattens the contents of ClusterConfigDataprocMetricConfigMetricsMetricSourceEnum from a JSON -// response object. 
-func flattenClusterConfigDataprocMetricConfigMetricsMetricSourceEnumMap(c *Client, i interface{}, res *Cluster) map[string]ClusterConfigDataprocMetricConfigMetricsMetricSourceEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterConfigDataprocMetricConfigMetricsMetricSourceEnum{} - } - - if len(a) == 0 { - return map[string]ClusterConfigDataprocMetricConfigMetricsMetricSourceEnum{} - } - - items := make(map[string]ClusterConfigDataprocMetricConfigMetricsMetricSourceEnum) - for k, item := range a { - items[k] = *flattenClusterConfigDataprocMetricConfigMetricsMetricSourceEnum(item.(interface{})) - } - - return items -} - -// flattenClusterConfigDataprocMetricConfigMetricsMetricSourceEnumSlice flattens the contents of ClusterConfigDataprocMetricConfigMetricsMetricSourceEnum from a JSON -// response object. -func flattenClusterConfigDataprocMetricConfigMetricsMetricSourceEnumSlice(c *Client, i interface{}, res *Cluster) []ClusterConfigDataprocMetricConfigMetricsMetricSourceEnum { - a, ok := i.([]interface{}) - if !ok { - return []ClusterConfigDataprocMetricConfigMetricsMetricSourceEnum{} - } - - if len(a) == 0 { - return []ClusterConfigDataprocMetricConfigMetricsMetricSourceEnum{} - } - - items := make([]ClusterConfigDataprocMetricConfigMetricsMetricSourceEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterConfigDataprocMetricConfigMetricsMetricSourceEnum(item.(interface{}))) - } - - return items -} - -// flattenClusterConfigDataprocMetricConfigMetricsMetricSourceEnum asserts that an interface is a string, and returns a -// pointer to a *ClusterConfigDataprocMetricConfigMetricsMetricSourceEnum with the same value as that string. 
-func flattenClusterConfigDataprocMetricConfigMetricsMetricSourceEnum(i interface{}) *ClusterConfigDataprocMetricConfigMetricsMetricSourceEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return ClusterConfigDataprocMetricConfigMetricsMetricSourceEnumRef(s) -} - -// flattenClusterStatusStateEnumMap flattens the contents of ClusterStatusStateEnum from a JSON -// response object. -func flattenClusterStatusStateEnumMap(c *Client, i interface{}, res *Cluster) map[string]ClusterStatusStateEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterStatusStateEnum{} - } - - if len(a) == 0 { - return map[string]ClusterStatusStateEnum{} - } - - items := make(map[string]ClusterStatusStateEnum) - for k, item := range a { - items[k] = *flattenClusterStatusStateEnum(item.(interface{})) - } - - return items -} - -// flattenClusterStatusStateEnumSlice flattens the contents of ClusterStatusStateEnum from a JSON -// response object. -func flattenClusterStatusStateEnumSlice(c *Client, i interface{}, res *Cluster) []ClusterStatusStateEnum { - a, ok := i.([]interface{}) - if !ok { - return []ClusterStatusStateEnum{} - } - - if len(a) == 0 { - return []ClusterStatusStateEnum{} - } - - items := make([]ClusterStatusStateEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterStatusStateEnum(item.(interface{}))) - } - - return items -} - -// flattenClusterStatusStateEnum asserts that an interface is a string, and returns a -// pointer to a *ClusterStatusStateEnum with the same value as that string. -func flattenClusterStatusStateEnum(i interface{}) *ClusterStatusStateEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return ClusterStatusStateEnumRef(s) -} - -// flattenClusterStatusSubstateEnumMap flattens the contents of ClusterStatusSubstateEnum from a JSON -// response object. 
-func flattenClusterStatusSubstateEnumMap(c *Client, i interface{}, res *Cluster) map[string]ClusterStatusSubstateEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterStatusSubstateEnum{} - } - - if len(a) == 0 { - return map[string]ClusterStatusSubstateEnum{} - } - - items := make(map[string]ClusterStatusSubstateEnum) - for k, item := range a { - items[k] = *flattenClusterStatusSubstateEnum(item.(interface{})) - } - - return items -} - -// flattenClusterStatusSubstateEnumSlice flattens the contents of ClusterStatusSubstateEnum from a JSON -// response object. -func flattenClusterStatusSubstateEnumSlice(c *Client, i interface{}, res *Cluster) []ClusterStatusSubstateEnum { - a, ok := i.([]interface{}) - if !ok { - return []ClusterStatusSubstateEnum{} - } - - if len(a) == 0 { - return []ClusterStatusSubstateEnum{} - } - - items := make([]ClusterStatusSubstateEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterStatusSubstateEnum(item.(interface{}))) - } - - return items -} - -// flattenClusterStatusSubstateEnum asserts that an interface is a string, and returns a -// pointer to a *ClusterStatusSubstateEnum with the same value as that string. -func flattenClusterStatusSubstateEnum(i interface{}) *ClusterStatusSubstateEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return ClusterStatusSubstateEnumRef(s) -} - -// flattenClusterStatusHistoryStateEnumMap flattens the contents of ClusterStatusHistoryStateEnum from a JSON -// response object. 
-func flattenClusterStatusHistoryStateEnumMap(c *Client, i interface{}, res *Cluster) map[string]ClusterStatusHistoryStateEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterStatusHistoryStateEnum{} - } - - if len(a) == 0 { - return map[string]ClusterStatusHistoryStateEnum{} - } - - items := make(map[string]ClusterStatusHistoryStateEnum) - for k, item := range a { - items[k] = *flattenClusterStatusHistoryStateEnum(item.(interface{})) - } - - return items -} - -// flattenClusterStatusHistoryStateEnumSlice flattens the contents of ClusterStatusHistoryStateEnum from a JSON -// response object. -func flattenClusterStatusHistoryStateEnumSlice(c *Client, i interface{}, res *Cluster) []ClusterStatusHistoryStateEnum { - a, ok := i.([]interface{}) - if !ok { - return []ClusterStatusHistoryStateEnum{} - } - - if len(a) == 0 { - return []ClusterStatusHistoryStateEnum{} - } - - items := make([]ClusterStatusHistoryStateEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterStatusHistoryStateEnum(item.(interface{}))) - } - - return items -} - -// flattenClusterStatusHistoryStateEnum asserts that an interface is a string, and returns a -// pointer to a *ClusterStatusHistoryStateEnum with the same value as that string. -func flattenClusterStatusHistoryStateEnum(i interface{}) *ClusterStatusHistoryStateEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return ClusterStatusHistoryStateEnumRef(s) -} - -// flattenClusterStatusHistorySubstateEnumMap flattens the contents of ClusterStatusHistorySubstateEnum from a JSON -// response object. 
-func flattenClusterStatusHistorySubstateEnumMap(c *Client, i interface{}, res *Cluster) map[string]ClusterStatusHistorySubstateEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterStatusHistorySubstateEnum{} - } - - if len(a) == 0 { - return map[string]ClusterStatusHistorySubstateEnum{} - } - - items := make(map[string]ClusterStatusHistorySubstateEnum) - for k, item := range a { - items[k] = *flattenClusterStatusHistorySubstateEnum(item.(interface{})) - } - - return items -} - -// flattenClusterStatusHistorySubstateEnumSlice flattens the contents of ClusterStatusHistorySubstateEnum from a JSON -// response object. -func flattenClusterStatusHistorySubstateEnumSlice(c *Client, i interface{}, res *Cluster) []ClusterStatusHistorySubstateEnum { - a, ok := i.([]interface{}) - if !ok { - return []ClusterStatusHistorySubstateEnum{} - } - - if len(a) == 0 { - return []ClusterStatusHistorySubstateEnum{} - } - - items := make([]ClusterStatusHistorySubstateEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterStatusHistorySubstateEnum(item.(interface{}))) - } - - return items -} - -// flattenClusterStatusHistorySubstateEnum asserts that an interface is a string, and returns a -// pointer to a *ClusterStatusHistorySubstateEnum with the same value as that string. -func flattenClusterStatusHistorySubstateEnum(i interface{}) *ClusterStatusHistorySubstateEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return ClusterStatusHistorySubstateEnumRef(s) -} - -// flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnumMap flattens the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum from a JSON -// response object. 
-func flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnumMap(c *Client, i interface{}, res *Cluster) map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum{} - } - - if len(a) == 0 { - return map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum{} - } - - items := make(map[string]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum) - for k, item := range a { - items[k] = *flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum(item.(interface{})) - } - - return items -} - -// flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnumSlice flattens the contents of ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum from a JSON -// response object. 
-func flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnumSlice(c *Client, i interface{}, res *Cluster) []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum { - a, ok := i.([]interface{}) - if !ok { - return []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum{} - } - - if len(a) == 0 { - return []ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum{} - } - - items := make([]ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum(item.(interface{}))) - } - - return items -} - -// flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum asserts that an interface is a string, and returns a -// pointer to a *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum with the same value as that string. -func flattenClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum(i interface{}) *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnumRef(s) -} - -// This function returns a matcher that checks whether a serialized resource matches this resource -// in its parameters (as defined by the fields in a Get, which definitionally define resource -// identity). This is useful in extracting the element from a List call. 
-func (r *Cluster) matcher(c *Client) func([]byte) bool { - return func(b []byte) bool { - cr, err := unmarshalCluster(b, c, r) - if err != nil { - c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.") - return false - } - nr := r.urlNormalized() - ncr := cr.urlNormalized() - c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr) - - if nr.Project == nil && ncr.Project == nil { - c.Config.Logger.Info("Both Project fields null - considering equal.") - } else if nr.Project == nil || ncr.Project == nil { - c.Config.Logger.Info("Only one Project field is null - considering unequal.") - return false - } else if *nr.Project != *ncr.Project { - return false - } - if nr.Location == nil && ncr.Location == nil { - c.Config.Logger.Info("Both Location fields null - considering equal.") - } else if nr.Location == nil || ncr.Location == nil { - c.Config.Logger.Info("Only one Location field is null - considering unequal.") - return false - } else if *nr.Location != *ncr.Location { - return false - } - if nr.Name == nil && ncr.Name == nil { - c.Config.Logger.Info("Both Name fields null - considering equal.") - } else if nr.Name == nil || ncr.Name == nil { - c.Config.Logger.Info("Only one Name field is null - considering unequal.") - return false - } else if *nr.Name != *ncr.Name { - return false - } - return true - } -} - -type clusterDiff struct { - // The diff should include one or the other of RequiresRecreate or UpdateOp. - RequiresRecreate bool - UpdateOp clusterApiOperation - FieldName string // used for error logging -} - -func convertFieldDiffsToClusterDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]clusterDiff, error) { - opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff) - // Map each operation name to the field diffs associated with it. 
- for _, fd := range fds { - for _, ro := range fd.ResultingOperation { - if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok { - fieldDiffs = append(fieldDiffs, fd) - opNamesToFieldDiffs[ro] = fieldDiffs - } else { - config.Logger.Infof("%s required due to diff: %v", ro, fd) - opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd} - } - } - } - var diffs []clusterDiff - // For each operation name, create a clusterDiff which contains the operation. - for opName, fieldDiffs := range opNamesToFieldDiffs { - // Use the first field diff's field name for logging required recreate error. - diff := clusterDiff{FieldName: fieldDiffs[0].FieldName} - if opName == "Recreate" { - diff.RequiresRecreate = true - } else { - apiOp, err := convertOpNameToClusterApiOperation(opName, fieldDiffs, opts...) - if err != nil { - return diffs, err - } - diff.UpdateOp = apiOp - } - diffs = append(diffs, diff) - } - return diffs, nil -} - -func convertOpNameToClusterApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (clusterApiOperation, error) { - switch opName { - - case "updateClusterUpdateClusterOperation": - return &updateClusterUpdateClusterOperation{FieldDiffs: fieldDiffs}, nil - - default: - return nil, fmt.Errorf("no such operation with name: %v", opName) - } -} - -func extractClusterFields(r *Cluster) error { - vConfig := r.Config - if vConfig == nil { - // note: explicitly not the empty object. - vConfig = &ClusterConfig{} - } - if err := extractClusterConfigFields(r, vConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vConfig) { - r.Config = vConfig - } - vStatus := r.Status - if vStatus == nil { - // note: explicitly not the empty object. - vStatus = &ClusterStatus{} - } - if err := extractClusterStatusFields(r, vStatus); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vStatus) { - r.Status = vStatus - } - vMetrics := r.Metrics - if vMetrics == nil { - // note: explicitly not the empty object. 
- vMetrics = &ClusterMetrics{} - } - if err := extractClusterMetricsFields(r, vMetrics); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vMetrics) { - r.Metrics = vMetrics - } - vVirtualClusterConfig := r.VirtualClusterConfig - if vVirtualClusterConfig == nil { - // note: explicitly not the empty object. - vVirtualClusterConfig = &ClusterVirtualClusterConfig{} - } - if err := extractClusterVirtualClusterConfigFields(r, vVirtualClusterConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vVirtualClusterConfig) { - r.VirtualClusterConfig = vVirtualClusterConfig - } - return nil -} -func extractClusterConfigFields(r *Cluster, o *ClusterConfig) error { - vGceClusterConfig := o.GceClusterConfig - if vGceClusterConfig == nil { - // note: explicitly not the empty object. - vGceClusterConfig = &ClusterConfigGceClusterConfig{} - } - if err := extractClusterConfigGceClusterConfigFields(r, vGceClusterConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vGceClusterConfig) { - o.GceClusterConfig = vGceClusterConfig - } - vMasterConfig := o.MasterConfig - if vMasterConfig == nil { - // note: explicitly not the empty object. - vMasterConfig = &ClusterConfigMasterConfig{} - } - if err := extractClusterConfigMasterConfigFields(r, vMasterConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vMasterConfig) { - o.MasterConfig = vMasterConfig - } - vWorkerConfig := o.WorkerConfig - if vWorkerConfig == nil { - // note: explicitly not the empty object. - vWorkerConfig = &ClusterConfigWorkerConfig{} - } - if err := extractClusterConfigWorkerConfigFields(r, vWorkerConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vWorkerConfig) { - o.WorkerConfig = vWorkerConfig - } - vSecondaryWorkerConfig := o.SecondaryWorkerConfig - if vSecondaryWorkerConfig == nil { - // note: explicitly not the empty object. 
- vSecondaryWorkerConfig = &ClusterConfigSecondaryWorkerConfig{} - } - if err := extractClusterConfigSecondaryWorkerConfigFields(r, vSecondaryWorkerConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSecondaryWorkerConfig) { - o.SecondaryWorkerConfig = vSecondaryWorkerConfig - } - vSoftwareConfig := o.SoftwareConfig - if vSoftwareConfig == nil { - // note: explicitly not the empty object. - vSoftwareConfig = &ClusterConfigSoftwareConfig{} - } - if err := extractClusterConfigSoftwareConfigFields(r, vSoftwareConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSoftwareConfig) { - o.SoftwareConfig = vSoftwareConfig - } - vEncryptionConfig := o.EncryptionConfig - if vEncryptionConfig == nil { - // note: explicitly not the empty object. - vEncryptionConfig = &ClusterConfigEncryptionConfig{} - } - if err := extractClusterConfigEncryptionConfigFields(r, vEncryptionConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vEncryptionConfig) { - o.EncryptionConfig = vEncryptionConfig - } - vAutoscalingConfig := o.AutoscalingConfig - if vAutoscalingConfig == nil { - // note: explicitly not the empty object. - vAutoscalingConfig = &ClusterConfigAutoscalingConfig{} - } - if err := extractClusterConfigAutoscalingConfigFields(r, vAutoscalingConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAutoscalingConfig) { - o.AutoscalingConfig = vAutoscalingConfig - } - vSecurityConfig := o.SecurityConfig - if vSecurityConfig == nil { - // note: explicitly not the empty object. - vSecurityConfig = &ClusterConfigSecurityConfig{} - } - if err := extractClusterConfigSecurityConfigFields(r, vSecurityConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSecurityConfig) { - o.SecurityConfig = vSecurityConfig - } - vLifecycleConfig := o.LifecycleConfig - if vLifecycleConfig == nil { - // note: explicitly not the empty object. 
- vLifecycleConfig = &ClusterConfigLifecycleConfig{} - } - if err := extractClusterConfigLifecycleConfigFields(r, vLifecycleConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vLifecycleConfig) { - o.LifecycleConfig = vLifecycleConfig - } - vEndpointConfig := o.EndpointConfig - if vEndpointConfig == nil { - // note: explicitly not the empty object. - vEndpointConfig = &ClusterConfigEndpointConfig{} - } - if err := extractClusterConfigEndpointConfigFields(r, vEndpointConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vEndpointConfig) { - o.EndpointConfig = vEndpointConfig - } - vGkeClusterConfig := o.GkeClusterConfig - if vGkeClusterConfig == nil { - // note: explicitly not the empty object. - vGkeClusterConfig = &ClusterConfigGkeClusterConfig{} - } - if err := extractClusterConfigGkeClusterConfigFields(r, vGkeClusterConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vGkeClusterConfig) { - o.GkeClusterConfig = vGkeClusterConfig - } - vMetastoreConfig := o.MetastoreConfig - if vMetastoreConfig == nil { - // note: explicitly not the empty object. - vMetastoreConfig = &ClusterConfigMetastoreConfig{} - } - if err := extractClusterConfigMetastoreConfigFields(r, vMetastoreConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vMetastoreConfig) { - o.MetastoreConfig = vMetastoreConfig - } - vDataprocMetricConfig := o.DataprocMetricConfig - if vDataprocMetricConfig == nil { - // note: explicitly not the empty object. 
- vDataprocMetricConfig = &ClusterConfigDataprocMetricConfig{} - } - if err := extractClusterConfigDataprocMetricConfigFields(r, vDataprocMetricConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vDataprocMetricConfig) { - o.DataprocMetricConfig = vDataprocMetricConfig - } - return nil -} -func extractClusterConfigGceClusterConfigFields(r *Cluster, o *ClusterConfigGceClusterConfig) error { - vReservationAffinity := o.ReservationAffinity - if vReservationAffinity == nil { - // note: explicitly not the empty object. - vReservationAffinity = &ClusterConfigGceClusterConfigReservationAffinity{} - } - if err := extractClusterConfigGceClusterConfigReservationAffinityFields(r, vReservationAffinity); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vReservationAffinity) { - o.ReservationAffinity = vReservationAffinity - } - vNodeGroupAffinity := o.NodeGroupAffinity - if vNodeGroupAffinity == nil { - // note: explicitly not the empty object. - vNodeGroupAffinity = &ClusterConfigGceClusterConfigNodeGroupAffinity{} - } - if err := extractClusterConfigGceClusterConfigNodeGroupAffinityFields(r, vNodeGroupAffinity); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vNodeGroupAffinity) { - o.NodeGroupAffinity = vNodeGroupAffinity - } - vShieldedInstanceConfig := o.ShieldedInstanceConfig - if vShieldedInstanceConfig == nil { - // note: explicitly not the empty object. - vShieldedInstanceConfig = &ClusterConfigGceClusterConfigShieldedInstanceConfig{} - } - if err := extractClusterConfigGceClusterConfigShieldedInstanceConfigFields(r, vShieldedInstanceConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vShieldedInstanceConfig) { - o.ShieldedInstanceConfig = vShieldedInstanceConfig - } - vConfidentialInstanceConfig := o.ConfidentialInstanceConfig - if vConfidentialInstanceConfig == nil { - // note: explicitly not the empty object. 
- vConfidentialInstanceConfig = &ClusterConfigGceClusterConfigConfidentialInstanceConfig{} - } - if err := extractClusterConfigGceClusterConfigConfidentialInstanceConfigFields(r, vConfidentialInstanceConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vConfidentialInstanceConfig) { - o.ConfidentialInstanceConfig = vConfidentialInstanceConfig - } - return nil -} -func extractClusterConfigGceClusterConfigReservationAffinityFields(r *Cluster, o *ClusterConfigGceClusterConfigReservationAffinity) error { - return nil -} -func extractClusterConfigGceClusterConfigNodeGroupAffinityFields(r *Cluster, o *ClusterConfigGceClusterConfigNodeGroupAffinity) error { - return nil -} -func extractClusterConfigGceClusterConfigShieldedInstanceConfigFields(r *Cluster, o *ClusterConfigGceClusterConfigShieldedInstanceConfig) error { - return nil -} -func extractClusterConfigGceClusterConfigConfidentialInstanceConfigFields(r *Cluster, o *ClusterConfigGceClusterConfigConfidentialInstanceConfig) error { - return nil -} -func extractClusterConfigMasterConfigFields(r *Cluster, o *ClusterConfigMasterConfig) error { - vDiskConfig := o.DiskConfig - if vDiskConfig == nil { - // note: explicitly not the empty object. - vDiskConfig = &ClusterConfigMasterConfigDiskConfig{} - } - if err := extractClusterConfigMasterConfigDiskConfigFields(r, vDiskConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vDiskConfig) { - o.DiskConfig = vDiskConfig - } - vManagedGroupConfig := o.ManagedGroupConfig - if vManagedGroupConfig == nil { - // note: explicitly not the empty object. 
- vManagedGroupConfig = &ClusterConfigMasterConfigManagedGroupConfig{} - } - if err := extractClusterConfigMasterConfigManagedGroupConfigFields(r, vManagedGroupConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vManagedGroupConfig) { - o.ManagedGroupConfig = vManagedGroupConfig - } - return nil -} -func extractClusterConfigMasterConfigDiskConfigFields(r *Cluster, o *ClusterConfigMasterConfigDiskConfig) error { - return nil -} -func extractClusterConfigMasterConfigManagedGroupConfigFields(r *Cluster, o *ClusterConfigMasterConfigManagedGroupConfig) error { - return nil -} -func extractClusterConfigMasterConfigAcceleratorsFields(r *Cluster, o *ClusterConfigMasterConfigAccelerators) error { - return nil -} -func extractClusterConfigMasterConfigInstanceReferencesFields(r *Cluster, o *ClusterConfigMasterConfigInstanceReferences) error { - return nil -} -func extractClusterConfigWorkerConfigFields(r *Cluster, o *ClusterConfigWorkerConfig) error { - vDiskConfig := o.DiskConfig - if vDiskConfig == nil { - // note: explicitly not the empty object. - vDiskConfig = &ClusterConfigWorkerConfigDiskConfig{} - } - if err := extractClusterConfigWorkerConfigDiskConfigFields(r, vDiskConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vDiskConfig) { - o.DiskConfig = vDiskConfig - } - vManagedGroupConfig := o.ManagedGroupConfig - if vManagedGroupConfig == nil { - // note: explicitly not the empty object. 
- vManagedGroupConfig = &ClusterConfigWorkerConfigManagedGroupConfig{} - } - if err := extractClusterConfigWorkerConfigManagedGroupConfigFields(r, vManagedGroupConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vManagedGroupConfig) { - o.ManagedGroupConfig = vManagedGroupConfig - } - return nil -} -func extractClusterConfigWorkerConfigDiskConfigFields(r *Cluster, o *ClusterConfigWorkerConfigDiskConfig) error { - return nil -} -func extractClusterConfigWorkerConfigManagedGroupConfigFields(r *Cluster, o *ClusterConfigWorkerConfigManagedGroupConfig) error { - return nil -} -func extractClusterConfigWorkerConfigAcceleratorsFields(r *Cluster, o *ClusterConfigWorkerConfigAccelerators) error { - return nil -} -func extractClusterConfigWorkerConfigInstanceReferencesFields(r *Cluster, o *ClusterConfigWorkerConfigInstanceReferences) error { - return nil -} -func extractClusterConfigSecondaryWorkerConfigFields(r *Cluster, o *ClusterConfigSecondaryWorkerConfig) error { - vDiskConfig := o.DiskConfig - if vDiskConfig == nil { - // note: explicitly not the empty object. - vDiskConfig = &ClusterConfigSecondaryWorkerConfigDiskConfig{} - } - if err := extractClusterConfigSecondaryWorkerConfigDiskConfigFields(r, vDiskConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vDiskConfig) { - o.DiskConfig = vDiskConfig - } - vManagedGroupConfig := o.ManagedGroupConfig - if vManagedGroupConfig == nil { - // note: explicitly not the empty object. 
- vManagedGroupConfig = &ClusterConfigSecondaryWorkerConfigManagedGroupConfig{} - } - if err := extractClusterConfigSecondaryWorkerConfigManagedGroupConfigFields(r, vManagedGroupConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vManagedGroupConfig) { - o.ManagedGroupConfig = vManagedGroupConfig - } - return nil -} -func extractClusterConfigSecondaryWorkerConfigDiskConfigFields(r *Cluster, o *ClusterConfigSecondaryWorkerConfigDiskConfig) error { - return nil -} -func extractClusterConfigSecondaryWorkerConfigManagedGroupConfigFields(r *Cluster, o *ClusterConfigSecondaryWorkerConfigManagedGroupConfig) error { - return nil -} -func extractClusterConfigSecondaryWorkerConfigAcceleratorsFields(r *Cluster, o *ClusterConfigSecondaryWorkerConfigAccelerators) error { - return nil -} -func extractClusterConfigSecondaryWorkerConfigInstanceReferencesFields(r *Cluster, o *ClusterConfigSecondaryWorkerConfigInstanceReferences) error { - return nil -} -func extractClusterConfigSoftwareConfigFields(r *Cluster, o *ClusterConfigSoftwareConfig) error { - return nil -} -func extractClusterConfigInitializationActionsFields(r *Cluster, o *ClusterConfigInitializationActions) error { - return nil -} -func extractClusterConfigEncryptionConfigFields(r *Cluster, o *ClusterConfigEncryptionConfig) error { - return nil -} -func extractClusterConfigAutoscalingConfigFields(r *Cluster, o *ClusterConfigAutoscalingConfig) error { - return nil -} -func extractClusterConfigSecurityConfigFields(r *Cluster, o *ClusterConfigSecurityConfig) error { - vKerberosConfig := o.KerberosConfig - if vKerberosConfig == nil { - // note: explicitly not the empty object. 
- vKerberosConfig = &ClusterConfigSecurityConfigKerberosConfig{} - } - if err := extractClusterConfigSecurityConfigKerberosConfigFields(r, vKerberosConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vKerberosConfig) { - o.KerberosConfig = vKerberosConfig - } - vIdentityConfig := o.IdentityConfig - if vIdentityConfig == nil { - // note: explicitly not the empty object. - vIdentityConfig = &ClusterConfigSecurityConfigIdentityConfig{} - } - if err := extractClusterConfigSecurityConfigIdentityConfigFields(r, vIdentityConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vIdentityConfig) { - o.IdentityConfig = vIdentityConfig - } - return nil -} -func extractClusterConfigSecurityConfigKerberosConfigFields(r *Cluster, o *ClusterConfigSecurityConfigKerberosConfig) error { - return nil -} -func extractClusterConfigSecurityConfigIdentityConfigFields(r *Cluster, o *ClusterConfigSecurityConfigIdentityConfig) error { - return nil -} -func extractClusterConfigLifecycleConfigFields(r *Cluster, o *ClusterConfigLifecycleConfig) error { - return nil -} -func extractClusterConfigEndpointConfigFields(r *Cluster, o *ClusterConfigEndpointConfig) error { - return nil -} -func extractClusterConfigGkeClusterConfigFields(r *Cluster, o *ClusterConfigGkeClusterConfig) error { - vNamespacedGkeDeploymentTarget := o.NamespacedGkeDeploymentTarget - if vNamespacedGkeDeploymentTarget == nil { - // note: explicitly not the empty object. 
- vNamespacedGkeDeploymentTarget = &ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget{} - } - if err := extractClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetFields(r, vNamespacedGkeDeploymentTarget); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vNamespacedGkeDeploymentTarget) { - o.NamespacedGkeDeploymentTarget = vNamespacedGkeDeploymentTarget - } - return nil -} -func extractClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetFields(r *Cluster, o *ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) error { - return nil -} -func extractClusterConfigMetastoreConfigFields(r *Cluster, o *ClusterConfigMetastoreConfig) error { - return nil -} -func extractClusterConfigDataprocMetricConfigFields(r *Cluster, o *ClusterConfigDataprocMetricConfig) error { - return nil -} -func extractClusterConfigDataprocMetricConfigMetricsFields(r *Cluster, o *ClusterConfigDataprocMetricConfigMetrics) error { - return nil -} -func extractClusterStatusFields(r *Cluster, o *ClusterStatus) error { - return nil -} -func extractClusterStatusHistoryFields(r *Cluster, o *ClusterStatusHistory) error { - return nil -} -func extractClusterMetricsFields(r *Cluster, o *ClusterMetrics) error { - return nil -} -func extractClusterVirtualClusterConfigFields(r *Cluster, o *ClusterVirtualClusterConfig) error { - vKubernetesClusterConfig := o.KubernetesClusterConfig - if vKubernetesClusterConfig == nil { - // note: explicitly not the empty object. - vKubernetesClusterConfig = &ClusterVirtualClusterConfigKubernetesClusterConfig{} - } - if err := extractClusterVirtualClusterConfigKubernetesClusterConfigFields(r, vKubernetesClusterConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vKubernetesClusterConfig) { - o.KubernetesClusterConfig = vKubernetesClusterConfig - } - vAuxiliaryServicesConfig := o.AuxiliaryServicesConfig - if vAuxiliaryServicesConfig == nil { - // note: explicitly not the empty object. 
- vAuxiliaryServicesConfig = &ClusterVirtualClusterConfigAuxiliaryServicesConfig{} - } - if err := extractClusterVirtualClusterConfigAuxiliaryServicesConfigFields(r, vAuxiliaryServicesConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAuxiliaryServicesConfig) { - o.AuxiliaryServicesConfig = vAuxiliaryServicesConfig - } - return nil -} -func extractClusterVirtualClusterConfigKubernetesClusterConfigFields(r *Cluster, o *ClusterVirtualClusterConfigKubernetesClusterConfig) error { - vGkeClusterConfig := o.GkeClusterConfig - if vGkeClusterConfig == nil { - // note: explicitly not the empty object. - vGkeClusterConfig = &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig{} - } - if err := extractClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigFields(r, vGkeClusterConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vGkeClusterConfig) { - o.GkeClusterConfig = vGkeClusterConfig - } - vKubernetesSoftwareConfig := o.KubernetesSoftwareConfig - if vKubernetesSoftwareConfig == nil { - // note: explicitly not the empty object. 
- vKubernetesSoftwareConfig = &ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig{} - } - if err := extractClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfigFields(r, vKubernetesSoftwareConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vKubernetesSoftwareConfig) { - o.KubernetesSoftwareConfig = vKubernetesSoftwareConfig - } - return nil -} -func extractClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigFields(r *Cluster, o *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig) error { - return nil -} -func extractClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetFields(r *Cluster, o *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget) error { - vNodePoolConfig := o.NodePoolConfig - if vNodePoolConfig == nil { - // note: explicitly not the empty object. - vNodePoolConfig = &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig{} - } - if err := extractClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigFields(r, vNodePoolConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vNodePoolConfig) { - o.NodePoolConfig = vNodePoolConfig - } - return nil -} -func extractClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigFields(r *Cluster, o *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig) error { - vConfig := o.Config - if vConfig == nil { - // note: explicitly not the empty object. 
- vConfig = &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig{} - } - if err := extractClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigFields(r, vConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vConfig) { - o.Config = vConfig - } - vAutoscaling := o.Autoscaling - if vAutoscaling == nil { - // note: explicitly not the empty object. - vAutoscaling = &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling{} - } - if err := extractClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscalingFields(r, vAutoscaling); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAutoscaling) { - o.Autoscaling = vAutoscaling - } - return nil -} -func extractClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigFields(r *Cluster, o *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig) error { - vEphemeralStorageConfig := o.EphemeralStorageConfig - if vEphemeralStorageConfig == nil { - // note: explicitly not the empty object. 
- vEphemeralStorageConfig = &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig{} - } - if err := extractClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfigFields(r, vEphemeralStorageConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vEphemeralStorageConfig) { - o.EphemeralStorageConfig = vEphemeralStorageConfig - } - return nil -} -func extractClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAcceleratorsFields(r *Cluster, o *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators) error { - return nil -} -func extractClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfigFields(r *Cluster, o *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig) error { - return nil -} -func extractClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscalingFields(r *Cluster, o *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling) error { - return nil -} -func extractClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfigFields(r *Cluster, o *ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig) error { - return nil -} -func extractClusterVirtualClusterConfigAuxiliaryServicesConfigFields(r *Cluster, o *ClusterVirtualClusterConfigAuxiliaryServicesConfig) error { - vMetastoreConfig := o.MetastoreConfig - if vMetastoreConfig == nil { - // note: explicitly not the empty object. 
- vMetastoreConfig = &ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig{} - } - if err := extractClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfigFields(r, vMetastoreConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vMetastoreConfig) { - o.MetastoreConfig = vMetastoreConfig - } - vSparkHistoryServerConfig := o.SparkHistoryServerConfig - if vSparkHistoryServerConfig == nil { - // note: explicitly not the empty object. - vSparkHistoryServerConfig = &ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig{} - } - if err := extractClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfigFields(r, vSparkHistoryServerConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSparkHistoryServerConfig) { - o.SparkHistoryServerConfig = vSparkHistoryServerConfig - } - return nil -} -func extractClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfigFields(r *Cluster, o *ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig) error { - return nil -} -func extractClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfigFields(r *Cluster, o *ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig) error { - return nil -} - -func postReadExtractClusterFields(r *Cluster) error { - vConfig := r.Config - if vConfig == nil { - // note: explicitly not the empty object. - vConfig = &ClusterConfig{} - } - if err := postReadExtractClusterConfigFields(r, vConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vConfig) { - r.Config = vConfig - } - vStatus := r.Status - if vStatus == nil { - // note: explicitly not the empty object. - vStatus = &ClusterStatus{} - } - if err := postReadExtractClusterStatusFields(r, vStatus); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vStatus) { - r.Status = vStatus - } - vMetrics := r.Metrics - if vMetrics == nil { - // note: explicitly not the empty object. 
- vMetrics = &ClusterMetrics{} - } - if err := postReadExtractClusterMetricsFields(r, vMetrics); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vMetrics) { - r.Metrics = vMetrics - } - vVirtualClusterConfig := r.VirtualClusterConfig - if vVirtualClusterConfig == nil { - // note: explicitly not the empty object. - vVirtualClusterConfig = &ClusterVirtualClusterConfig{} - } - if err := postReadExtractClusterVirtualClusterConfigFields(r, vVirtualClusterConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vVirtualClusterConfig) { - r.VirtualClusterConfig = vVirtualClusterConfig - } - return nil -} -func postReadExtractClusterConfigFields(r *Cluster, o *ClusterConfig) error { - vGceClusterConfig := o.GceClusterConfig - if vGceClusterConfig == nil { - // note: explicitly not the empty object. - vGceClusterConfig = &ClusterConfigGceClusterConfig{} - } - if err := extractClusterConfigGceClusterConfigFields(r, vGceClusterConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vGceClusterConfig) { - o.GceClusterConfig = vGceClusterConfig - } - vMasterConfig := o.MasterConfig - if vMasterConfig == nil { - // note: explicitly not the empty object. - vMasterConfig = &ClusterConfigMasterConfig{} - } - if err := extractClusterConfigMasterConfigFields(r, vMasterConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vMasterConfig) { - o.MasterConfig = vMasterConfig - } - vWorkerConfig := o.WorkerConfig - if vWorkerConfig == nil { - // note: explicitly not the empty object. - vWorkerConfig = &ClusterConfigWorkerConfig{} - } - if err := extractClusterConfigWorkerConfigFields(r, vWorkerConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vWorkerConfig) { - o.WorkerConfig = vWorkerConfig - } - vSecondaryWorkerConfig := o.SecondaryWorkerConfig - if vSecondaryWorkerConfig == nil { - // note: explicitly not the empty object. 
- vSecondaryWorkerConfig = &ClusterConfigSecondaryWorkerConfig{} - } - if err := extractClusterConfigSecondaryWorkerConfigFields(r, vSecondaryWorkerConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSecondaryWorkerConfig) { - o.SecondaryWorkerConfig = vSecondaryWorkerConfig - } - vSoftwareConfig := o.SoftwareConfig - if vSoftwareConfig == nil { - // note: explicitly not the empty object. - vSoftwareConfig = &ClusterConfigSoftwareConfig{} - } - if err := extractClusterConfigSoftwareConfigFields(r, vSoftwareConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSoftwareConfig) { - o.SoftwareConfig = vSoftwareConfig - } - vEncryptionConfig := o.EncryptionConfig - if vEncryptionConfig == nil { - // note: explicitly not the empty object. - vEncryptionConfig = &ClusterConfigEncryptionConfig{} - } - if err := extractClusterConfigEncryptionConfigFields(r, vEncryptionConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vEncryptionConfig) { - o.EncryptionConfig = vEncryptionConfig - } - vAutoscalingConfig := o.AutoscalingConfig - if vAutoscalingConfig == nil { - // note: explicitly not the empty object. - vAutoscalingConfig = &ClusterConfigAutoscalingConfig{} - } - if err := extractClusterConfigAutoscalingConfigFields(r, vAutoscalingConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAutoscalingConfig) { - o.AutoscalingConfig = vAutoscalingConfig - } - vSecurityConfig := o.SecurityConfig - if vSecurityConfig == nil { - // note: explicitly not the empty object. - vSecurityConfig = &ClusterConfigSecurityConfig{} - } - if err := extractClusterConfigSecurityConfigFields(r, vSecurityConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSecurityConfig) { - o.SecurityConfig = vSecurityConfig - } - vLifecycleConfig := o.LifecycleConfig - if vLifecycleConfig == nil { - // note: explicitly not the empty object. 
- vLifecycleConfig = &ClusterConfigLifecycleConfig{} - } - if err := extractClusterConfigLifecycleConfigFields(r, vLifecycleConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vLifecycleConfig) { - o.LifecycleConfig = vLifecycleConfig - } - vEndpointConfig := o.EndpointConfig - if vEndpointConfig == nil { - // note: explicitly not the empty object. - vEndpointConfig = &ClusterConfigEndpointConfig{} - } - if err := extractClusterConfigEndpointConfigFields(r, vEndpointConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vEndpointConfig) { - o.EndpointConfig = vEndpointConfig - } - vGkeClusterConfig := o.GkeClusterConfig - if vGkeClusterConfig == nil { - // note: explicitly not the empty object. - vGkeClusterConfig = &ClusterConfigGkeClusterConfig{} - } - if err := extractClusterConfigGkeClusterConfigFields(r, vGkeClusterConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vGkeClusterConfig) { - o.GkeClusterConfig = vGkeClusterConfig - } - vMetastoreConfig := o.MetastoreConfig - if vMetastoreConfig == nil { - // note: explicitly not the empty object. - vMetastoreConfig = &ClusterConfigMetastoreConfig{} - } - if err := extractClusterConfigMetastoreConfigFields(r, vMetastoreConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vMetastoreConfig) { - o.MetastoreConfig = vMetastoreConfig - } - vDataprocMetricConfig := o.DataprocMetricConfig - if vDataprocMetricConfig == nil { - // note: explicitly not the empty object. 
- vDataprocMetricConfig = &ClusterConfigDataprocMetricConfig{} - } - if err := extractClusterConfigDataprocMetricConfigFields(r, vDataprocMetricConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vDataprocMetricConfig) { - o.DataprocMetricConfig = vDataprocMetricConfig - } - return nil -} -func postReadExtractClusterConfigGceClusterConfigFields(r *Cluster, o *ClusterConfigGceClusterConfig) error { - vReservationAffinity := o.ReservationAffinity - if vReservationAffinity == nil { - // note: explicitly not the empty object. - vReservationAffinity = &ClusterConfigGceClusterConfigReservationAffinity{} - } - if err := extractClusterConfigGceClusterConfigReservationAffinityFields(r, vReservationAffinity); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vReservationAffinity) { - o.ReservationAffinity = vReservationAffinity - } - vNodeGroupAffinity := o.NodeGroupAffinity - if vNodeGroupAffinity == nil { - // note: explicitly not the empty object. - vNodeGroupAffinity = &ClusterConfigGceClusterConfigNodeGroupAffinity{} - } - if err := extractClusterConfigGceClusterConfigNodeGroupAffinityFields(r, vNodeGroupAffinity); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vNodeGroupAffinity) { - o.NodeGroupAffinity = vNodeGroupAffinity - } - vShieldedInstanceConfig := o.ShieldedInstanceConfig - if vShieldedInstanceConfig == nil { - // note: explicitly not the empty object. - vShieldedInstanceConfig = &ClusterConfigGceClusterConfigShieldedInstanceConfig{} - } - if err := extractClusterConfigGceClusterConfigShieldedInstanceConfigFields(r, vShieldedInstanceConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vShieldedInstanceConfig) { - o.ShieldedInstanceConfig = vShieldedInstanceConfig - } - vConfidentialInstanceConfig := o.ConfidentialInstanceConfig - if vConfidentialInstanceConfig == nil { - // note: explicitly not the empty object. 
- vConfidentialInstanceConfig = &ClusterConfigGceClusterConfigConfidentialInstanceConfig{} - } - if err := extractClusterConfigGceClusterConfigConfidentialInstanceConfigFields(r, vConfidentialInstanceConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vConfidentialInstanceConfig) { - o.ConfidentialInstanceConfig = vConfidentialInstanceConfig - } - return nil -} -func postReadExtractClusterConfigGceClusterConfigReservationAffinityFields(r *Cluster, o *ClusterConfigGceClusterConfigReservationAffinity) error { - return nil -} -func postReadExtractClusterConfigGceClusterConfigNodeGroupAffinityFields(r *Cluster, o *ClusterConfigGceClusterConfigNodeGroupAffinity) error { - return nil -} -func postReadExtractClusterConfigGceClusterConfigShieldedInstanceConfigFields(r *Cluster, o *ClusterConfigGceClusterConfigShieldedInstanceConfig) error { - return nil -} -func postReadExtractClusterConfigGceClusterConfigConfidentialInstanceConfigFields(r *Cluster, o *ClusterConfigGceClusterConfigConfidentialInstanceConfig) error { - return nil -} -func postReadExtractClusterConfigMasterConfigFields(r *Cluster, o *ClusterConfigMasterConfig) error { - vDiskConfig := o.DiskConfig - if vDiskConfig == nil { - // note: explicitly not the empty object. - vDiskConfig = &ClusterConfigMasterConfigDiskConfig{} - } - if err := extractClusterConfigMasterConfigDiskConfigFields(r, vDiskConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vDiskConfig) { - o.DiskConfig = vDiskConfig - } - vManagedGroupConfig := o.ManagedGroupConfig - if vManagedGroupConfig == nil { - // note: explicitly not the empty object. 
- vManagedGroupConfig = &ClusterConfigMasterConfigManagedGroupConfig{} - } - if err := extractClusterConfigMasterConfigManagedGroupConfigFields(r, vManagedGroupConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vManagedGroupConfig) { - o.ManagedGroupConfig = vManagedGroupConfig - } - return nil -} -func postReadExtractClusterConfigMasterConfigDiskConfigFields(r *Cluster, o *ClusterConfigMasterConfigDiskConfig) error { - return nil -} -func postReadExtractClusterConfigMasterConfigManagedGroupConfigFields(r *Cluster, o *ClusterConfigMasterConfigManagedGroupConfig) error { - return nil -} -func postReadExtractClusterConfigMasterConfigAcceleratorsFields(r *Cluster, o *ClusterConfigMasterConfigAccelerators) error { - return nil -} -func postReadExtractClusterConfigMasterConfigInstanceReferencesFields(r *Cluster, o *ClusterConfigMasterConfigInstanceReferences) error { - return nil -} -func postReadExtractClusterConfigWorkerConfigFields(r *Cluster, o *ClusterConfigWorkerConfig) error { - vDiskConfig := o.DiskConfig - if vDiskConfig == nil { - // note: explicitly not the empty object. - vDiskConfig = &ClusterConfigWorkerConfigDiskConfig{} - } - if err := extractClusterConfigWorkerConfigDiskConfigFields(r, vDiskConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vDiskConfig) { - o.DiskConfig = vDiskConfig - } - vManagedGroupConfig := o.ManagedGroupConfig - if vManagedGroupConfig == nil { - // note: explicitly not the empty object. 
- vManagedGroupConfig = &ClusterConfigWorkerConfigManagedGroupConfig{} - } - if err := extractClusterConfigWorkerConfigManagedGroupConfigFields(r, vManagedGroupConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vManagedGroupConfig) { - o.ManagedGroupConfig = vManagedGroupConfig - } - return nil -} -func postReadExtractClusterConfigWorkerConfigDiskConfigFields(r *Cluster, o *ClusterConfigWorkerConfigDiskConfig) error { - return nil -} -func postReadExtractClusterConfigWorkerConfigManagedGroupConfigFields(r *Cluster, o *ClusterConfigWorkerConfigManagedGroupConfig) error { - return nil -} -func postReadExtractClusterConfigWorkerConfigAcceleratorsFields(r *Cluster, o *ClusterConfigWorkerConfigAccelerators) error { - return nil -} -func postReadExtractClusterConfigWorkerConfigInstanceReferencesFields(r *Cluster, o *ClusterConfigWorkerConfigInstanceReferences) error { - return nil -} -func postReadExtractClusterConfigSecondaryWorkerConfigFields(r *Cluster, o *ClusterConfigSecondaryWorkerConfig) error { - vDiskConfig := o.DiskConfig - if vDiskConfig == nil { - // note: explicitly not the empty object. - vDiskConfig = &ClusterConfigSecondaryWorkerConfigDiskConfig{} - } - if err := extractClusterConfigSecondaryWorkerConfigDiskConfigFields(r, vDiskConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vDiskConfig) { - o.DiskConfig = vDiskConfig - } - vManagedGroupConfig := o.ManagedGroupConfig - if vManagedGroupConfig == nil { - // note: explicitly not the empty object. 
- vManagedGroupConfig = &ClusterConfigSecondaryWorkerConfigManagedGroupConfig{} - } - if err := extractClusterConfigSecondaryWorkerConfigManagedGroupConfigFields(r, vManagedGroupConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vManagedGroupConfig) { - o.ManagedGroupConfig = vManagedGroupConfig - } - return nil -} -func postReadExtractClusterConfigSecondaryWorkerConfigDiskConfigFields(r *Cluster, o *ClusterConfigSecondaryWorkerConfigDiskConfig) error { - return nil -} -func postReadExtractClusterConfigSecondaryWorkerConfigManagedGroupConfigFields(r *Cluster, o *ClusterConfigSecondaryWorkerConfigManagedGroupConfig) error { - return nil -} -func postReadExtractClusterConfigSecondaryWorkerConfigAcceleratorsFields(r *Cluster, o *ClusterConfigSecondaryWorkerConfigAccelerators) error { - return nil -} -func postReadExtractClusterConfigSecondaryWorkerConfigInstanceReferencesFields(r *Cluster, o *ClusterConfigSecondaryWorkerConfigInstanceReferences) error { - return nil -} -func postReadExtractClusterConfigSoftwareConfigFields(r *Cluster, o *ClusterConfigSoftwareConfig) error { - return nil -} -func postReadExtractClusterConfigInitializationActionsFields(r *Cluster, o *ClusterConfigInitializationActions) error { - return nil -} -func postReadExtractClusterConfigEncryptionConfigFields(r *Cluster, o *ClusterConfigEncryptionConfig) error { - return nil -} -func postReadExtractClusterConfigAutoscalingConfigFields(r *Cluster, o *ClusterConfigAutoscalingConfig) error { - return nil -} -func postReadExtractClusterConfigSecurityConfigFields(r *Cluster, o *ClusterConfigSecurityConfig) error { - vKerberosConfig := o.KerberosConfig - if vKerberosConfig == nil { - // note: explicitly not the empty object. 
- vKerberosConfig = &ClusterConfigSecurityConfigKerberosConfig{} - } - if err := extractClusterConfigSecurityConfigKerberosConfigFields(r, vKerberosConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vKerberosConfig) { - o.KerberosConfig = vKerberosConfig - } - vIdentityConfig := o.IdentityConfig - if vIdentityConfig == nil { - // note: explicitly not the empty object. - vIdentityConfig = &ClusterConfigSecurityConfigIdentityConfig{} - } - if err := extractClusterConfigSecurityConfigIdentityConfigFields(r, vIdentityConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vIdentityConfig) { - o.IdentityConfig = vIdentityConfig - } - return nil -} -func postReadExtractClusterConfigSecurityConfigKerberosConfigFields(r *Cluster, o *ClusterConfigSecurityConfigKerberosConfig) error { - return nil -} -func postReadExtractClusterConfigSecurityConfigIdentityConfigFields(r *Cluster, o *ClusterConfigSecurityConfigIdentityConfig) error { - return nil -} -func postReadExtractClusterConfigLifecycleConfigFields(r *Cluster, o *ClusterConfigLifecycleConfig) error { - return nil -} -func postReadExtractClusterConfigEndpointConfigFields(r *Cluster, o *ClusterConfigEndpointConfig) error { - return nil -} -func postReadExtractClusterConfigGkeClusterConfigFields(r *Cluster, o *ClusterConfigGkeClusterConfig) error { - vNamespacedGkeDeploymentTarget := o.NamespacedGkeDeploymentTarget - if vNamespacedGkeDeploymentTarget == nil { - // note: explicitly not the empty object. 
- vNamespacedGkeDeploymentTarget = &ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget{} - } - if err := extractClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetFields(r, vNamespacedGkeDeploymentTarget); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vNamespacedGkeDeploymentTarget) { - o.NamespacedGkeDeploymentTarget = vNamespacedGkeDeploymentTarget - } - return nil -} -func postReadExtractClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetFields(r *Cluster, o *ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget) error { - return nil -} -func postReadExtractClusterConfigMetastoreConfigFields(r *Cluster, o *ClusterConfigMetastoreConfig) error { - return nil -} -func postReadExtractClusterConfigDataprocMetricConfigFields(r *Cluster, o *ClusterConfigDataprocMetricConfig) error { - return nil -} -func postReadExtractClusterConfigDataprocMetricConfigMetricsFields(r *Cluster, o *ClusterConfigDataprocMetricConfigMetrics) error { - return nil -} -func postReadExtractClusterStatusFields(r *Cluster, o *ClusterStatus) error { - return nil -} -func postReadExtractClusterStatusHistoryFields(r *Cluster, o *ClusterStatusHistory) error { - return nil -} -func postReadExtractClusterMetricsFields(r *Cluster, o *ClusterMetrics) error { - return nil -} -func postReadExtractClusterVirtualClusterConfigFields(r *Cluster, o *ClusterVirtualClusterConfig) error { - vKubernetesClusterConfig := o.KubernetesClusterConfig - if vKubernetesClusterConfig == nil { - // note: explicitly not the empty object. 
- vKubernetesClusterConfig = &ClusterVirtualClusterConfigKubernetesClusterConfig{} - } - if err := extractClusterVirtualClusterConfigKubernetesClusterConfigFields(r, vKubernetesClusterConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vKubernetesClusterConfig) { - o.KubernetesClusterConfig = vKubernetesClusterConfig - } - vAuxiliaryServicesConfig := o.AuxiliaryServicesConfig - if vAuxiliaryServicesConfig == nil { - // note: explicitly not the empty object. - vAuxiliaryServicesConfig = &ClusterVirtualClusterConfigAuxiliaryServicesConfig{} - } - if err := extractClusterVirtualClusterConfigAuxiliaryServicesConfigFields(r, vAuxiliaryServicesConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAuxiliaryServicesConfig) { - o.AuxiliaryServicesConfig = vAuxiliaryServicesConfig - } - return nil -} -func postReadExtractClusterVirtualClusterConfigKubernetesClusterConfigFields(r *Cluster, o *ClusterVirtualClusterConfigKubernetesClusterConfig) error { - vGkeClusterConfig := o.GkeClusterConfig - if vGkeClusterConfig == nil { - // note: explicitly not the empty object. - vGkeClusterConfig = &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig{} - } - if err := extractClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigFields(r, vGkeClusterConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vGkeClusterConfig) { - o.GkeClusterConfig = vGkeClusterConfig - } - vKubernetesSoftwareConfig := o.KubernetesSoftwareConfig - if vKubernetesSoftwareConfig == nil { - // note: explicitly not the empty object. 
- vKubernetesSoftwareConfig = &ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig{} - } - if err := extractClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfigFields(r, vKubernetesSoftwareConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vKubernetesSoftwareConfig) { - o.KubernetesSoftwareConfig = vKubernetesSoftwareConfig - } - return nil -} -func postReadExtractClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigFields(r *Cluster, o *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig) error { - return nil -} -func postReadExtractClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetFields(r *Cluster, o *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget) error { - vNodePoolConfig := o.NodePoolConfig - if vNodePoolConfig == nil { - // note: explicitly not the empty object. - vNodePoolConfig = &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig{} - } - if err := extractClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigFields(r, vNodePoolConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vNodePoolConfig) { - o.NodePoolConfig = vNodePoolConfig - } - return nil -} -func postReadExtractClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigFields(r *Cluster, o *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig) error { - vConfig := o.Config - if vConfig == nil { - // note: explicitly not the empty object. 
- vConfig = &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig{} - } - if err := extractClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigFields(r, vConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vConfig) { - o.Config = vConfig - } - vAutoscaling := o.Autoscaling - if vAutoscaling == nil { - // note: explicitly not the empty object. - vAutoscaling = &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling{} - } - if err := extractClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscalingFields(r, vAutoscaling); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAutoscaling) { - o.Autoscaling = vAutoscaling - } - return nil -} -func postReadExtractClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigFields(r *Cluster, o *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig) error { - vEphemeralStorageConfig := o.EphemeralStorageConfig - if vEphemeralStorageConfig == nil { - // note: explicitly not the empty object. 
- vEphemeralStorageConfig = &ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig{} - } - if err := extractClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfigFields(r, vEphemeralStorageConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vEphemeralStorageConfig) { - o.EphemeralStorageConfig = vEphemeralStorageConfig - } - return nil -} -func postReadExtractClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAcceleratorsFields(r *Cluster, o *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators) error { - return nil -} -func postReadExtractClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfigFields(r *Cluster, o *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig) error { - return nil -} -func postReadExtractClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscalingFields(r *Cluster, o *ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling) error { - return nil -} -func postReadExtractClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfigFields(r *Cluster, o *ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig) error { - return nil -} -func postReadExtractClusterVirtualClusterConfigAuxiliaryServicesConfigFields(r *Cluster, o *ClusterVirtualClusterConfigAuxiliaryServicesConfig) error { - vMetastoreConfig := o.MetastoreConfig - if vMetastoreConfig == nil { - // note: explicitly not the empty object. 
- vMetastoreConfig = &ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig{} - } - if err := extractClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfigFields(r, vMetastoreConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vMetastoreConfig) { - o.MetastoreConfig = vMetastoreConfig - } - vSparkHistoryServerConfig := o.SparkHistoryServerConfig - if vSparkHistoryServerConfig == nil { - // note: explicitly not the empty object. - vSparkHistoryServerConfig = &ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig{} - } - if err := extractClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfigFields(r, vSparkHistoryServerConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSparkHistoryServerConfig) { - o.SparkHistoryServerConfig = vSparkHistoryServerConfig - } - return nil -} -func postReadExtractClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfigFields(r *Cluster, o *ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig) error { - return nil -} -func postReadExtractClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfigFields(r *Cluster, o *ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig) error { - return nil -} diff --git a/mmv1/third_party/terraform/services/dataproc/cluster_schema.go.tmpl b/mmv1/third_party/terraform/services/dataproc/cluster_schema.go.tmpl deleted file mode 100644 index 962286ba33e1..000000000000 --- a/mmv1/third_party/terraform/services/dataproc/cluster_schema.go.tmpl +++ /dev/null @@ -1,1941 +0,0 @@ -package dataproc - -import ( - dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" -) - -func DCLClusterSchema() *dcl.Schema { - return &dcl.Schema{ - Info: &dcl.Info{ - Title: "Dataproc/Cluster", - Description: "The Dataproc Cluster resource", - StructName: "Cluster", - HasIAM: true, - }, - Paths: &dcl.Paths{ - Get: &dcl.Path{ - Description: "The function 
used to get information about a Cluster", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "cluster", - Required: true, - Description: "A full instance of a Cluster", - }, - }, - }, - Apply: &dcl.Path{ - Description: "The function used to apply information about a Cluster", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "cluster", - Required: true, - Description: "A full instance of a Cluster", - }, - }, - }, - Delete: &dcl.Path{ - Description: "The function used to delete a Cluster", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "cluster", - Required: true, - Description: "A full instance of a Cluster", - }, - }, - }, - DeleteAll: &dcl.Path{ - Description: "The function used to delete all Cluster", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "location", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - List: &dcl.Path{ - Description: "The function used to list information about many Cluster", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "location", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - }, - Components: &dcl.Components{ - Schemas: map[string]*dcl.Component{ - "Cluster": &dcl.Component{ - Title: "Cluster", - ID: "projects/{{ "{{" }}project{{ "}}" }}/regions/{{ "{{" }}location{{ "}}" }}/clusters/{{ "{{" }}name{{ "}}" }}", - UsesStateHint: true, - ParentContainer: "project", - LabelsField: "labels", - HasCreate: true, - HasIAM: true, - SchemaProperty: dcl.Property{ - Type: "object", - Required: []string{ - "project", - "name", - "location", - }, - Properties: map[string]*dcl.Property{ - "clusterUuid": &dcl.Property{ - Type: 
"string", - GoName: "ClusterUuid", - ReadOnly: true, - Description: "Output only. A cluster UUID (Unique Universal Identifier). Dataproc generates this value when it creates the cluster.", - Immutable: true, - }, - "config": &dcl.Property{ - Type: "object", - GoName: "Config", - GoType: "ClusterConfig", - Description: "The cluster config. Note that Dataproc may set default values, and values may change when clusters are updated.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "autoscalingConfig": &dcl.Property{ - Type: "object", - GoName: "AutoscalingConfig", - GoType: "ClusterConfigAutoscalingConfig", - Description: "Optional. Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "policy": &dcl.Property{ - Type: "string", - GoName: "Policy", - Description: "Optional. The autoscaling policy used by the cluster. Only resource names including projectid and location (region) are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]` * `projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]` Note that the policy must be in the same project and Dataproc region.", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Dataproc/AutoscalingPolicy", - Field: "name", - }, - }, - }, - }, - }, - "dataprocMetricConfig": &dcl.Property{ - Type: "object", - GoName: "DataprocMetricConfig", - GoType: "ClusterConfigDataprocMetricConfig", - Description: "Optional. The config for Dataproc metrics.", - Immutable: true, - Required: []string{ - "metrics", - }, - Properties: map[string]*dcl.Property{ - "metrics": &dcl.Property{ - Type: "array", - GoName: "Metrics", - Description: "Required. 
Metrics sources to enable.", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "ClusterConfigDataprocMetricConfigMetrics", - Required: []string{ - "metricSource", - }, - Properties: map[string]*dcl.Property{ - "metricOverrides": &dcl.Property{ - Type: "array", - GoName: "MetricOverrides", - Description: "Optional. Specify one or more [available OSS metrics] (https://cloud.google.com/dataproc/docs/guides/monitoring#available_oss_metrics) to collect for the metric course (for the `SPARK` metric source, any [Spark metric] (https://spark.apache.org/docs/latest/monitoring.html#metrics) can be specified). Provide metrics in the following format: `METRIC_SOURCE:INSTANCE:GROUP:METRIC` Use camelcase as appropriate. Examples: ``` yarn:ResourceManager:QueueMetrics:AppsCompleted spark:driver:DAGScheduler:job.allJobs sparkHistoryServer:JVM:Memory:NonHeapMemoryUsage.committed hiveserver2:JVM:Memory:NonHeapMemoryUsage.used ``` Notes: * Only the specified overridden metrics will be collected for the metric source. For example, if one or more `spark:executive` metrics are listed as metric overrides, other `SPARK` metrics will not be collected. The collection of the default metrics for other OSS metric sources is unaffected. For example, if both `SPARK` andd `YARN` metric sources are enabled, and overrides are provided for Spark metrics only, all default YARN metrics will be collected.", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "metricSource": &dcl.Property{ - Type: "string", - GoName: "MetricSource", - GoType: "ClusterConfigDataprocMetricConfigMetricsMetricSourceEnum", - Description: "Required. Default metrics are collected unless `metricOverrides` are specified for the metric source (see [Available OSS metrics] (https://cloud.google.com/dataproc/docs/guides/monitoring#available_oss_metrics) for more information). 
Possible values: METRIC_SOURCE_UNSPECIFIED, MONITORING_AGENT_DEFAULTS, HDFS, SPARK, YARN, SPARK_HISTORY_SERVER, HIVESERVER2", - Immutable: true, - Enum: []string{ - "METRIC_SOURCE_UNSPECIFIED", - "MONITORING_AGENT_DEFAULTS", - "HDFS", - "SPARK", - "YARN", - "SPARK_HISTORY_SERVER", - "HIVESERVER2", - }, - }, - }, - }, - }, - }, - }, - "encryptionConfig": &dcl.Property{ - Type: "object", - GoName: "EncryptionConfig", - GoType: "ClusterConfigEncryptionConfig", - Description: "Optional. Encryption settings for the cluster.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "gcePdKmsKeyName": &dcl.Property{ - Type: "string", - GoName: "GcePdKmsKeyName", - Description: "Optional. The Cloud KMS key name to use for PD disk encryption for all instances in the cluster.", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Cloudkms/CryptoKey", - Field: "selfLink", - }, - }, - }, - }, - }, - "endpointConfig": &dcl.Property{ - Type: "object", - GoName: "EndpointConfig", - GoType: "ClusterConfigEndpointConfig", - Description: "Optional. Port/endpoint configuration for this cluster", - Immutable: true, - ServerDefault: true, - Properties: map[string]*dcl.Property{ - "enableHttpPortAccess": &dcl.Property{ - Type: "boolean", - GoName: "EnableHttpPortAccess", - Description: "Optional. If true, enable http access to specific ports on the cluster from external sources. Defaults to false.", - Immutable: true, - }, - "httpPorts": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "HttpPorts", - ReadOnly: true, - Description: "Output only. The map of port descriptions to URLs. Will only be populated if enable_http_port_access is true.", - Immutable: true, - }, - }, - }, - "gceClusterConfig": &dcl.Property{ - Type: "object", - GoName: "GceClusterConfig", - GoType: "ClusterConfigGceClusterConfig", - Description: "Optional. 
The shared Compute Engine config settings for all instances in a cluster.", - Immutable: true, - ServerDefault: true, - Properties: map[string]*dcl.Property{ - "confidentialInstanceConfig": &dcl.Property{ - Type: "object", - GoName: "ConfidentialInstanceConfig", - GoType: "ClusterConfigGceClusterConfigConfidentialInstanceConfig", - Description: "Optional. Confidential Instance Config for clusters using [Confidential VMs](https://cloud.google.com/compute/confidential-vm/docs).", - Immutable: true, - Properties: map[string]*dcl.Property{ - "enableConfidentialCompute": &dcl.Property{ - Type: "boolean", - GoName: "EnableConfidentialCompute", - Description: "Optional. Defines whether the instance should have confidential compute enabled.", - Immutable: true, - }, - }, - }, - "internalIPOnly": &dcl.Property{ - Type: "boolean", - GoName: "InternalIPOnly", - Description: "Optional. If true, all instances in the cluster will only have internal IP addresses. By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. This `internal_ip_only` restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses.", - Immutable: true, - ServerDefault: true, - }, - "metadata": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "Metadata", - Description: "The Compute Engine metadata entries to add to all instances (see [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).", - Immutable: true, - }, - "network": &dcl.Property{ - Type: "string", - GoName: "Network", - Description: "Optional. The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. 
If neither `network_uri` nor `subnetwork_uri` is specified, the \"default\" network of the project is used, if it exists. Cannot be a \"Custom Subnet Network\" (see [Using Subnetworks](https://cloud.google.com/compute/docs/subnetworks) for more information). A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default` * `projects/[project_id]/regions/global/default` * `default`", - Immutable: true, - ServerDefault: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Compute/Network", - Field: "selfLink", - }, - }, - }, - "nodeGroupAffinity": &dcl.Property{ - Type: "object", - GoName: "NodeGroupAffinity", - GoType: "ClusterConfigGceClusterConfigNodeGroupAffinity", - Description: "Optional. Node Group Affinity for sole-tenant clusters.", - Immutable: true, - Required: []string{ - "nodeGroup", - }, - Properties: map[string]*dcl.Property{ - "nodeGroup": &dcl.Property{ - Type: "string", - GoName: "NodeGroup", - Description: "Required. The URI of a sole-tenant [node group resource](https://cloud.google.com/compute/docs/reference/rest/v1/nodeGroups) that the cluster will be created on. A full URL, partial URI, or node group name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1` * `projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1` * `node-group-1`", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Compute/NodeGroup", - Field: "selfLink", - }, - }, - }, - }, - }, - "privateIPv6GoogleAccess": &dcl.Property{ - Type: "string", - GoName: "PrivateIPv6GoogleAccess", - GoType: "ClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum", - Description: "Optional. The type of IPv6 access for a cluster. 
Possible values: PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED, INHERIT_FROM_SUBNETWORK, OUTBOUND, BIDIRECTIONAL", - Immutable: true, - Enum: []string{ - "PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED", - "INHERIT_FROM_SUBNETWORK", - "OUTBOUND", - "BIDIRECTIONAL", - }, - }, - "reservationAffinity": &dcl.Property{ - Type: "object", - GoName: "ReservationAffinity", - GoType: "ClusterConfigGceClusterConfigReservationAffinity", - Description: "Optional. Reservation Affinity for consuming Zonal reservation.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "consumeReservationType": &dcl.Property{ - Type: "string", - GoName: "ConsumeReservationType", - GoType: "ClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum", - Description: "Optional. Type of reservation to consume Possible values: TYPE_UNSPECIFIED, NO_RESERVATION, ANY_RESERVATION, SPECIFIC_RESERVATION", - Immutable: true, - Enum: []string{ - "TYPE_UNSPECIFIED", - "NO_RESERVATION", - "ANY_RESERVATION", - "SPECIFIC_RESERVATION", - }, - }, - "key": &dcl.Property{ - Type: "string", - GoName: "Key", - Description: "Optional. Corresponds to the label key of reservation resource.", - Immutable: true, - }, - "values": &dcl.Property{ - Type: "array", - GoName: "Values", - Description: "Optional. Corresponds to the label values of reservation resource.", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - }, - }, - "serviceAccount": &dcl.Property{ - Type: "string", - GoName: "ServiceAccount", - Description: "Optional. The [Dataproc service account](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_dataproc) (also see [VM Data Plane identity](https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity)) used by Dataproc cluster VM instances to access Google Cloud Platform services. 
If not specified, the [Compute Engine default service account](https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) is used.", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Iam/ServiceAccount", - Field: "email", - }, - }, - }, - "serviceAccountScopes": &dcl.Property{ - Type: "array", - GoName: "ServiceAccountScopes", - Description: "Optional. The URIs of service account scopes to be included in Compute Engine instances. The following base set of scopes is always included: * https://www.googleapis.com/auth/cloud.useraccounts.readonly * https://www.googleapis.com/auth/devstorage.read_write * https://www.googleapis.com/auth/logging.write If no scopes are specified, the following defaults are also provided: * https://www.googleapis.com/auth/bigquery * https://www.googleapis.com/auth/bigtable.admin.table * https://www.googleapis.com/auth/bigtable.data * https://www.googleapis.com/auth/devstorage.full_control", - Immutable: true, - ServerDefault: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "shieldedInstanceConfig": &dcl.Property{ - Type: "object", - GoName: "ShieldedInstanceConfig", - GoType: "ClusterConfigGceClusterConfigShieldedInstanceConfig", - Description: "Optional. Shielded Instance Config for clusters using [Compute Engine Shielded VMs](https://cloud.google.com/security/shielded-cloud/shielded-vm).", - Immutable: true, - Properties: map[string]*dcl.Property{ - "enableIntegrityMonitoring": &dcl.Property{ - Type: "boolean", - GoName: "EnableIntegrityMonitoring", - Description: "Optional. Defines whether instances have integrity monitoring enabled.", - Immutable: true, - }, - "enableSecureBoot": &dcl.Property{ - Type: "boolean", - GoName: "EnableSecureBoot", - Description: "Optional. 
Defines whether instances have Secure Boot enabled.", - Immutable: true, - }, - "enableVtpm": &dcl.Property{ - Type: "boolean", - GoName: "EnableVtpm", - Description: "Optional. Defines whether instances have the vTPM enabled.", - Immutable: true, - }, - }, - }, - "subnetwork": &dcl.Property{ - Type: "string", - GoName: "Subnetwork", - Description: "Optional. The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/subnetworks/sub0` * `projects/[project_id]/regions/us-east1/subnetworks/sub0` * `sub0`", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Compute/Subnetwork", - Field: "selfLink", - }, - }, - }, - "tags": &dcl.Property{ - Type: "array", - GoName: "Tags", - Description: "The Compute Engine tags to add to all instances (see [Tagging instances](https://cloud.google.com/compute/docs/label-or-tag-resources#tags)).", - Immutable: true, - SendEmpty: true, - ListType: "set", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "zone": &dcl.Property{ - Type: "string", - GoName: "Zone", - Description: "Optional. The zone where the Compute Engine cluster will be located. On a create request, it is required in the \"global\" region. If omitted in a non-global Dataproc region, the service will pick a zone in the corresponding Compute Engine region. On a get request, zone will always be present. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]` * `projects/[project_id]/zones/[zone]` * `us-central1-f`", - Immutable: true, - }, - }, - }, - "gkeClusterConfig": &dcl.Property{ - Type: "object", - GoName: "GkeClusterConfig", - GoType: "ClusterConfigGkeClusterConfig", - Description: "Optional. BETA. 
The Kubernetes Engine config for Dataproc clusters deployed to Kubernetes. Setting this is considered mutually exclusive with Compute Engine-based options such as `gce_cluster_config`, `master_config`, `worker_config`, `secondary_worker_config`, and `autoscaling_config`.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "namespacedGkeDeploymentTarget": &dcl.Property{ - Type: "object", - GoName: "NamespacedGkeDeploymentTarget", - GoType: "ClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget", - Description: "Optional. A target for the deployment.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "clusterNamespace": &dcl.Property{ - Type: "string", - GoName: "ClusterNamespace", - Description: "Optional. A namespace within the GKE cluster to deploy into.", - Immutable: true, - }, - "targetGkeCluster": &dcl.Property{ - Type: "string", - GoName: "TargetGkeCluster", - Description: "Optional. The target GKE cluster to deploy to. Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Container/Cluster", - Field: "name", - }, - }, - }, - }, - }, - }, - }, - "initializationActions": &dcl.Property{ - Type: "array", - GoName: "InitializationActions", - Description: "Optional. Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's `role` metadata to run an executable on a master or worker node, as shown below using `curl` (you can also use `wget`): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if [[ \"${ROLE}\" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... 
fi", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "ClusterConfigInitializationActions", - Required: []string{ - "executableFile", - }, - Properties: map[string]*dcl.Property{ - "executableFile": &dcl.Property{ - Type: "string", - GoName: "ExecutableFile", - Description: "Required. Cloud Storage URI of executable file.", - Immutable: true, - }, - "executionTimeout": &dcl.Property{ - Type: "string", - GoName: "ExecutionTimeout", - Description: "Optional. Amount of time executable has to complete. Default is 10 minutes (see JSON representation of [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period.", - Immutable: true, - }, - }, - }, - }, - "lifecycleConfig": &dcl.Property{ - Type: "object", - GoName: "LifecycleConfig", - GoType: "ClusterConfigLifecycleConfig", - Description: "Optional. Lifecycle setting for the cluster.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "autoDeleteTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "AutoDeleteTime", - Description: "Optional. The time when cluster will be auto-deleted (see JSON representation of [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).", - Immutable: true, - }, - "autoDeleteTtl": &dcl.Property{ - Type: "string", - GoName: "AutoDeleteTtl", - Description: "Optional. The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).", - Immutable: true, - }, - "idleDeleteTtl": &dcl.Property{ - Type: "string", - GoName: "IdleDeleteTtl", - Description: "Optional. 
The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).", - Immutable: true, - }, - "idleStartTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "IdleStartTime", - ReadOnly: true, - Description: "Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).", - Immutable: true, - }, - }, - }, - "masterConfig": &dcl.Property{ - Type: "object", - GoName: "MasterConfig", - GoType: "ClusterConfigMasterConfig", - Description: "Optional. The Compute Engine config settings for the master instance in a cluster.", - Immutable: true, - ServerDefault: true, - Properties: map[string]*dcl.Property{ - "accelerators": &dcl.Property{ - Type: "array", - GoName: "Accelerators", - Description: "Optional. The Compute Engine accelerator configuration for these instances.", - Immutable: true, - ServerDefault: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "ClusterConfigMasterConfigAccelerators", - Properties: map[string]*dcl.Property{ - "acceleratorCount": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "AcceleratorCount", - Description: "The number of the accelerator cards of this type exposed to this instance.", - Immutable: true, - }, - "acceleratorType": &dcl.Property{ - Type: "string", - GoName: "AcceleratorType", - Description: "Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See [Compute Engine AcceleratorTypes](https://cloud.google.com/compute/docs/reference/beta/acceleratorTypes). 
Examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * `nvidia-tesla-k80` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, `nvidia-tesla-k80`.", - Immutable: true, - }, - }, - }, - }, - "diskConfig": &dcl.Property{ - Type: "object", - GoName: "DiskConfig", - GoType: "ClusterConfigMasterConfigDiskConfig", - Description: "Optional. Disk option config settings.", - Immutable: true, - ServerDefault: true, - Properties: map[string]*dcl.Property{ - "bootDiskSizeGb": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "BootDiskSizeGb", - Description: "Optional. Size in GB of the boot disk (default is 500GB).", - Immutable: true, - }, - "bootDiskType": &dcl.Property{ - Type: "string", - GoName: "BootDiskType", - Description: "Optional. Type of the boot disk (default is \"pd-standard\"). Valid values: \"pd-balanced\" (Persistent Disk Balanced Solid State Drive), \"pd-ssd\" (Persistent Disk Solid State Drive), or \"pd-standard\" (Persistent Disk Hard Disk Drive). See [Disk types](https://cloud.google.com/compute/docs/disks#disk-types).", - Immutable: true, - }, - "localSsdInterface": &dcl.Property{ - Type: "string", - GoName: "LocalSsdInterface", - Description: "Optional. Interface type of local SSDs (default is \"scsi\"). Valid values: \"scsi\" (Small Computer System Interface), \"nvme\" (Non-Volatile Memory Express). See [local SSD performance](https://cloud.google.com/compute/docs/disks/local-ssd#performance).", - Immutable: true, - }, - "numLocalSsds": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "NumLocalSsds", - Description: "Optional. 
Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.", - Immutable: true, - ServerDefault: true, - }, - }, - }, - "image": &dcl.Property{ - Type: "string", - GoName: "Image", - Description: "Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id]` * `projects/[project_id]/global/images/[image-id]` * `image-id` Image family examples. Dataproc will use the most recent image from the family: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name]` * `projects/[project_id]/global/images/family/[custom-image-family-name]` If the URI is unspecified, it will be inferred from `SoftwareConfig.image_version` or the system default.", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Compute/Image", - Field: "selfLink", - }, - }, - }, - "instanceNames": &dcl.Property{ - Type: "array", - GoName: "InstanceNames", - ReadOnly: true, - Description: "Output only. The list of instance names. Dataproc derives the names from `cluster_name`, `num_instances`, and the instance group.", - Immutable: true, - ServerDefault: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Compute/Instance", - Field: "selfLink", - }, - }, - }, - }, - "instanceReferences": &dcl.Property{ - Type: "array", - GoName: "InstanceReferences", - ReadOnly: true, - Description: "Output only. 
List of references to Compute Engine instances.", - Immutable: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "ClusterConfigMasterConfigInstanceReferences", - Properties: map[string]*dcl.Property{ - "instanceId": &dcl.Property{ - Type: "string", - GoName: "InstanceId", - Description: "The unique identifier of the Compute Engine instance.", - Immutable: true, - }, - "instanceName": &dcl.Property{ - Type: "string", - GoName: "InstanceName", - Description: "The user-friendly name of the Compute Engine instance.", - Immutable: true, - }, - "publicEciesKey": &dcl.Property{ - Type: "string", - GoName: "PublicEciesKey", - Description: "The public ECIES key used for sharing data with this instance.", - Immutable: true, - }, - "publicKey": &dcl.Property{ - Type: "string", - GoName: "PublicKey", - Description: "The public RSA key used for sharing data with this instance.", - Immutable: true, - }, - }, - }, - }, - "isPreemptible": &dcl.Property{ - Type: "boolean", - GoName: "IsPreemptible", - ReadOnly: true, - Description: "Output only. Specifies that this instance group contains preemptible instances.", - Immutable: true, - }, - "machineType": &dcl.Property{ - Type: "string", - GoName: "MachineType", - Description: "Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. 
Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `n1-standard-2` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, `n1-standard-2`.", - Immutable: true, - }, - "managedGroupConfig": &dcl.Property{ - Type: "object", - GoName: "ManagedGroupConfig", - GoType: "ClusterConfigMasterConfigManagedGroupConfig", - ReadOnly: true, - Description: "Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.", - Immutable: true, - ServerDefault: true, - Properties: map[string]*dcl.Property{ - "instanceGroupManagerName": &dcl.Property{ - Type: "string", - GoName: "InstanceGroupManagerName", - ReadOnly: true, - Description: "Output only. The name of the Instance Group Manager for this group.", - Immutable: true, - }, - "instanceTemplateName": &dcl.Property{ - Type: "string", - GoName: "InstanceTemplateName", - ReadOnly: true, - Description: "Output only. The name of the Instance Template used for the Managed Instance Group.", - Immutable: true, - }, - }, - }, - "minCpuPlatform": &dcl.Property{ - Type: "string", - GoName: "MinCpuPlatform", - Description: "Optional. Specifies the minimum cpu platform for the Instance Group. See [Dataproc -> Minimum CPU Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).", - Immutable: true, - ServerDefault: true, - }, - "numInstances": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "NumInstances", - Description: "Optional. The number of VM instances in the instance group. 
For [HA cluster](/dataproc/docs/concepts/configuring-clusters/high-availability) [master_config](#FIELDS.master_config) groups, **must be set to 3**. For standard cluster [master_config](#FIELDS.master_config) groups, **must be set to 1**.", - Immutable: true, - }, - "preemptibility": &dcl.Property{ - Type: "string", - GoName: "Preemptibility", - GoType: "ClusterConfigMasterConfigPreemptibilityEnum", - Description: "Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is `NON_PREEMPTIBLE`. This default cannot be changed. The default value for secondary instances is `PREEMPTIBLE`. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE", - Immutable: true, - Enum: []string{ - "PREEMPTIBILITY_UNSPECIFIED", - "NON_PREEMPTIBLE", - "PREEMPTIBLE", - }, - }, - }, - }, - "metastoreConfig": &dcl.Property{ - Type: "object", - GoName: "MetastoreConfig", - GoType: "ClusterConfigMetastoreConfig", - Description: "Optional. Metastore configuration.", - Immutable: true, - Required: []string{ - "dataprocMetastoreService", - }, - Properties: map[string]*dcl.Property{ - "dataprocMetastoreService": &dcl.Property{ - Type: "string", - GoName: "DataprocMetastoreService", - Description: "Required. Resource name of an existing Dataproc Metastore service. Example: * `projects/[project_id]/locations/[dataproc_region]/services/[service-name]`", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Metastore/Service", - Field: "selfLink", - }, - }, - }, - }, - }, - "secondaryWorkerConfig": &dcl.Property{ - Type: "object", - GoName: "SecondaryWorkerConfig", - GoType: "ClusterConfigSecondaryWorkerConfig", - Description: "Optional. 
The Compute Engine config settings for additional worker instances in a cluster.", - Immutable: true, - ServerDefault: true, - Properties: map[string]*dcl.Property{ - "accelerators": &dcl.Property{ - Type: "array", - GoName: "Accelerators", - Description: "Optional. The Compute Engine accelerator configuration for these instances.", - Immutable: true, - ServerDefault: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "ClusterConfigSecondaryWorkerConfigAccelerators", - Properties: map[string]*dcl.Property{ - "acceleratorCount": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "AcceleratorCount", - Description: "The number of the accelerator cards of this type exposed to this instance.", - Immutable: true, - }, - "acceleratorType": &dcl.Property{ - Type: "string", - GoName: "AcceleratorType", - Description: "Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See [Compute Engine AcceleratorTypes](https://cloud.google.com/compute/docs/reference/beta/acceleratorTypes). Examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * `nvidia-tesla-k80` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, `nvidia-tesla-k80`.", - Immutable: true, - }, - }, - }, - }, - "diskConfig": &dcl.Property{ - Type: "object", - GoName: "DiskConfig", - GoType: "ClusterConfigSecondaryWorkerConfigDiskConfig", - Description: "Optional. 
Disk option config settings.", - Immutable: true, - ServerDefault: true, - Properties: map[string]*dcl.Property{ - "bootDiskSizeGb": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "BootDiskSizeGb", - Description: "Optional. Size in GB of the boot disk (default is 500GB).", - Immutable: true, - }, - "bootDiskType": &dcl.Property{ - Type: "string", - GoName: "BootDiskType", - Description: "Optional. Type of the boot disk (default is \"pd-standard\"). Valid values: \"pd-balanced\" (Persistent Disk Balanced Solid State Drive), \"pd-ssd\" (Persistent Disk Solid State Drive), or \"pd-standard\" (Persistent Disk Hard Disk Drive). See [Disk types](https://cloud.google.com/compute/docs/disks#disk-types).", - Immutable: true, - }, - "localSsdInterface": &dcl.Property{ - Type: "string", - GoName: "LocalSsdInterface", - Description: "Optional. Interface type of local SSDs (default is \"scsi\"). Valid values: \"scsi\" (Small Computer System Interface), \"nvme\" (Non-Volatile Memory Express). See [local SSD performance](https://cloud.google.com/compute/docs/disks/local-ssd#performance).", - Immutable: true, - }, - "numLocalSsds": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "NumLocalSsds", - Description: "Optional. Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.", - Immutable: true, - ServerDefault: true, - }, - }, - }, - "image": &dcl.Property{ - Type: "string", - GoName: "Image", - Description: "Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. 
Image examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id]` * `projects/[project_id]/global/images/[image-id]` * `image-id` Image family examples. Dataproc will use the most recent image from the family: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name]` * `projects/[project_id]/global/images/family/[custom-image-family-name]` If the URI is unspecified, it will be inferred from `SoftwareConfig.image_version` or the system default.", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Compute/Image", - Field: "selfLink", - }, - }, - }, - "instanceNames": &dcl.Property{ - Type: "array", - GoName: "InstanceNames", - ReadOnly: true, - Description: "Output only. The list of instance names. Dataproc derives the names from `cluster_name`, `num_instances`, and the instance group.", - Immutable: true, - ServerDefault: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Compute/Instance", - Field: "selfLink", - }, - }, - }, - }, - "instanceReferences": &dcl.Property{ - Type: "array", - GoName: "InstanceReferences", - ReadOnly: true, - Description: "Output only. 
List of references to Compute Engine instances.", - Immutable: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "ClusterConfigSecondaryWorkerConfigInstanceReferences", - Properties: map[string]*dcl.Property{ - "instanceId": &dcl.Property{ - Type: "string", - GoName: "InstanceId", - Description: "The unique identifier of the Compute Engine instance.", - Immutable: true, - }, - "instanceName": &dcl.Property{ - Type: "string", - GoName: "InstanceName", - Description: "The user-friendly name of the Compute Engine instance.", - Immutable: true, - }, - "publicEciesKey": &dcl.Property{ - Type: "string", - GoName: "PublicEciesKey", - Description: "The public ECIES key used for sharing data with this instance.", - Immutable: true, - }, - "publicKey": &dcl.Property{ - Type: "string", - GoName: "PublicKey", - Description: "The public RSA key used for sharing data with this instance.", - Immutable: true, - }, - }, - }, - }, - "isPreemptible": &dcl.Property{ - Type: "boolean", - GoName: "IsPreemptible", - ReadOnly: true, - Description: "Output only. Specifies that this instance group contains preemptible instances.", - Immutable: true, - }, - "machineType": &dcl.Property{ - Type: "string", - GoName: "MachineType", - Description: "Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. 
Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `n1-standard-2` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, `n1-standard-2`.", - Immutable: true, - }, - "managedGroupConfig": &dcl.Property{ - Type: "object", - GoName: "ManagedGroupConfig", - GoType: "ClusterConfigSecondaryWorkerConfigManagedGroupConfig", - ReadOnly: true, - Description: "Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.", - Immutable: true, - ServerDefault: true, - Properties: map[string]*dcl.Property{ - "instanceGroupManagerName": &dcl.Property{ - Type: "string", - GoName: "InstanceGroupManagerName", - ReadOnly: true, - Description: "Output only. The name of the Instance Group Manager for this group.", - Immutable: true, - }, - "instanceTemplateName": &dcl.Property{ - Type: "string", - GoName: "InstanceTemplateName", - ReadOnly: true, - Description: "Output only. The name of the Instance Template used for the Managed Instance Group.", - Immutable: true, - }, - }, - }, - "minCpuPlatform": &dcl.Property{ - Type: "string", - GoName: "MinCpuPlatform", - Description: "Optional. Specifies the minimum cpu platform for the Instance Group. See [Dataproc -> Minimum CPU Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).", - Immutable: true, - ServerDefault: true, - }, - "numInstances": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "NumInstances", - Description: "Optional. The number of VM instances in the instance group. 
For [HA cluster](/dataproc/docs/concepts/configuring-clusters/high-availability) [master_config](#FIELDS.master_config) groups, **must be set to 3**. For standard cluster [master_config](#FIELDS.master_config) groups, **must be set to 1**.", - Immutable: true, - }, - "preemptibility": &dcl.Property{ - Type: "string", - GoName: "Preemptibility", - GoType: "ClusterConfigSecondaryWorkerConfigPreemptibilityEnum", - Description: "Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is `NON_PREEMPTIBLE`. This default cannot be changed. The default value for secondary instances is `PREEMPTIBLE`. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE", - Immutable: true, - Enum: []string{ - "PREEMPTIBILITY_UNSPECIFIED", - "NON_PREEMPTIBLE", - "PREEMPTIBLE", - }, - }, - }, - }, - "securityConfig": &dcl.Property{ - Type: "object", - GoName: "SecurityConfig", - GoType: "ClusterConfigSecurityConfig", - Description: "Optional. Security settings for the cluster.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "identityConfig": &dcl.Property{ - Type: "object", - GoName: "IdentityConfig", - GoType: "ClusterConfigSecurityConfigIdentityConfig", - Description: "Optional. Identity related configuration, including service account based secure multi-tenancy user mappings.", - Immutable: true, - Required: []string{ - "userServiceAccountMapping", - }, - Properties: map[string]*dcl.Property{ - "userServiceAccountMapping": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "UserServiceAccountMapping", - Description: "Required. Map of user to service account.", - Immutable: true, - }, - }, - }, - "kerberosConfig": &dcl.Property{ - Type: "object", - GoName: "KerberosConfig", - GoType: "ClusterConfigSecurityConfigKerberosConfig", - Description: "Optional. 
Kerberos related configuration.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "crossRealmTrustAdminServer": &dcl.Property{ - Type: "string", - GoName: "CrossRealmTrustAdminServer", - Description: "Optional. The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship.", - Immutable: true, - }, - "crossRealmTrustKdc": &dcl.Property{ - Type: "string", - GoName: "CrossRealmTrustKdc", - Description: "Optional. The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship.", - Immutable: true, - }, - "crossRealmTrustRealm": &dcl.Property{ - Type: "string", - GoName: "CrossRealmTrustRealm", - Description: "Optional. The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust.", - Immutable: true, - }, - "crossRealmTrustSharedPassword": &dcl.Property{ - Type: "string", - GoName: "CrossRealmTrustSharedPassword", - Description: "Optional. The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship.", - Immutable: true, - }, - "enableKerberos": &dcl.Property{ - Type: "boolean", - GoName: "EnableKerberos", - Description: "Optional. Flag to indicate whether to Kerberize the cluster (default: false). Set this field to true to enable Kerberos on a cluster.", - Immutable: true, - }, - "kdcDbKey": &dcl.Property{ - Type: "string", - GoName: "KdcDbKey", - Description: "Optional. The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database.", - Immutable: true, - }, - "keyPassword": &dcl.Property{ - Type: "string", - GoName: "KeyPassword", - Description: "Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. 
For the self-signed certificate, this password is generated by Dataproc.", - Immutable: true, - }, - "keystore": &dcl.Property{ - Type: "string", - GoName: "Keystore", - Description: "Optional. The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.", - Immutable: true, - }, - "keystorePassword": &dcl.Property{ - Type: "string", - GoName: "KeystorePassword", - Description: "Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, this password is generated by Dataproc.", - Immutable: true, - }, - "kmsKey": &dcl.Property{ - Type: "string", - GoName: "KmsKey", - Description: "Optional. The uri of the KMS key used to encrypt various sensitive files.", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Cloudkms/CryptoKey", - Field: "selfLink", - }, - }, - }, - "realm": &dcl.Property{ - Type: "string", - GoName: "Realm", - Description: "Optional. The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm.", - Immutable: true, - }, - "rootPrincipalPassword": &dcl.Property{ - Type: "string", - GoName: "RootPrincipalPassword", - Description: "Optional. The Cloud Storage URI of a KMS encrypted file containing the root principal password.", - Immutable: true, - }, - "tgtLifetimeHours": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "TgtLifetimeHours", - Description: "Optional. The lifetime of the ticket granting ticket, in hours. If not specified, or user specifies 0, then default value 10 will be used.", - Immutable: true, - }, - "truststore": &dcl.Property{ - Type: "string", - GoName: "Truststore", - Description: "Optional. The Cloud Storage URI of the truststore file used for SSL encryption. 
If not provided, Dataproc will provide a self-signed certificate.", - Immutable: true, - }, - "truststorePassword": &dcl.Property{ - Type: "string", - GoName: "TruststorePassword", - Description: "Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc.", - Immutable: true, - }, - }, - }, - }, - }, - "softwareConfig": &dcl.Property{ - Type: "object", - GoName: "SoftwareConfig", - GoType: "ClusterConfigSoftwareConfig", - Description: "Optional. The config settings for software inside the cluster.", - Immutable: true, - ServerDefault: true, - Properties: map[string]*dcl.Property{ - "imageVersion": &dcl.Property{ - Type: "string", - GoName: "ImageVersion", - Description: "Optional. The version of software inside the cluster. It must be one of the supported [Dataproc Versions](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_dataproc_versions), such as \"1.2\" (including a subminor version, such as \"1.2.29\"), or the [\"preview\" version](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). If unspecified, it defaults to the latest Debian version.", - Immutable: true, - }, - "optionalComponents": &dcl.Property{ - Type: "array", - GoName: "OptionalComponents", - Description: "Optional. 
The set of components to activate on the cluster.", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "ClusterConfigSoftwareConfigOptionalComponentsEnum", - Enum: []string{ - "COMPONENT_UNSPECIFIED", - "ANACONDA", - "DOCKER", - "DRUID", - "FLINK", - "HBASE", - "HIVE_WEBHCAT", - "JUPYTER", - "KERBEROS", - "PRESTO", - "RANGER", - "SOLR", - "ZEPPELIN", - "ZOOKEEPER", - }, - }, - }, - "properties": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "Properties", - Description: "Optional. The properties to set on daemon config files. Property keys are specified in `prefix:property` format, for example `core:hadoop.tmp.dir`. The following are supported prefixes and their mappings: * capacity-scheduler: `capacity-scheduler.xml` * core: `core-site.xml` * distcp: `distcp-default.xml` * hdfs: `hdfs-site.xml` * hive: `hive-site.xml` * mapred: `mapred-site.xml` * pig: `pig.properties` * spark: `spark-defaults.conf` * yarn: `yarn-site.xml` For more information, see [Cluster properties](https://cloud.google.com/dataproc/docs/concepts/cluster-properties).", - Immutable: true, - }, - }, - }, - "stagingBucket": &dcl.Property{ - Type: "string", - GoName: "StagingBucket", - Description: "Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see [Dataproc staging bucket](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). 
**This field requires a Cloud Storage bucket name, not a URI to a Cloud Storage bucket.**", - Immutable: true, - ServerDefault: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Storage/Bucket", - Field: "name", - }, - }, - }, - "tempBucket": &dcl.Property{ - Type: "string", - GoName: "TempBucket", - Description: "Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket. **This field requires a Cloud Storage bucket name, not a URI to a Cloud Storage bucket.**", - Immutable: true, - ServerDefault: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Storage/Bucket", - Field: "name", - }, - }, - }, - "workerConfig": &dcl.Property{ - Type: "object", - GoName: "WorkerConfig", - GoType: "ClusterConfigWorkerConfig", - Description: "Optional. The Compute Engine config settings for worker instances in a cluster.", - Immutable: true, - ServerDefault: true, - Properties: map[string]*dcl.Property{ - "accelerators": &dcl.Property{ - Type: "array", - GoName: "Accelerators", - Description: "Optional. 
The Compute Engine accelerator configuration for these instances.", - Immutable: true, - ServerDefault: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "ClusterConfigWorkerConfigAccelerators", - Properties: map[string]*dcl.Property{ - "acceleratorCount": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "AcceleratorCount", - Description: "The number of the accelerator cards of this type exposed to this instance.", - Immutable: true, - }, - "acceleratorType": &dcl.Property{ - Type: "string", - GoName: "AcceleratorType", - Description: "Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See [Compute Engine AcceleratorTypes](https://cloud.google.com/compute/docs/reference/beta/acceleratorTypes). Examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * `nvidia-tesla-k80` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, `nvidia-tesla-k80`.", - Immutable: true, - }, - }, - }, - }, - "diskConfig": &dcl.Property{ - Type: "object", - GoName: "DiskConfig", - GoType: "ClusterConfigWorkerConfigDiskConfig", - Description: "Optional. Disk option config settings.", - Immutable: true, - ServerDefault: true, - Properties: map[string]*dcl.Property{ - "bootDiskSizeGb": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "BootDiskSizeGb", - Description: "Optional. Size in GB of the boot disk (default is 500GB).", - Immutable: true, - }, - "bootDiskType": &dcl.Property{ - Type: "string", - GoName: "BootDiskType", - Description: "Optional. Type of the boot disk (default is \"pd-standard\"). 
Valid values: \"pd-balanced\" (Persistent Disk Balanced Solid State Drive), \"pd-ssd\" (Persistent Disk Solid State Drive), or \"pd-standard\" (Persistent Disk Hard Disk Drive). See [Disk types](https://cloud.google.com/compute/docs/disks#disk-types).", - Immutable: true, - }, - "localSsdInterface": &dcl.Property{ - Type: "string", - GoName: "LocalSsdInterface", - Description: "Optional. Interface type of local SSDs (default is \"scsi\"). Valid values: \"scsi\" (Small Computer System Interface), \"nvme\" (Non-Volatile Memory Express). See [local SSD performance](https://cloud.google.com/compute/docs/disks/local-ssd#performance).", - Immutable: true, - }, - "numLocalSsds": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "NumLocalSsds", - Description: "Optional. Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.", - Immutable: true, - ServerDefault: true, - }, - }, - }, - "image": &dcl.Property{ - Type: "string", - GoName: "Image", - Description: "Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id]` * `projects/[project_id]/global/images/[image-id]` * `image-id` Image family examples. 
Dataproc will use the most recent image from the family: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name]` * `projects/[project_id]/global/images/family/[custom-image-family-name]` If the URI is unspecified, it will be inferred from `SoftwareConfig.image_version` or the system default.", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Compute/Image", - Field: "selfLink", - }, - }, - }, - "instanceNames": &dcl.Property{ - Type: "array", - GoName: "InstanceNames", - ReadOnly: true, - Description: "Output only. The list of instance names. Dataproc derives the names from `cluster_name`, `num_instances`, and the instance group.", - Immutable: true, - ServerDefault: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Compute/Instance", - Field: "selfLink", - }, - }, - }, - }, - "instanceReferences": &dcl.Property{ - Type: "array", - GoName: "InstanceReferences", - ReadOnly: true, - Description: "Output only. 
List of references to Compute Engine instances.", - Immutable: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "ClusterConfigWorkerConfigInstanceReferences", - Properties: map[string]*dcl.Property{ - "instanceId": &dcl.Property{ - Type: "string", - GoName: "InstanceId", - Description: "The unique identifier of the Compute Engine instance.", - Immutable: true, - }, - "instanceName": &dcl.Property{ - Type: "string", - GoName: "InstanceName", - Description: "The user-friendly name of the Compute Engine instance.", - Immutable: true, - }, - "publicEciesKey": &dcl.Property{ - Type: "string", - GoName: "PublicEciesKey", - Description: "The public ECIES key used for sharing data with this instance.", - Immutable: true, - }, - "publicKey": &dcl.Property{ - Type: "string", - GoName: "PublicKey", - Description: "The public RSA key used for sharing data with this instance.", - Immutable: true, - }, - }, - }, - }, - "isPreemptible": &dcl.Property{ - Type: "boolean", - GoName: "IsPreemptible", - ReadOnly: true, - Description: "Output only. Specifies that this instance group contains preemptible instances.", - Immutable: true, - }, - "machineType": &dcl.Property{ - Type: "string", - GoName: "MachineType", - Description: "Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. 
Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `n1-standard-2` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, `n1-standard-2`.", - Immutable: true, - }, - "managedGroupConfig": &dcl.Property{ - Type: "object", - GoName: "ManagedGroupConfig", - GoType: "ClusterConfigWorkerConfigManagedGroupConfig", - ReadOnly: true, - Description: "Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.", - Immutable: true, - ServerDefault: true, - Properties: map[string]*dcl.Property{ - "instanceGroupManagerName": &dcl.Property{ - Type: "string", - GoName: "InstanceGroupManagerName", - ReadOnly: true, - Description: "Output only. The name of the Instance Group Manager for this group.", - Immutable: true, - }, - "instanceTemplateName": &dcl.Property{ - Type: "string", - GoName: "InstanceTemplateName", - ReadOnly: true, - Description: "Output only. The name of the Instance Template used for the Managed Instance Group.", - Immutable: true, - }, - }, - }, - "minCpuPlatform": &dcl.Property{ - Type: "string", - GoName: "MinCpuPlatform", - Description: "Optional. Specifies the minimum cpu platform for the Instance Group. See [Dataproc -> Minimum CPU Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).", - Immutable: true, - ServerDefault: true, - }, - "numInstances": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "NumInstances", - Description: "Optional. The number of VM instances in the instance group. 
For [HA cluster](/dataproc/docs/concepts/configuring-clusters/high-availability) [master_config](#FIELDS.master_config) groups, **must be set to 3**. For standard cluster [master_config](#FIELDS.master_config) groups, **must be set to 1**.", - Immutable: true, - }, - "preemptibility": &dcl.Property{ - Type: "string", - GoName: "Preemptibility", - GoType: "ClusterConfigWorkerConfigPreemptibilityEnum", - Description: "Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is `NON_PREEMPTIBLE`. This default cannot be changed. The default value for secondary instances is `PREEMPTIBLE`. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE", - Immutable: true, - Enum: []string{ - "PREEMPTIBILITY_UNSPECIFIED", - "NON_PREEMPTIBLE", - "PREEMPTIBLE", - }, - }, - }, - }, - }, - }, - "labels": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "Labels", - Description: "Optional. The labels to associate with this cluster. Label **keys** must contain 1 to 63 characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). Label **values** may be empty, but, if present, must contain 1 to 63 characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a cluster.", - }, - "location": &dcl.Property{ - Type: "string", - GoName: "Location", - Description: "The location for the resource, usually a GCP region.", - Immutable: true, - Parameter: true, - }, - "metrics": &dcl.Property{ - Type: "object", - GoName: "Metrics", - GoType: "ClusterMetrics", - ReadOnly: true, - Description: "Output only. Contains cluster daemon metrics such as HDFS and YARN stats. **Beta Feature**: This report is available for testing purposes only. 
It may be changed before final release.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "hdfsMetrics": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "HdfsMetrics", - Description: "The HDFS metrics.", - Immutable: true, - }, - "yarnMetrics": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "YarnMetrics", - Description: "The YARN metrics.", - Immutable: true, - }, - }, - }, - "name": &dcl.Property{ - Type: "string", - GoName: "Name", - Description: "Required. The cluster name. Cluster names within a project must be unique. Names of deleted clusters can be reused.", - Immutable: true, - }, - "project": &dcl.Property{ - Type: "string", - GoName: "Project", - Description: "Required. The Google Cloud Platform project ID that the cluster belongs to.", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Cloudresourcemanager/Project", - Field: "name", - Parent: true, - }, - }, - Parameter: true, - }, - "status": &dcl.Property{ - Type: "object", - GoName: "Status", - GoType: "ClusterStatus", - ReadOnly: true, - Description: "Output only. Cluster status.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "detail": &dcl.Property{ - Type: "string", - GoName: "Detail", - ReadOnly: true, - Description: "Optional. Output only. Details of cluster's state.", - Immutable: true, - }, - "state": &dcl.Property{ - Type: "string", - GoName: "State", - GoType: "ClusterStatusStateEnum", - ReadOnly: true, - Description: "Output only. The cluster's state. 
Possible values: UNKNOWN, CREATING, RUNNING, ERROR, DELETING, UPDATING, STOPPING, STOPPED, STARTING", - Immutable: true, - Enum: []string{ - "UNKNOWN", - "CREATING", - "RUNNING", - "ERROR", - "DELETING", - "UPDATING", - "STOPPING", - "STOPPED", - "STARTING", - }, - }, - "stateStartTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "StateStartTime", - ReadOnly: true, - Description: "Output only. Time when this state was entered (see JSON representation of [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).", - Immutable: true, - }, - "substate": &dcl.Property{ - Type: "string", - GoName: "Substate", - GoType: "ClusterStatusSubstateEnum", - ReadOnly: true, - Description: "Output only. Additional state information that includes status reported by the agent. Possible values: UNSPECIFIED, UNHEALTHY, STALE_STATUS", - Immutable: true, - Enum: []string{ - "UNSPECIFIED", - "UNHEALTHY", - "STALE_STATUS", - }, - }, - }, - }, - "statusHistory": &dcl.Property{ - Type: "array", - GoName: "StatusHistory", - ReadOnly: true, - Description: "Output only. The previous cluster status.", - Immutable: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "ClusterStatusHistory", - Properties: map[string]*dcl.Property{ - "detail": &dcl.Property{ - Type: "string", - GoName: "Detail", - ReadOnly: true, - Description: "Optional. Output only. Details of cluster's state.", - Immutable: true, - }, - "state": &dcl.Property{ - Type: "string", - GoName: "State", - GoType: "ClusterStatusHistoryStateEnum", - ReadOnly: true, - Description: "Output only. The cluster's state. 
Possible values: UNKNOWN, CREATING, RUNNING, ERROR, DELETING, UPDATING, STOPPING, STOPPED, STARTING", - Immutable: true, - Enum: []string{ - "UNKNOWN", - "CREATING", - "RUNNING", - "ERROR", - "DELETING", - "UPDATING", - "STOPPING", - "STOPPED", - "STARTING", - }, - }, - "stateStartTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "StateStartTime", - ReadOnly: true, - Description: "Output only. Time when this state was entered (see JSON representation of [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).", - Immutable: true, - }, - "substate": &dcl.Property{ - Type: "string", - GoName: "Substate", - GoType: "ClusterStatusHistorySubstateEnum", - ReadOnly: true, - Description: "Output only. Additional state information that includes status reported by the agent. Possible values: UNSPECIFIED, UNHEALTHY, STALE_STATUS", - Immutable: true, - Enum: []string{ - "UNSPECIFIED", - "UNHEALTHY", - "STALE_STATUS", - }, - }, - }, - }, - }, - "virtualClusterConfig": &dcl.Property{ - Type: "object", - GoName: "VirtualClusterConfig", - GoType: "ClusterVirtualClusterConfig", - Description: "Optional. The virtual cluster config is used when creating a Dataproc cluster that does not directly control the underlying compute resources, for example, when creating a [Dataproc-on-GKE cluster](https://cloud.google.com/dataproc/docs/guides/dpgke/dataproc-gke). Dataproc may set default values, and values may change when clusters are updated. Exactly one of config or virtual_cluster_config must be specified.", - Immutable: true, - Required: []string{ - "kubernetesClusterConfig", - }, - Properties: map[string]*dcl.Property{ - "auxiliaryServicesConfig": &dcl.Property{ - Type: "object", - GoName: "AuxiliaryServicesConfig", - GoType: "ClusterVirtualClusterConfigAuxiliaryServicesConfig", - Description: "Optional. 
Configuration of auxiliary services used by this cluster.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "metastoreConfig": &dcl.Property{ - Type: "object", - GoName: "MetastoreConfig", - GoType: "ClusterVirtualClusterConfigAuxiliaryServicesConfigMetastoreConfig", - Description: "Optional. The Hive Metastore configuration for this workload.", - Immutable: true, - Required: []string{ - "dataprocMetastoreService", - }, - Properties: map[string]*dcl.Property{ - "dataprocMetastoreService": &dcl.Property{ - Type: "string", - GoName: "DataprocMetastoreService", - Description: "Required. Resource name of an existing Dataproc Metastore service. Example: * `projects/[project_id]/locations/[dataproc_region]/services/[service-name]`", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Metastore/Service", - Field: "selfLink", - }, - }, - }, - }, - }, - "sparkHistoryServerConfig": &dcl.Property{ - Type: "object", - GoName: "SparkHistoryServerConfig", - GoType: "ClusterVirtualClusterConfigAuxiliaryServicesConfigSparkHistoryServerConfig", - Description: "Optional. The Spark History Server configuration for the workload.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "dataprocCluster": &dcl.Property{ - Type: "string", - GoName: "DataprocCluster", - Description: "Optional. Resource name of an existing Dataproc Cluster to act as a Spark History Server for the workload. Example: * `projects/[project_id]/regions/[region]/clusters/[cluster_name]`", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Dataproc/Cluster", - Field: "selfLink", - }, - }, - }, - }, - }, - }, - }, - "kubernetesClusterConfig": &dcl.Property{ - Type: "object", - GoName: "KubernetesClusterConfig", - GoType: "ClusterVirtualClusterConfigKubernetesClusterConfig", - Description: "Required. 
The configuration for running the Dataproc cluster on Kubernetes.", - Immutable: true, - Required: []string{ - "gkeClusterConfig", - }, - Properties: map[string]*dcl.Property{ - "gkeClusterConfig": &dcl.Property{ - Type: "object", - GoName: "GkeClusterConfig", - GoType: "ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfig", - Description: "Required. The configuration for running the Dataproc cluster on GKE.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "gkeClusterTarget": &dcl.Property{ - Type: "string", - GoName: "GkeClusterTarget", - Description: "Optional. A target GKE cluster to deploy to. It must be in the same project and region as the Dataproc cluster (the GKE cluster can be zonal or regional). Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Container/Cluster", - Field: "selfLink", - }, - }, - }, - "nodePoolTarget": &dcl.Property{ - Type: "array", - GoName: "NodePoolTarget", - Description: "Optional. GKE node pools where workloads will be scheduled. At least one node pool must be assigned the `DEFAULT` GkeNodePoolTarget.Role. If a `GkeNodePoolTarget` is not specified, Dataproc constructs a `DEFAULT` `GkeNodePoolTarget`. Each role can be given to only one `GkeNodePoolTarget`. All node pools must have the same location settings.", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTarget", - Required: []string{ - "nodePool", - "roles", - }, - Properties: map[string]*dcl.Property{ - "nodePool": &dcl.Property{ - Type: "string", - GoName: "NodePool", - Description: "Required. The target GKE node pool. 
Format: 'projects/{project}/locations/{location}/clusters/{cluster}/nodePools/{node_pool}'", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Container/NodePool", - Field: "selfLink", - }, - }, - }, - "nodePoolConfig": &dcl.Property{ - Type: "object", - GoName: "NodePoolConfig", - GoType: "ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfig", - Description: "Input only. The configuration for the GKE node pool. If specified, Dataproc attempts to create a node pool with the specified shape. If one with the same name already exists, it is verified against all specified fields. If a field differs, the virtual cluster creation will fail. If omitted, any node pool with the specified name is used. If a node pool with the specified name does not exist, Dataproc create a node pool with default values. This is an input only field. It will not be returned by the API.", - Immutable: true, - Unreadable: true, - Properties: map[string]*dcl.Property{ - "autoscaling": &dcl.Property{ - Type: "object", - GoName: "Autoscaling", - GoType: "ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigAutoscaling", - Description: "Optional. The autoscaler configuration for this node pool. The autoscaler is enabled only when a valid configuration is present.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "maxNodeCount": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "MaxNodeCount", - Description: "The maximum number of nodes in the node pool. Must be >= min_node_count, and must be > 0. **Note:** Quota must be sufficient to scale up the cluster.", - Immutable: true, - }, - "minNodeCount": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "MinNodeCount", - Description: "The minimum number of nodes in the node pool. 
Must be >= 0 and <= max_node_count.", - Immutable: true, - }, - }, - }, - "config": &dcl.Property{ - Type: "object", - GoName: "Config", - GoType: "ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfig", - Description: "Optional. The node pool configuration.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "accelerators": &dcl.Property{ - Type: "array", - GoName: "Accelerators", - Description: "Optional. A list of [hardware accelerators](https://cloud.google.com/compute/docs/gpus) to attach to each node.", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigAccelerators", - Properties: map[string]*dcl.Property{ - "acceleratorCount": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "AcceleratorCount", - Description: "The number of accelerator cards exposed to an instance.", - Immutable: true, - }, - "acceleratorType": &dcl.Property{ - Type: "string", - GoName: "AcceleratorType", - Description: "The accelerator type resource namename (see GPUs on Compute Engine).", - Immutable: true, - }, - "gpuPartitionSize": &dcl.Property{ - Type: "string", - GoName: "GpuPartitionSize", - Description: "Size of partitions to create on the GPU. Valid values are described in the NVIDIA [mig user guide](https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning).", - Immutable: true, - }, - }, - }, - }, - "bootDiskKmsKey": &dcl.Property{ - Type: "string", - GoName: "BootDiskKmsKey", - Description: "Optional. The [Customer Managed Encryption Key (CMEK)] (https://cloud.google.com/kubernetes-engine/docs/how-to/using-cmek) used to encrypt the boot disk attached to each node in the node pool. 
Specify the key using the following format: `projects/KEY_PROJECT_ID/locations/LOCATION/keyRings/RING_NAME/cryptoKeys/KEY_NAME`.", - Immutable: true, - }, - "ephemeralStorageConfig": &dcl.Property{ - Type: "object", - GoName: "EphemeralStorageConfig", - GoType: "ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetNodePoolConfigConfigEphemeralStorageConfig", - Description: "Optional. Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "localSsdCount": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "LocalSsdCount", - Description: "Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD is 375 GB in size. If zero, it means to disable using local SSDs as ephemeral storage.", - Immutable: true, - }, - }, - }, - "localSsdCount": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "LocalSsdCount", - Description: "Optional. The number of local SSD disks to attach to the node, which is limited by the maximum number of disks allowable per zone (see [Adding Local SSDs](https://cloud.google.com/compute/docs/disks/local-ssd)).", - Immutable: true, - }, - "machineType": &dcl.Property{ - Type: "string", - GoName: "MachineType", - Description: "Optional. The name of a Compute Engine [machine type](https://cloud.google.com/compute/docs/machine-types).", - Immutable: true, - }, - "minCpuPlatform": &dcl.Property{ - Type: "string", - GoName: "MinCpuPlatform", - Description: "Optional. [Minimum CPU platform](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform) to be used by this instance. The instance may be scheduled on the specified or a newer CPU platform. 
Specify the friendly names of CPU platforms, such as \"Intel Haswell\"` or Intel Sandy Bridge\".", - Immutable: true, - }, - "preemptible": &dcl.Property{ - Type: "boolean", - GoName: "Preemptible", - Description: "Optional. Whether the nodes are created as legacy [preemptible VM instances] (https://cloud.google.com/compute/docs/instances/preemptible). Also see Spot VMs, preemptible VM instances without a maximum lifetime. Legacy and Spot preemptible nodes cannot be used in a node pool with the `CONTROLLER` [role] (/dataproc/docs/reference/rest/v1/projects.regions.clusters#role) or in the DEFAULT node pool if the CONTROLLER role is not assigned (the DEFAULT node pool will assume the CONTROLLER role).", - Immutable: true, - }, - "spot": &dcl.Property{ - Type: "boolean", - GoName: "Spot", - Description: "Optional. Whether the nodes are created as [Spot VM instances] (https://cloud.google.com/compute/docs/instances/spot). Spot VMs are the latest update to legacy preemptible VMs. Spot VMs do not have a maximum lifetime. Legacy and Spot preemptible nodes cannot be used in a node pool with the `CONTROLLER` [role](/dataproc/docs/reference/rest/v1/projects.regions.clusters#role) or in the DEFAULT node pool if the CONTROLLER role is not assigned (the DEFAULT node pool will assume the CONTROLLER role).", - Immutable: true, - }, - }, - }, - "locations": &dcl.Property{ - Type: "array", - GoName: "Locations", - Description: "Optional. The list of Compute Engine [zones](https://cloud.google.com/compute/docs/zones#available) where node pool nodes associated with a Dataproc on GKE virtual cluster will be located. **Note:** All node pools associated with a virtual cluster must be located in the same region as the virtual cluster, and they must be located in the same zone within that region. 
If a location is not specified during node pool creation, Dataproc on GKE will choose the zone.", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - }, - }, - "roles": &dcl.Property{ - Type: "array", - GoName: "Roles", - Description: "Required. The roles associated with the GKE node pool.", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "ClusterVirtualClusterConfigKubernetesClusterConfigGkeClusterConfigNodePoolTargetRolesEnum", - Enum: []string{ - "ROLE_UNSPECIFIED", - "DEFAULT", - "CONTROLLER", - "SPARK_DRIVER", - "SPARK_EXECUTOR", - }, - }, - }, - }, - }, - }, - }, - }, - "kubernetesNamespace": &dcl.Property{ - Type: "string", - GoName: "KubernetesNamespace", - Description: "Optional. A namespace within the Kubernetes cluster to deploy into. If this namespace does not exist, it is created. If it exists, Dataproc verifies that another Dataproc VirtualCluster is not installed into it. If not specified, the name of the Dataproc Cluster is used.", - Immutable: true, - }, - "kubernetesSoftwareConfig": &dcl.Property{ - Type: "object", - GoName: "KubernetesSoftwareConfig", - GoType: "ClusterVirtualClusterConfigKubernetesClusterConfigKubernetesSoftwareConfig", - Description: "Optional. The software configuration for this Dataproc cluster running on Kubernetes.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "componentVersion": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "ComponentVersion", - Description: "The components that should be installed in this Dataproc cluster. The key must be a string from the KubernetesComponent enumeration. The value is the version of the software to be installed. 
At least one entry must be specified.", - Immutable: true, - }, - "properties": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "Properties", - Description: "The properties to set on daemon config files. Property keys are specified in `prefix:property` format, for example `spark:spark.kubernetes.container.image`. The following are supported prefixes and their mappings: * spark: `spark-defaults.conf` For more information, see [Cluster properties](https://cloud.google.com/dataproc/docs/concepts/cluster-properties).", - Immutable: true, - }, - }, - }, - }, - }, - "stagingBucket": &dcl.Property{ - Type: "string", - GoName: "StagingBucket", - Description: "Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see [Dataproc staging and temp buckets](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). 
**This field requires a Cloud Storage bucket name, not a `gs://...` URI to a Cloud Storage bucket.**", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Storage/Bucket", - Field: "name", - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - } -} diff --git a/mmv1/third_party/terraform/services/dataproc/dataproc_utils.go.tmpl b/mmv1/third_party/terraform/services/dataproc/dataproc_utils.go.tmpl deleted file mode 100644 index ab646deddc6f..000000000000 --- a/mmv1/third_party/terraform/services/dataproc/dataproc_utils.go.tmpl +++ /dev/null @@ -1,32 +0,0 @@ -package dataproc - -import ( - dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" -) - -func encodeJobCreateRequest(m map[string]any) map[string]any { - req := make(map[string]any, 1) - dcl.PutMapEntry(req, []string{"job"}, m) - return req -} - -{{- if eq $.TargetVersionName "ga" }} -/* -{{- end }} -func expandClusterProject(_ *Client, project *string, _ *Cluster) (*string, error) { - return dcl.SelfLinkToName(project), nil -} - -// CompareClusterConfigMasterConfigNewStyle exposes the compareClusterConfigMasterConfigNewStyle function for testing. -func CompareClusterConfigMasterConfigNewStyle(d, a any, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - return compareClusterConfigMasterConfigNewStyle(d, a, fn) -} -{{- if eq $.TargetVersionName "ga" }} -*/ -{{- end }} - -func canonicalizeSoftwareConfigProperties(o, n any) bool { - // This field is a map that contains both client provided and server provided values. It - // is also immutable, so always return "no diff". 
- return true -} diff --git a/mmv1/third_party/terraform/services/dataproc/resource_dataproc_workflow_template.go.tmpl b/mmv1/third_party/terraform/services/dataproc/resource_dataproc_workflow_template.go.tmpl index f27b46655afe..2ca6fad78138 100644 --- a/mmv1/third_party/terraform/services/dataproc/resource_dataproc_workflow_template.go.tmpl +++ b/mmv1/third_party/terraform/services/dataproc/resource_dataproc_workflow_template.go.tmpl @@ -2216,7 +2216,7 @@ func resourceDataprocWorkflowTemplateCreate(d *schema.ResourceData, meta interfa if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := transport_tpg.NewDCLDataprocClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + client := NewDCLDataprocClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) @@ -2267,7 +2267,7 @@ func resourceDataprocWorkflowTemplateRead(d *schema.ResourceData, meta interface if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := transport_tpg.NewDCLDataprocClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + client := NewDCLDataprocClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) @@ -2361,7 +2361,7 @@ func resourceDataprocWorkflowTemplateDelete(d *schema.ResourceData, meta interfa if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := transport_tpg.NewDCLDataprocClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + client := NewDCLDataprocClient(config, userAgent, billingProject, 
d.Timeout(schema.TimeoutDelete)) if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) diff --git a/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_release_generated_test.go.tmpl b/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_release_generated_test.go.tmpl index 430e90a47060..b392feaaf5d2 100644 --- a/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_release_generated_test.go.tmpl +++ b/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_release_generated_test.go.tmpl @@ -3,20 +3,15 @@ package firebaserules_test import ( "context" "fmt" - dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -{{- if eq $.TargetVersionName "ga" }} - firebaserules "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/firebaserules" -{{- else }} - firebaserules "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/firebaserules/beta" -{{- end }} - "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/terraform" "strings" "testing" - + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-google/google/acctest" "github.com/hashicorp/terraform-provider-google/google/envvar" - transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/services/firebaserules" + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" ) func TestAccFirebaserulesRelease_FirestoreReleaseAdditionalHandWritten(t *testing.T) { @@ -171,7 +166,7 @@ func testAccCheckFirebaserulesReleaseDestroyProducer(t *testing.T) func(s *terra UpdateTime: 
dcl.StringOrNil(rs.Primary.Attributes["update_time"]), } - client := transport_tpg.NewDCLFirebaserulesClient(config, config.UserAgent, billingProject, 0) + client := firebaserules.NewDCLFirebaserulesClient(config, config.UserAgent, billingProject, 0) _, err := client.GetRelease(context.Background(), obj) if err == nil { return fmt.Errorf("google_firebaserules_release still exists %v", obj) diff --git a/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_ruleset_generated_test.go.tmpl b/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_ruleset_generated_test.go.tmpl index ee37076f9796..79cc2ef28871 100644 --- a/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_ruleset_generated_test.go.tmpl +++ b/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_ruleset_generated_test.go.tmpl @@ -3,20 +3,15 @@ package firebaserules_test import ( "context" "fmt" - dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -{{- if eq $.TargetVersionName "ga" }} - firebaserules "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/firebaserules" -{{- else }} - firebaserules "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/firebaserules/beta" -{{- end }} - "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/terraform" "strings" "testing" - + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-google/google/acctest" "github.com/hashicorp/terraform-provider-google/google/envvar" - transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/services/firebaserules" + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" ) func 
TestAccFirebaserulesRuleset_BasicRuleset(t *testing.T) { @@ -128,7 +123,7 @@ func testAccCheckFirebaserulesRulesetDestroyProducer(t *testing.T) func(s *terra Name: dcl.StringOrNil(rs.Primary.Attributes["name"]), } - client := transport_tpg.NewDCLFirebaserulesClient(config, config.UserAgent, billingProject, 0) + client := firebaserules.NewDCLFirebaserulesClient(config, config.UserAgent, billingProject, 0) _, err := client.GetRuleset(context.Background(), obj) if err == nil { return fmt.Errorf("google_firebaserules_ruleset still exists %v", obj) diff --git a/mmv1/third_party/terraform/services/gkehub/resource_gke_hub_feature_membership.go.tmpl b/mmv1/third_party/terraform/services/gkehub/resource_gke_hub_feature_membership.go.tmpl index 150a0fdd4f17..b551a9291458 100644 --- a/mmv1/third_party/terraform/services/gkehub/resource_gke_hub_feature_membership.go.tmpl +++ b/mmv1/third_party/terraform/services/gkehub/resource_gke_hub_feature_membership.go.tmpl @@ -10,15 +10,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" {{- end }} "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -{{- if eq $.TargetVersionName "ga" }} - gkehub "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/gkehub" -{{- else }} - gkehub "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/gkehub/beta" -{{- end }} - - "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" "github.com/hashicorp/terraform-provider-google/google/tpgresource" transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) @@ -785,7 +777,7 @@ func resourceGkeHubFeatureMembershipCreate(d *schema.ResourceData, meta interfac return err } - obj := &gkehub.FeatureMembership{ + obj := &FeatureMembership{ Feature: 
dcl.String(d.Get("feature").(string)), Location: dcl.String(d.Get("location").(string)), Membership: dcl.String(d.Get("membership").(string)), @@ -807,7 +799,7 @@ func resourceGkeHubFeatureMembershipCreate(d *schema.ResourceData, meta interfac return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - directive := tpgdclresource.CreateDirective + directive := dcl.CreateDirective userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err @@ -817,7 +809,7 @@ func resourceGkeHubFeatureMembershipCreate(d *schema.ResourceData, meta interfac if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := transport_tpg.NewDCLGkeHubClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + client := NewDCLGkeHubClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) @@ -846,7 +838,7 @@ func resourceGkeHubFeatureMembershipRead(d *schema.ResourceData, meta interface{ return err } - obj := &gkehub.FeatureMembership{ + obj := &FeatureMembership{ Feature: dcl.String(d.Get("feature").(string)), Location: dcl.String(d.Get("location").(string)), Membership: dcl.String(d.Get("membership").(string)), @@ -866,7 +858,7 @@ func resourceGkeHubFeatureMembershipRead(d *schema.ResourceData, meta interface{ if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := transport_tpg.NewDCLGkeHubClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + client := NewDCLGkeHubClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) @@ -876,7 +868,7 @@ func 
resourceGkeHubFeatureMembershipRead(d *schema.ResourceData, meta interface{ res, err := client.GetFeatureMembership(context.Background(), obj) if err != nil { resourceName := fmt.Sprintf("GkeHubFeatureMembership %q", d.Id()) - return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) + return dcl.HandleNotFoundDCLError(err, d, resourceName) } if err = d.Set("feature", res.Feature); err != nil { @@ -913,7 +905,7 @@ func resourceGkeHubFeatureMembershipUpdate(d *schema.ResourceData, meta interfac return err } - obj := &gkehub.FeatureMembership{ + obj := &FeatureMembership{ Feature: dcl.String(d.Get("feature").(string)), Location: dcl.String(d.Get("location").(string)), Membership: dcl.String(d.Get("membership").(string)), @@ -930,7 +922,7 @@ func resourceGkeHubFeatureMembershipUpdate(d *schema.ResourceData, meta interfac transport_tpg.MutexStore.Lock(lockName) defer transport_tpg.MutexStore.Unlock(lockName) - directive := tpgdclresource.UpdateDirective + directive := dcl.UpdateDirective userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err @@ -941,7 +933,7 @@ func resourceGkeHubFeatureMembershipUpdate(d *schema.ResourceData, meta interfac if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := transport_tpg.NewDCLGkeHubClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + client := NewDCLGkeHubClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) @@ -970,7 +962,7 @@ func resourceGkeHubFeatureMembershipDelete(d *schema.ResourceData, meta interfac return err } - obj := &gkehub.FeatureMembership{ + obj := &FeatureMembership{ Feature: dcl.String(d.Get("feature").(string)), Location: dcl.String(d.Get("location").(string)), Membership: 
dcl.String(d.Get("membership").(string)), @@ -997,7 +989,7 @@ func resourceGkeHubFeatureMembershipDelete(d *schema.ResourceData, meta interfac if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := transport_tpg.NewDCLGkeHubClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + client := NewDCLGkeHubClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) @@ -1033,25 +1025,25 @@ func resourceGkeHubFeatureMembershipImport(d *schema.ResourceData, meta interfac return []*schema.ResourceData{d}, nil } -func expandGkeHubFeatureMembershipConfigmanagement(o interface{}) *gkehub.FeatureMembershipConfigmanagement { +func expandGkeHubFeatureMembershipConfigmanagement(o interface{}) *FeatureMembershipConfigmanagement { if o == nil { - return gkehub.EmptyFeatureMembershipConfigmanagement + return EmptyFeatureMembershipConfigmanagement } objArr := o.([]interface{}) if len(objArr) == 0 || objArr[0] == nil { - return gkehub.EmptyFeatureMembershipConfigmanagement + return EmptyFeatureMembershipConfigmanagement } obj := objArr[0].(map[string]interface{}) - return &gkehub.FeatureMembershipConfigmanagement{ + return &FeatureMembershipConfigmanagement{ ConfigSync: expandGkeHubFeatureMembershipConfigmanagementConfigSync(obj["config_sync"]), HierarchyController: expandGkeHubFeatureMembershipConfigmanagementHierarchyController(obj["hierarchy_controller"]), - Management: gkehub.FeatureMembershipConfigmanagementManagementEnumRef(obj["management"].(string)), + Management: FeatureMembershipConfigmanagementManagementEnumRef(obj["management"].(string)), PolicyController: expandGkeHubFeatureMembershipConfigmanagementPolicyController(obj["policy_controller"]), Version: dcl.StringOrNil(obj["version"].(string)), } } -func 
flattenGkeHubFeatureMembershipConfigmanagement(obj *gkehub.FeatureMembershipConfigmanagement) interface{} { +func flattenGkeHubFeatureMembershipConfigmanagement(obj *FeatureMembershipConfigmanagement) interface{} { if obj == nil || obj.Empty() { return nil } @@ -1067,16 +1059,16 @@ func flattenGkeHubFeatureMembershipConfigmanagement(obj *gkehub.FeatureMembershi } -func expandGkeHubFeatureMembershipConfigmanagementConfigSync(o interface{}) *gkehub.FeatureMembershipConfigmanagementConfigSync { +func expandGkeHubFeatureMembershipConfigmanagementConfigSync(o interface{}) *FeatureMembershipConfigmanagementConfigSync { if o == nil { - return gkehub.EmptyFeatureMembershipConfigmanagementConfigSync + return EmptyFeatureMembershipConfigmanagementConfigSync } objArr := o.([]interface{}) if len(objArr) == 0 || objArr[0] == nil { - return gkehub.EmptyFeatureMembershipConfigmanagementConfigSync + return EmptyFeatureMembershipConfigmanagementConfigSync } obj := objArr[0].(map[string]interface{}) - return &gkehub.FeatureMembershipConfigmanagementConfigSync{ + return &FeatureMembershipConfigmanagementConfigSync{ DeploymentOverrides: expandGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesArray(obj["deployment_overrides"]), Enabled: dcl.Bool(obj["enabled"].(bool)), Git: expandGkeHubFeatureMembershipConfigmanagementConfigSyncGit(obj["git"]), @@ -1088,7 +1080,7 @@ func expandGkeHubFeatureMembershipConfigmanagementConfigSync(o interface{}) *gke } } -func flattenGkeHubFeatureMembershipConfigmanagementConfigSync(obj *gkehub.FeatureMembershipConfigmanagementConfigSync) interface{} { +func flattenGkeHubFeatureMembershipConfigmanagementConfigSync(obj *FeatureMembershipConfigmanagementConfigSync) interface{} { if obj == nil || obj.Empty() { return nil } @@ -1106,17 +1098,17 @@ func flattenGkeHubFeatureMembershipConfigmanagementConfigSync(obj *gkehub.Featur return []interface{}{transformed} } -func 
expandGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesArray(o interface{}) []gkehub.FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides { +func expandGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesArray(o interface{}) []FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides { if o == nil { - return make([]gkehub.FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides, 0) + return make([]FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides, 0) } objs := o.([]interface{}) if len(objs) == 0 || objs[0] == nil { - return make([]gkehub.FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides, 0) + return make([]FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides, 0) } - items := make([]gkehub.FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides, 0, len(objs)) + items := make([]FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides, 0, len(objs)) for _, item := range objs { i := expandGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides(item) items = append(items, *i) @@ -1125,20 +1117,20 @@ func expandGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesA return items } -func expandGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides(o interface{}) *gkehub.FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides { +func expandGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides(o interface{}) *FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides { if o == nil { - return gkehub.EmptyFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides + return EmptyFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides } obj := o.(map[string]interface{}) - return &gkehub.FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides{ + return &FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides{ Containers: 
expandGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersArray(obj["containers"]), DeploymentName: dcl.String(obj["deployment_name"].(string)), DeploymentNamespace: dcl.String(obj["deployment_namespace"].(string)), } } -func flattenGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesArray(objs []gkehub.FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides) []interface{} { +func flattenGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesArray(objs []FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides) []interface{} { if objs == nil { return nil } @@ -1152,7 +1144,7 @@ func flattenGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides return items } -func flattenGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides(obj *gkehub.FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides) interface{} { +func flattenGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides(obj *FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides) interface{} { if obj == nil || obj.Empty() { return nil } @@ -1165,17 +1157,17 @@ func flattenGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides return transformed } -func expandGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersArray(o interface{}) []gkehub.FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers { +func expandGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersArray(o interface{}) []FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers { if o == nil { - return make([]gkehub.FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers, 0) + return make([]FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers, 0) } objs := o.([]interface{}) if len(objs) == 0 || objs[0] == nil { - return 
make([]gkehub.FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers, 0) + return make([]FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers, 0) } - items := make([]gkehub.FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers, 0, len(objs)) + items := make([]FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers, 0, len(objs)) for _, item := range objs { i := expandGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers(item) items = append(items, *i) @@ -1184,13 +1176,13 @@ func expandGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesC return items } -func expandGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers(o interface{}) *gkehub.FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers { +func expandGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers(o interface{}) *FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers { if o == nil { - return gkehub.EmptyFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers + return EmptyFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers } obj := o.(map[string]interface{}) - return &gkehub.FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers{ + return &FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers{ ContainerName: dcl.String(obj["container_name"].(string)), CpuLimit: dcl.String(obj["cpu_limit"].(string)), CpuRequest: dcl.String(obj["cpu_request"].(string)), @@ -1199,7 +1191,7 @@ func expandGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesC } } -func flattenGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersArray(objs []gkehub.FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers) []interface{} { +func 
flattenGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainersArray(objs []FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers) []interface{} { if objs == nil { return nil } @@ -1213,7 +1205,7 @@ func flattenGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides return items } -func flattenGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers(obj *gkehub.FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers) interface{} { +func flattenGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers(obj *FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers) interface{} { if obj == nil || obj.Empty() { return nil } @@ -1229,16 +1221,16 @@ func flattenGkeHubFeatureMembershipConfigmanagementConfigSyncDeploymentOverrides } -func expandGkeHubFeatureMembershipConfigmanagementConfigSyncGit(o interface{}) *gkehub.FeatureMembershipConfigmanagementConfigSyncGit { +func expandGkeHubFeatureMembershipConfigmanagementConfigSyncGit(o interface{}) *FeatureMembershipConfigmanagementConfigSyncGit { if o == nil { - return gkehub.EmptyFeatureMembershipConfigmanagementConfigSyncGit + return EmptyFeatureMembershipConfigmanagementConfigSyncGit } objArr := o.([]interface{}) if len(objArr) == 0 || objArr[0] == nil { - return gkehub.EmptyFeatureMembershipConfigmanagementConfigSyncGit + return EmptyFeatureMembershipConfigmanagementConfigSyncGit } obj := objArr[0].(map[string]interface{}) - return &gkehub.FeatureMembershipConfigmanagementConfigSyncGit{ + return &FeatureMembershipConfigmanagementConfigSyncGit{ GcpServiceAccountEmail: dcl.String(obj["gcp_service_account_email"].(string)), HttpsProxy: dcl.String(obj["https_proxy"].(string)), PolicyDir: dcl.String(obj["policy_dir"].(string)), @@ -1250,7 +1242,7 @@ func expandGkeHubFeatureMembershipConfigmanagementConfigSyncGit(o interface{}) * } } -func 
flattenGkeHubFeatureMembershipConfigmanagementConfigSyncGit(obj *gkehub.FeatureMembershipConfigmanagementConfigSyncGit) interface{} { +func flattenGkeHubFeatureMembershipConfigmanagementConfigSyncGit(obj *FeatureMembershipConfigmanagementConfigSyncGit) interface{} { if obj == nil || obj.Empty() { return nil } @@ -1269,16 +1261,16 @@ func flattenGkeHubFeatureMembershipConfigmanagementConfigSyncGit(obj *gkehub.Fea } -func expandGkeHubFeatureMembershipConfigmanagementConfigSyncOci(o interface{}) *gkehub.FeatureMembershipConfigmanagementConfigSyncOci { +func expandGkeHubFeatureMembershipConfigmanagementConfigSyncOci(o interface{}) *FeatureMembershipConfigmanagementConfigSyncOci { if o == nil { - return gkehub.EmptyFeatureMembershipConfigmanagementConfigSyncOci + return EmptyFeatureMembershipConfigmanagementConfigSyncOci } objArr := o.([]interface{}) if len(objArr) == 0 || objArr[0] == nil { - return gkehub.EmptyFeatureMembershipConfigmanagementConfigSyncOci + return EmptyFeatureMembershipConfigmanagementConfigSyncOci } obj := objArr[0].(map[string]interface{}) - return &gkehub.FeatureMembershipConfigmanagementConfigSyncOci{ + return &FeatureMembershipConfigmanagementConfigSyncOci{ GcpServiceAccountEmail: dcl.String(obj["gcp_service_account_email"].(string)), PolicyDir: dcl.String(obj["policy_dir"].(string)), SecretType: dcl.String(obj["secret_type"].(string)), @@ -1287,7 +1279,7 @@ func expandGkeHubFeatureMembershipConfigmanagementConfigSyncOci(o interface{}) * } } -func flattenGkeHubFeatureMembershipConfigmanagementConfigSyncOci(obj *gkehub.FeatureMembershipConfigmanagementConfigSyncOci) interface{} { +func flattenGkeHubFeatureMembershipConfigmanagementConfigSyncOci(obj *FeatureMembershipConfigmanagementConfigSyncOci) interface{} { if obj == nil || obj.Empty() { return nil } @@ -1303,23 +1295,23 @@ func flattenGkeHubFeatureMembershipConfigmanagementConfigSyncOci(obj *gkehub.Fea } -func expandGkeHubFeatureMembershipConfigmanagementHierarchyController(o interface{}) 
*gkehub.FeatureMembershipConfigmanagementHierarchyController { +func expandGkeHubFeatureMembershipConfigmanagementHierarchyController(o interface{}) *FeatureMembershipConfigmanagementHierarchyController { if o == nil { - return gkehub.EmptyFeatureMembershipConfigmanagementHierarchyController + return EmptyFeatureMembershipConfigmanagementHierarchyController } objArr := o.([]interface{}) if len(objArr) == 0 || objArr[0] == nil { - return gkehub.EmptyFeatureMembershipConfigmanagementHierarchyController + return EmptyFeatureMembershipConfigmanagementHierarchyController } obj := objArr[0].(map[string]interface{}) - return &gkehub.FeatureMembershipConfigmanagementHierarchyController{ + return &FeatureMembershipConfigmanagementHierarchyController{ EnableHierarchicalResourceQuota: dcl.Bool(obj["enable_hierarchical_resource_quota"].(bool)), EnablePodTreeLabels: dcl.Bool(obj["enable_pod_tree_labels"].(bool)), Enabled: dcl.Bool(obj["enabled"].(bool)), } } -func flattenGkeHubFeatureMembershipConfigmanagementHierarchyController(obj *gkehub.FeatureMembershipConfigmanagementHierarchyController) interface{} { +func flattenGkeHubFeatureMembershipConfigmanagementHierarchyController(obj *FeatureMembershipConfigmanagementHierarchyController) interface{} { if obj == nil || obj.Empty() { return nil } @@ -1333,19 +1325,19 @@ func flattenGkeHubFeatureMembershipConfigmanagementHierarchyController(obj *gkeh } -func expandGkeHubFeatureMembershipConfigmanagementPolicyController(o interface{}) *gkehub.FeatureMembershipConfigmanagementPolicyController { +func expandGkeHubFeatureMembershipConfigmanagementPolicyController(o interface{}) *FeatureMembershipConfigmanagementPolicyController { if o == nil { - return gkehub.EmptyFeatureMembershipConfigmanagementPolicyController + return EmptyFeatureMembershipConfigmanagementPolicyController } objArr := o.([]interface{}) if len(objArr) == 0 || objArr[0] == nil { - return gkehub.EmptyFeatureMembershipConfigmanagementPolicyController + return 
EmptyFeatureMembershipConfigmanagementPolicyController } obj := objArr[0].(map[string]interface{}) - return &gkehub.FeatureMembershipConfigmanagementPolicyController{ + return &FeatureMembershipConfigmanagementPolicyController{ AuditIntervalSeconds: dcl.String(obj["audit_interval_seconds"].(string)), Enabled: dcl.Bool(obj["enabled"].(bool)), - ExemptableNamespaces: tpgdclresource.ExpandStringArray(obj["exemptable_namespaces"]), + ExemptableNamespaces: dcl.ExpandStringArray(obj["exemptable_namespaces"]), LogDeniesEnabled: dcl.Bool(obj["log_denies_enabled"].(bool)), Monitoring: expandGkeHubFeatureMembershipConfigmanagementPolicyControllerMonitoring(obj["monitoring"]), MutationEnabled: dcl.Bool(obj["mutation_enabled"].(bool)), @@ -1354,7 +1346,7 @@ func expandGkeHubFeatureMembershipConfigmanagementPolicyController(o interface{} } } -func flattenGkeHubFeatureMembershipConfigmanagementPolicyController(obj *gkehub.FeatureMembershipConfigmanagementPolicyController) interface{} { +func flattenGkeHubFeatureMembershipConfigmanagementPolicyController(obj *FeatureMembershipConfigmanagementPolicyController) interface{} { if obj == nil || obj.Empty() { return nil } @@ -1373,7 +1365,7 @@ func flattenGkeHubFeatureMembershipConfigmanagementPolicyController(obj *gkehub. 
} -func expandGkeHubFeatureMembershipConfigmanagementPolicyControllerMonitoring(o interface{}) *gkehub.FeatureMembershipConfigmanagementPolicyControllerMonitoring { +func expandGkeHubFeatureMembershipConfigmanagementPolicyControllerMonitoring(o interface{}) *FeatureMembershipConfigmanagementPolicyControllerMonitoring { if o == nil { return nil } @@ -1382,12 +1374,12 @@ func expandGkeHubFeatureMembershipConfigmanagementPolicyControllerMonitoring(o i return nil } obj := objArr[0].(map[string]interface{}) - return &gkehub.FeatureMembershipConfigmanagementPolicyControllerMonitoring{ + return &FeatureMembershipConfigmanagementPolicyControllerMonitoring{ Backends: expandGkeHubFeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsArray(obj["backends"]), } } -func flattenGkeHubFeatureMembershipConfigmanagementPolicyControllerMonitoring(obj *gkehub.FeatureMembershipConfigmanagementPolicyControllerMonitoring) interface{} { +func flattenGkeHubFeatureMembershipConfigmanagementPolicyControllerMonitoring(obj *FeatureMembershipConfigmanagementPolicyControllerMonitoring) interface{} { if obj == nil || obj.Empty() { return nil } @@ -1399,22 +1391,22 @@ func flattenGkeHubFeatureMembershipConfigmanagementPolicyControllerMonitoring(ob } -func expandGkeHubFeatureMembershipMesh(o interface{}) *gkehub.FeatureMembershipMesh { +func expandGkeHubFeatureMembershipMesh(o interface{}) *FeatureMembershipMesh { if o == nil { - return gkehub.EmptyFeatureMembershipMesh + return EmptyFeatureMembershipMesh } objArr := o.([]interface{}) if len(objArr) == 0 || objArr[0] == nil { - return gkehub.EmptyFeatureMembershipMesh + return EmptyFeatureMembershipMesh } obj := objArr[0].(map[string]interface{}) - return &gkehub.FeatureMembershipMesh{ - ControlPlane: gkehub.FeatureMembershipMeshControlPlaneEnumRef(obj["control_plane"].(string)), - Management: gkehub.FeatureMembershipMeshManagementEnumRef(obj["management"].(string)), + return &FeatureMembershipMesh{ + ControlPlane: 
FeatureMembershipMeshControlPlaneEnumRef(obj["control_plane"].(string)), + Management: FeatureMembershipMeshManagementEnumRef(obj["management"].(string)), } } -func flattenGkeHubFeatureMembershipMesh(obj *gkehub.FeatureMembershipMesh) interface{} { +func flattenGkeHubFeatureMembershipMesh(obj *FeatureMembershipMesh) interface{} { if obj == nil || obj.Empty() { return nil } @@ -1427,22 +1419,22 @@ func flattenGkeHubFeatureMembershipMesh(obj *gkehub.FeatureMembershipMesh) inter } -func expandGkeHubFeatureMembershipPolicycontroller(o interface{}) *gkehub.FeatureMembershipPolicycontroller { +func expandGkeHubFeatureMembershipPolicycontroller(o interface{}) *FeatureMembershipPolicycontroller { if o == nil { - return gkehub.EmptyFeatureMembershipPolicycontroller + return EmptyFeatureMembershipPolicycontroller } objArr := o.([]interface{}) if len(objArr) == 0 || objArr[0] == nil { - return gkehub.EmptyFeatureMembershipPolicycontroller + return EmptyFeatureMembershipPolicycontroller } obj := objArr[0].(map[string]interface{}) - return &gkehub.FeatureMembershipPolicycontroller{ + return &FeatureMembershipPolicycontroller{ PolicyControllerHubConfig: expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfig(obj["policy_controller_hub_config"]), Version: dcl.StringOrNil(obj["version"].(string)), } } -func flattenGkeHubFeatureMembershipPolicycontroller(obj *gkehub.FeatureMembershipPolicycontroller) interface{} { +func flattenGkeHubFeatureMembershipPolicycontroller(obj *FeatureMembershipPolicycontroller) interface{} { if obj == nil || obj.Empty() { return nil } @@ -1455,21 +1447,21 @@ func flattenGkeHubFeatureMembershipPolicycontroller(obj *gkehub.FeatureMembershi } -func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfig(o interface{}) *gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfig { +func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfig(o interface{}) 
*FeatureMembershipPolicycontrollerPolicyControllerHubConfig { if o == nil { - return gkehub.EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfig + return EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfig } objArr := o.([]interface{}) if len(objArr) == 0 || objArr[0] == nil { - return gkehub.EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfig + return EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfig } obj := objArr[0].(map[string]interface{}) - return &gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfig{ + return &FeatureMembershipPolicycontrollerPolicyControllerHubConfig{ AuditIntervalSeconds: dcl.Int64OrNil(int64(obj["audit_interval_seconds"].(int))), ConstraintViolationLimit: dcl.Int64(int64(obj["constraint_violation_limit"].(int))), DeploymentConfigs: expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsMap(obj["deployment_configs"]), - ExemptableNamespaces: tpgdclresource.ExpandStringArray(obj["exemptable_namespaces"]), - InstallSpec: gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnumRef(obj["install_spec"].(string)), + ExemptableNamespaces: dcl.ExpandStringArray(obj["exemptable_namespaces"]), + InstallSpec: FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnumRef(obj["install_spec"].(string)), LogDeniesEnabled: dcl.Bool(obj["log_denies_enabled"].(bool)), Monitoring: expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring(obj["monitoring"]), MutationEnabled: dcl.Bool(obj["mutation_enabled"].(bool)), @@ -1478,7 +1470,7 @@ func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfig(o in } } -func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfig(obj *gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfig) interface{} { +func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfig(obj 
*FeatureMembershipPolicycontrollerPolicyControllerHubConfig) interface{} { if obj == nil || obj.Empty() { return nil } @@ -1499,7 +1491,7 @@ func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfig(obj } -func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsMap(o interface{}) map[string]gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs { +func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsMap(o interface{}) map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs { if o == nil { return nil } @@ -1511,7 +1503,7 @@ func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeplo return nil } - items := make(map[string]gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs) + items := make(map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs) for _, item := range objs { i := expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs(item) if item != nil { @@ -1522,21 +1514,21 @@ func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeplo return items } -func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs(o interface{}) *gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs { +func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs(o interface{}) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs { if o == nil { return nil } obj := o.(map[string]interface{}) - return &gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs{ + return &FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs{ ContainerResources: 
expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources(obj["container_resources"]), - PodAffinity: gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnumRef(obj["pod_affinity"].(string)), + PodAffinity: FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnumRef(obj["pod_affinity"].(string)), PodTolerations: expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsArray(obj["pod_tolerations"]), ReplicaCount: dcl.Int64(int64(obj["replica_count"].(int))), } } -func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsMap(objs map[string]gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs) []interface{} { +func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsMap(objs map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs) []interface{} { if objs == nil { return nil } @@ -1550,7 +1542,7 @@ func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDepl return items } -func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs(obj *gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs, name string) interface{} { +func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs(obj *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs, name string) interface{} { if obj == nil { return nil } @@ -1567,22 +1559,22 @@ func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDepl } -func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources(o interface{}) *gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources { +func 
expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources(o interface{}) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources { if o == nil { - return gkehub.EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources + return EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources } objArr := o.([]interface{}) if len(objArr) == 0 || objArr[0] == nil { - return gkehub.EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources + return EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources } obj := objArr[0].(map[string]interface{}) - return &gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources{ + return &FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources{ Limits: expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits(obj["limits"]), Requests: expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests(obj["requests"]), } } -func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources(obj *gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources) interface{} { +func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources(obj *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources) interface{} { if obj == nil || obj.Empty() { return nil } @@ -1595,22 +1587,22 @@ func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDepl } -func 
expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits(o interface{}) *gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits { +func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits(o interface{}) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits { if o == nil { - return gkehub.EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits + return EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits } objArr := o.([]interface{}) if len(objArr) == 0 || objArr[0] == nil { - return gkehub.EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits + return EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits } obj := objArr[0].(map[string]interface{}) - return &gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits{ + return &FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits{ Cpu: dcl.String(obj["cpu"].(string)), Memory: dcl.String(obj["memory"].(string)), } } -func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits(obj *gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits) interface{} { +func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits(obj *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits) interface{} { if obj == nil || obj.Empty() { return nil } @@ -1623,22 +1615,22 @@ func 
flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDepl } -func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests(o interface{}) *gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests { +func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests(o interface{}) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests { if o == nil { - return gkehub.EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests + return EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests } objArr := o.([]interface{}) if len(objArr) == 0 || objArr[0] == nil { - return gkehub.EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests + return EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests } obj := objArr[0].(map[string]interface{}) - return &gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests{ + return &FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests{ Cpu: dcl.String(obj["cpu"].(string)), Memory: dcl.String(obj["memory"].(string)), } } -func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests(obj *gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests) interface{} { +func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests(obj *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests) interface{} { if obj == nil 
|| obj.Empty() { return nil } @@ -1650,17 +1642,17 @@ func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDepl return []interface{}{transformed} } -func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsArray(o interface{}) []gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations { +func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsArray(o interface{}) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations { if o == nil { - return make([]gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations, 0) + return make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations, 0) } objs := o.([]interface{}) if len(objs) == 0 || objs[0] == nil { - return make([]gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations, 0) + return make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations, 0) } - items := make([]gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations, 0, len(objs)) + items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations, 0, len(objs)) for _, item := range objs { i := expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations(item) items = append(items, *i) @@ -1669,13 +1661,13 @@ func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeplo return items } -func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations(o interface{}) *gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations { +func 
expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations(o interface{}) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations { if o == nil { - return gkehub.EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations + return EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations } obj := o.(map[string]interface{}) - return &gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations{ + return &FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations{ Effect: dcl.String(obj["effect"].(string)), Key: dcl.String(obj["key"].(string)), Operator: dcl.String(obj["operator"].(string)), @@ -1683,7 +1675,7 @@ func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeplo } } -func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsArray(objs []gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations) []interface{} { +func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerationsArray(objs []FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations) []interface{} { if objs == nil { return nil } @@ -1697,7 +1689,7 @@ func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDepl return items } -func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations(obj *gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations) interface{} { +func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations(obj *FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations) interface{} { if obj == nil || 
obj.Empty() { return nil } @@ -1712,7 +1704,7 @@ func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigDepl } -func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring(o interface{}) *gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring { +func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring(o interface{}) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring { if o == nil { return nil } @@ -1721,12 +1713,12 @@ func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonit return nil } obj := objArr[0].(map[string]interface{}) - return &gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring{ + return &FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring{ Backends: expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsArray(obj["backends"]), } } -func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring(obj *gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring) interface{} { +func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring(obj *FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring) interface{} { if obj == nil || obj.Empty() { return nil } @@ -1738,7 +1730,7 @@ func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigMoni } -func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent(o interface{}) *gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent { +func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent(o interface{}) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent { if o == nil { return nil } @@ -1747,13 +1739,13 @@ func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolic return nil } obj := 
objArr[0].(map[string]interface{}) - return &gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent{ + return &FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent{ Bundles: expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesMap(obj["bundles"]), TemplateLibrary: expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary(obj["template_library"]), } } -func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent(obj *gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent) interface{} { +func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent(obj *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent) interface{} { if obj == nil || obj.Empty() { return nil } @@ -1766,19 +1758,19 @@ func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPoli } -func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesMap(o interface{}) map[string]gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles { +func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesMap(o interface{}) map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles { if o == nil { - return make(map[string]gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles) + return make(map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles) } o = o.(*schema.Set).List() objs := o.([]interface{}) if len(objs) == 0 || objs[0] == nil { - return make(map[string]gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles) + return make(map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles) } - items := 
make(map[string]gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles) + items := make(map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles) for _, item := range objs { i := expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles(item) if item != nil { @@ -1789,18 +1781,18 @@ func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolic return items } -func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles(o interface{}) *gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles { +func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles(o interface{}) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles { if o == nil { - return gkehub.EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles + return EmptyFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles } obj := o.(map[string]interface{}) - return &gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles{ - ExemptedNamespaces: tpgdclresource.ExpandStringArray(obj["exempted_namespaces"]), + return &FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles{ + ExemptedNamespaces: dcl.ExpandStringArray(obj["exempted_namespaces"]), } } -func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesMap(objs map[string]gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles) []interface{} { +func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundlesMap(objs map[string]FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles) []interface{} { if objs == nil { return nil } @@ -1814,7 +1806,7 @@ func 
flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPoli return items } -func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles(obj *gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles, name string) interface{} { +func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles(obj *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles, name string) interface{} { if obj == nil { return nil } @@ -1828,7 +1820,7 @@ func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPoli } -func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary(o interface{}) *gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary { +func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary(o interface{}) *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary { if o == nil { return nil } @@ -1837,12 +1829,12 @@ func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolic return nil } obj := objArr[0].(map[string]interface{}) - return &gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary{ - Installation: gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnumRef(obj["installation"].(string)), + return &FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary{ + Installation: FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnumRef(obj["installation"].(string)), } } -func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary(obj *gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary) 
interface{} { +func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary(obj *FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary) interface{} { if obj == nil || obj.Empty() { return nil } @@ -1854,7 +1846,7 @@ func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigPoli } -func flattenGkeHubFeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsArray(obj []gkehub.FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum) interface{} { +func flattenGkeHubFeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsArray(obj []FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum) interface{} { if obj == nil { return nil } @@ -1864,16 +1856,16 @@ func flattenGkeHubFeatureMembershipConfigmanagementPolicyControllerMonitoringBac } return items } -func expandGkeHubFeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsArray(o interface{}) []gkehub.FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum { +func expandGkeHubFeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsArray(o interface{}) []FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum { objs := o.([]interface{}) - items := make([]gkehub.FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum, 0, len(objs)) + items := make([]FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum, 0, len(objs)) for _, item := range objs { - i := gkehub.FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnumRef(item.(string)) + i := FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnumRef(item.(string)) items = append(items, *i) } return items } -func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsArray(obj []gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum) interface{} { 
+func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsArray(obj []FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum) interface{} { if obj == nil { return nil } @@ -1883,11 +1875,11 @@ func flattenGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigMoni } return items } -func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsArray(o interface{}) []gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum { +func expandGkeHubFeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsArray(o interface{}) []FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum { objs := o.([]interface{}) - items := make([]gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum, 0, len(objs)) + items := make([]FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum, 0, len(objs)) for _, item := range objs { - i := gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnumRef(item.(string)) + i := FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnumRef(item.(string)) items = append(items, *i) } return items diff --git a/mmv1/third_party/terraform/services/gkehub/resource_gke_hub_feature_membership_test.go.tmpl b/mmv1/third_party/terraform/services/gkehub/resource_gke_hub_feature_membership_test.go.tmpl index eb6beb9f6f96..f99844c4c398 100644 --- a/mmv1/third_party/terraform/services/gkehub/resource_gke_hub_feature_membership_test.go.tmpl +++ b/mmv1/third_party/terraform/services/gkehub/resource_gke_hub_feature_membership_test.go.tmpl @@ -6,12 +6,13 @@ import ( "strings" "testing" - dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" 
"github.com/hashicorp/terraform-provider-google/google/acctest" "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/services/gkehub" + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" "github.com/hashicorp/terraform-provider-google/google/tpgresource" transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) @@ -1241,14 +1242,14 @@ resource "google_gke_hub_membership" "membership_acmoci" { func testAccCheckGkeHubFeatureMembershipPresent(t *testing.T, project, location, feature, membership string) resource.TestCheckFunc { return func(s *terraform.State) error { config := acctest.GoogleProviderConfig(t) - obj := &FeatureMembership{ + obj := &gkehub.FeatureMembership{ Feature: dcl.StringOrNil(feature), Location: dcl.StringOrNil(location), Membership: dcl.StringOrNil(membership), Project: dcl.String(project), } - _, err := transport_tpg.NewDCLGkeHubClient(config, "", "", 0).GetFeatureMembership(context.Background(), obj) + _, err := gkehub.NewDCLGkeHubClient(config, "", "", 0).GetFeatureMembership(context.Background(), obj) if err != nil { return err } @@ -1259,14 +1260,14 @@ func testAccCheckGkeHubFeatureMembershipPresent(t *testing.T, project, location, func testAccCheckGkeHubFeatureMembershipNotPresent(t *testing.T, project, location, feature, membership string) resource.TestCheckFunc { return func(s *terraform.State) error { config := acctest.GoogleProviderConfig(t) - obj := &FeatureMembership{ + obj := &gkehub.FeatureMembership{ Feature: dcl.StringOrNil(feature), Location: dcl.StringOrNil(location), Membership: dcl.StringOrNil(membership), Project: dcl.String(project), } - _, err := transport_tpg.NewDCLGkeHubClient(config, "", "", 0).GetFeatureMembership(context.Background(), obj) + _, err := gkehub.NewDCLGkeHubClient(config, "", "", 0).GetFeatureMembership(context.Background(), obj) if err == nil { return fmt.Errorf("Did not expect to find GKE 
Feature Membership for projects/%s/locations/%s/features/%s/membershipId/%s", project, location, feature, membership) } diff --git a/mmv1/third_party/terraform/services/recaptchaenterprise/resource_recaptcha_enterprise_key_generated_test.go.tmpl b/mmv1/third_party/terraform/services/recaptchaenterprise/resource_recaptcha_enterprise_key_generated_test.go.tmpl index a7f3d8d1e71f..3e410201ed2e 100644 --- a/mmv1/third_party/terraform/services/recaptchaenterprise/resource_recaptcha_enterprise_key_generated_test.go.tmpl +++ b/mmv1/third_party/terraform/services/recaptchaenterprise/resource_recaptcha_enterprise_key_generated_test.go.tmpl @@ -3,20 +3,15 @@ package recaptchaenterprise_test import ( "context" "fmt" - dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -{{- if eq $.TargetVersionName "ga" }} - recaptchaenterprise "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/recaptchaenterprise" -{{- else }} - recaptchaenterprise "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/recaptchaenterprise/beta" -{{- end }} - "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/terraform" "strings" "testing" - + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-google/google/acctest" "github.com/hashicorp/terraform-provider-google/google/envvar" - transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/services/recaptchaenterprise" + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" ) func TestAccRecaptchaEnterpriseKey_AndroidKey(t *testing.T) { @@ -486,7 +481,7 @@ func testAccCheckRecaptchaEnterpriseKeyDestroyProducer(t *testing.T) func(s *ter Name: dcl.StringOrNil(rs.Primary.Attributes["name"]), } - client := 
transport_tpg.NewDCLRecaptchaEnterpriseClient(config, config.UserAgent, billingProject, 0) + client := recaptchaenterprise.NewDCLRecaptchaEnterpriseClient(config, config.UserAgent, billingProject, 0) _, err := client.GetKey(context.Background(), obj) if err == nil { return fmt.Errorf("google_recaptcha_enterprise_key still exists %v", obj) From c398b0994050108e976d73dcee775efdc9bc3c13 Mon Sep 17 00:00:00 2001 From: Chris Hawk Date: Tue, 17 Mar 2026 17:12:49 -0700 Subject: [PATCH 03/13] Move DCL resource docs to handwritten --- .../website/docs/r/apikeys_key.html.markdown | 274 ++++++ .../assured_workloads_workload.html.markdown | 319 +++++++ ...louddeploy_delivery_pipeline.html.markdown | 599 +++++++++++++ .../docs/r/clouddeploy_target.html.markdown | 362 ++++++++ .../r/container_aws_cluster.html.markdown | 638 ++++++++++++++ .../r/container_aws_node_pool.html.markdown | 822 ++++++++++++++++++ .../r/container_azure_client.html.markdown | 105 +++ .../r/container_azure_cluster.html.markdown | 425 +++++++++ .../r/container_azure_node_pool.html.markdown | 319 +++++++ .../docs/r/dataplex_asset.html.markdown | 270 ++++++ .../docs/r/dataplex_lake.html.markdown | 144 +++ .../docs/r/dataplex_zone.html.markdown | 224 +++++ .../r/firebaserules_release.html.markdown | 171 ++++ .../r/firebaserules_ruleset.html.markdown | 140 +++ .../r/recaptcha_enterprise_key.html.markdown | 319 +++++++ 15 files changed, 5131 insertions(+) create mode 100644 mmv1/third_party/terraform/website/docs/r/apikeys_key.html.markdown create mode 100644 mmv1/third_party/terraform/website/docs/r/assured_workloads_workload.html.markdown create mode 100644 mmv1/third_party/terraform/website/docs/r/clouddeploy_delivery_pipeline.html.markdown create mode 100644 mmv1/third_party/terraform/website/docs/r/clouddeploy_target.html.markdown create mode 100644 mmv1/third_party/terraform/website/docs/r/container_aws_cluster.html.markdown create mode 100644 
mmv1/third_party/terraform/website/docs/r/container_aws_node_pool.html.markdown create mode 100644 mmv1/third_party/terraform/website/docs/r/container_azure_client.html.markdown create mode 100644 mmv1/third_party/terraform/website/docs/r/container_azure_cluster.html.markdown create mode 100644 mmv1/third_party/terraform/website/docs/r/container_azure_node_pool.html.markdown create mode 100644 mmv1/third_party/terraform/website/docs/r/dataplex_asset.html.markdown create mode 100644 mmv1/third_party/terraform/website/docs/r/dataplex_lake.html.markdown create mode 100644 mmv1/third_party/terraform/website/docs/r/dataplex_zone.html.markdown create mode 100644 mmv1/third_party/terraform/website/docs/r/firebaserules_release.html.markdown create mode 100644 mmv1/third_party/terraform/website/docs/r/firebaserules_ruleset.html.markdown create mode 100644 mmv1/third_party/terraform/website/docs/r/recaptcha_enterprise_key.html.markdown diff --git a/mmv1/third_party/terraform/website/docs/r/apikeys_key.html.markdown b/mmv1/third_party/terraform/website/docs/r/apikeys_key.html.markdown new file mode 100644 index 000000000000..261ce7030cf0 --- /dev/null +++ b/mmv1/third_party/terraform/website/docs/r/apikeys_key.html.markdown @@ -0,0 +1,274 @@ +--- +subcategory: "Apikeys" +description: |- + The Apikeys Key resource +--- + +# google_apikeys_key + +The Apikeys Key resource + +## Example Usage - android_key +A basic example of a android api keys key +```hcl +resource "google_apikeys_key" "primary" { + name = "key" + display_name = "sample-key" + + restrictions { + android_key_restrictions { + allowed_applications { + package_name = "com.example.app123" + sha1_fingerprint = "1699466a142d4682a5f91b50fdf400f2358e2b0b" + } + } + + api_targets { + service = "translate.googleapis.com" + methods = ["GET*"] + } + } +} + + +``` +## Example Usage - basic_key +A basic example of a api keys key +```hcl +resource "google_apikeys_key" "primary" { + name = "key" + display_name = "sample-key" + + 
restrictions { + api_targets { + service = "translate.googleapis.com" + methods = ["GET*"] + } + + browser_key_restrictions { + allowed_referrers = [".*"] + } + } +} + + +``` +## Example Usage - ios_key +A basic example of a ios api keys key +```hcl +resource "google_apikeys_key" "primary" { + name = "key" + display_name = "sample-key" + + restrictions { + api_targets { + service = "translate.googleapis.com" + methods = ["GET*"] + } + + ios_key_restrictions { + allowed_bundle_ids = ["com.google.app.macos"] + } + } +} + + +``` +## Example Usage - minimal_key +A minimal example of a api keys key +```hcl +resource "google_apikeys_key" "primary" { + name = "key" + display_name = "sample-key" +} + + +``` +## Example Usage - server_key +A basic example of a server api keys key +```hcl +resource "google_apikeys_key" "primary" { + name = "key" + display_name = "sample-key" + + restrictions { + api_targets { + service = "translate.googleapis.com" + methods = ["GET*"] + } + + server_key_restrictions { + allowed_ips = ["127.0.0.1"] + } + } +} + + +``` +## Example Usage - service_account_key +```hcl +resource "google_apikeys_key" "primary" { + name = "key" + display_name = "sample-key" + project = google_project.project.project_id + service_account_email = google_service_account.key_service_account.email +} + +resource "google_project" "project" { + project_id = "app" + name = "app" + org_id = "123456789" + deletion_policy = "DELETE" +} + +resource "google_service_account" "key_service_account" { + account_id = "app" + project = google_project.project.project_id + display_name = "Test Service Account" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - + (Required) + The resource name of the key. The name must be unique within the project, must conform with RFC-1034, is restricted to lower-cased letters, and has a maximum length of 63 characters. In another word, the name must match the regular expression: `[a-z]([a-z0-9-]{0,61}[a-z0-9])?`. 
+ + + +- - - + +* `display_name` - + (Optional) + Human-readable display name of this API key. Modifiable by user. + +* `project` - + (Optional) + The project for the resource + +* `restrictions` - + (Optional) + Key restrictions. + +* `service_account_email` - + (Optional) + The email of the service account the key is bound to. If this field is specified, the key is a service account bound key and auth enabled. See [Documentation](https://cloud.google.com/docs/authentication/api-keys?#api-keys-bound-sa) for more details. + + + +The `restrictions` block supports: + +* `android_key_restrictions` - + (Optional) + The Android apps that are allowed to use the key. + +* `api_targets` - + (Optional) + A restriction for a specific service and optionally one or more specific methods. Requests are allowed if they match any of these restrictions. If no restrictions are specified, all targets are allowed. + +* `browser_key_restrictions` - + (Optional) + The HTTP referrers (websites) that are allowed to use the key. + +* `ios_key_restrictions` - + (Optional) + The iOS apps that are allowed to use the key. + +* `server_key_restrictions` - + (Optional) + The IP addresses of callers that are allowed to use the key. + +The `android_key_restrictions` block supports: + +* `allowed_applications` - + (Required) + A list of Android applications that are allowed to make API calls with this key. + +The `allowed_applications` block supports: + +* `package_name` - + (Required) + The package name of the application. + +* `sha1_fingerprint` - + (Required) + The SHA1 fingerprint of the application. For example, both sha1 formats are acceptable : DA:39:A3:EE:5E:6B:4B:0D:32:55:BF:EF:95:60:18:90:AF:D8:07:09 or DA39A3EE5E6B4B0D3255BFEF95601890AFD80709. Output format is the latter. + +The `api_targets` block supports: + +* `methods` - + (Optional) + Optional. List of one or more methods that can be called. If empty, all methods for the service are allowed. 
A wildcard (*) can be used as the last symbol. Valid examples: `google.cloud.translate.v2.TranslateService.GetSupportedLanguage` `TranslateText` `Get*` `translate.googleapis.com.Get*` + +* `service` - + (Required) + The service for this restriction. It should be the canonical service name, for example: `translate.googleapis.com`. You can use `gcloud services list` to get a list of services that are enabled in the project. + +The `browser_key_restrictions` block supports: + +* `allowed_referrers` - + (Required) + A list of regular expressions for the referrer URLs that are allowed to make API calls with this key. + +The `ios_key_restrictions` block supports: + +* `allowed_bundle_ids` - + (Required) + A list of bundle IDs that are allowed when making API calls with this key. + +The `server_key_restrictions` block supports: + +* `allowed_ips` - + (Required) + A list of the caller IP addresses that are allowed to make API calls with this key. + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `id` - an identifier for the resource with format `projects/{{project}}/locations/global/keys/{{name}}` + +* `key_string` - + Output only. An encrypted and signed value held by this key. This field can be accessed only through the `GetKeyString` method. + +* `uid` - + Output only. Unique id in UUID4 format. + +## Timeouts + +This resource provides the following +[Timeouts](https://developer.hashicorp.com/terraform/plugin/sdkv2/resources/retries-and-customizable-timeouts) configuration options: + +- `create` - Default is 20 minutes. +- `update` - Default is 20 minutes. +- `delete` - Default is 20 minutes. 
+ +## Import + +Key can be imported using any of these accepted formats: +* `projects/{{project}}/locations/global/keys/{{name}}` +* `{{project}}/{{name}}` +* `{{name}}` + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Key using one of the formats above. For example: + + +```tf +import { + id = "projects/{{project}}/locations/global/keys/{{name}}" + to = google_apikeys_key.default +} +``` + +When using the [`terraform import` command](https://developer.hashicorp.com/terraform/cli/commands/import), Key can be imported using one of the formats above. For example: + +``` +$ terraform import google_apikeys_key.default projects/{{project}}/locations/global/keys/{{name}} +$ terraform import google_apikeys_key.default {{project}}/{{name}} +$ terraform import google_apikeys_key.default {{name}} +``` + + + diff --git a/mmv1/third_party/terraform/website/docs/r/assured_workloads_workload.html.markdown b/mmv1/third_party/terraform/website/docs/r/assured_workloads_workload.html.markdown new file mode 100644 index 000000000000..c004c9e98c57 --- /dev/null +++ b/mmv1/third_party/terraform/website/docs/r/assured_workloads_workload.html.markdown @@ -0,0 +1,319 @@ +--- +subcategory: "AssuredWorkloads" +description: |- + The AssuredWorkloads Workload resource +--- + +# google_assured_workloads_workload + +The AssuredWorkloads Workload resource + +## Example Usage - basic_workload +A basic test of a assuredworkloads api +```hcl +resource "google_assured_workloads_workload" "primary" { + compliance_regime = "FEDRAMP_MODERATE" + display_name = "{{display}}" + location = "us-west1" + organization = "123456789" + billing_account = "billingAccounts/000000-0000000-0000000-000000" + + kms_settings { + next_rotation_time = "9999-10-02T15:01:23Z" + rotation_period = "10368000s" + } + + provisioned_resources_parent = "folders/519620126891" + + resource_settings { + display_name = "{{name}}" + resource_type = 
"CONSUMER_FOLDER" + } + + resource_settings { + resource_type = "ENCRYPTION_KEYS_PROJECT" + } + + resource_settings { + resource_id = "ring" + resource_type = "KEYRING" + } + + violation_notifications_enabled = true + + workload_options { + kaj_enrollment_type = "KEY_ACCESS_TRANSPARENCY_OFF" + } + + labels = { + label-one = "value-one" + } +} + + +``` +## Example Usage - sovereign_controls_workload +A Sovereign Controls test of the assuredworkloads api +```hcl +resource "google_assured_workloads_workload" "primary" { + compliance_regime = "EU_REGIONS_AND_SUPPORT" + display_name = "display" + location = "europe-west9" + organization = "123456789" + billing_account = "billingAccounts/000000-0000000-0000000-000000" + enable_sovereign_controls = true + + kms_settings { + next_rotation_time = "9999-10-02T15:01:23Z" + rotation_period = "10368000s" + } + + resource_settings { + resource_type = "CONSUMER_FOLDER" + } + + resource_settings { + resource_type = "ENCRYPTION_KEYS_PROJECT" + } + + resource_settings { + resource_id = "ring" + resource_type = "KEYRING" + } + + labels = { + label-one = "value-one" + } + provider = google-beta +} + +``` +## Example Usage - split_billing_partner_workload +A Split billing partner test of the assuredworkloads api +```hcl +resource "google_assured_workloads_workload" "primary" { + compliance_regime = "ASSURED_WORKLOADS_FOR_PARTNERS" + display_name = "display" + location = "europe-west8" + organization = "123456789" + billing_account = "billingAccounts/000000-0000000-0000000-000000" + partner = "SOVEREIGN_CONTROLS_BY_PSN" + + partner_permissions { + assured_workloads_monitoring = true + data_logs_viewer = true + service_access_approver = true + } + + partner_services_billing_account = "billingAccounts/01BF3F-2C6DE5-30C607" + + resource_settings { + resource_type = "CONSUMER_FOLDER" + } + + resource_settings { + resource_type = "ENCRYPTION_KEYS_PROJECT" + } + + resource_settings { + resource_id = "ring" + resource_type = "KEYRING" + } + + 
violation_notifications_enabled = true + + labels = { + label-one = "value-one" + } + provider = google-beta +} + +``` + +## Argument Reference + +The following arguments are supported: + +* `compliance_regime` - + (Required) + Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT, KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS, HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS, HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS_US_SUPPORT, IRS_1075 + +* `display_name` - + (Required) + Required. The user-assigned display name of the Workload. When present it must be between 4 to 30 characters. Allowed characters are: lowercase and uppercase letters, numbers, hyphen, and spaces. Example: My Workload + +* `location` - + (Required) + The location for the resource + +* `organization` - + (Required) + The organization for the resource + + + +- - - + +* `billing_account` - + (Optional) + Optional. Input only. The billing account used for the resources which are direct children of workload. This billing account is initially associated with the resources created as part of Workload creation. After the initial creation of these resources, the customer can change the assigned billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF`. + +* `enable_sovereign_controls` - + (Optional) + Optional. Indicates the sovereignty status of the given workload. Currently meant to be used by Europe/Canada customers. + +* `kms_settings` - + (Optional) + **DEPRECATED** Input only. Settings used to create a CMEK crypto key. When set, a project with a KMS CMEK key is provisioned. 
This field is deprecated as of Feb 28, 2022. In order to create a Keyring, callers should specify, ENCRYPTION_KEYS_PROJECT or KEYRING in ResourceSettings.resource_type field. + +* `labels` - + (Optional) + Optional. Labels applied to the workload. + +**Note**: This field is non-authoritative, and will only manage the labels present in your configuration. +Please refer to the field `effective_labels` for all of the labels present on the resource. + +* `partner` - + (Optional) + Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN, SOVEREIGN_CONTROLS_BY_CNTXT, SOVEREIGN_CONTROLS_BY_CNTXT_NO_EKM + +* `partner_permissions` - + (Optional) + Optional. Permissions granted to the AW Partner SA account for the customer workload + +* `partner_services_billing_account` - + (Optional) + Optional. Input only. Billing account necessary for purchasing services from Sovereign Partners. This field is required for creating SIA/PSN/CNTXT partner workloads. The caller should have 'billing.resourceAssociations.create' IAM permission on this billing-account. The format of this string is billingAccounts/AAAAAA-BBBBBB-CCCCCC. + +* `provisioned_resources_parent` - + (Optional) + Input only. The parent resource for the resources managed by this Assured Workload. May be either empty or a folder resource which is a child of the Workload parent. If not specified all resources are created under the parent organization. Format: folders/{folder_id} + +* `resource_settings` - + (Optional) + Input only. Resource properties that are used to customize workload resources. These properties (such as custom project id) will be used to create workload resources if possible. This field is optional. + +* `violation_notifications_enabled` - + (Optional) + Optional. Indicates whether the e-mail notification for a violation is enabled for a workload. 
This value will be by default True, and if not present will be considered as true. This should only be updated via updateWorkload call. Any Changes to this field during the createWorkload call will not be honored. This will always be true while creating the workload.
+
+* `workload_options` -
+  (Optional)
+  Optional. Used to specify certain options for a workload during workload creation - currently only supporting KAT Optionality for Regional Controls workloads.
+
+
+
+The `kms_settings` block supports:
+
+* `next_rotation_time` -
+  (Required)
+  Required. Input only. Immutable. The time at which the Key Management Service will automatically create a new version of the crypto key and mark it as the primary.
+
+* `rotation_period` -
+  (Required)
+  Required. Input only. Immutable. `next_rotation_time` will be advanced by this period when the Key Management Service automatically rotates a key. Must be at least 24 hours and at most 876,000 hours.
+
+The `partner_permissions` block supports:
+
+* `assured_workloads_monitoring` -
+  (Optional)
+  Optional. Allow partner to view violation alerts.
+
+* `data_logs_viewer` -
+  (Optional)
+  Allow the partner to view inspectability logs and monitoring violations.
+
+* `service_access_approver` -
+  (Optional)
+  Optional. Allow partner to view access approval logs.
+
+The `resource_settings` block supports:
+
+* `display_name` -
+  (Optional)
+  User-assigned resource display name. If not empty it will be used to create a resource with the specified name.
+
+* `resource_id` -
+  (Optional)
+  Resource identifier. For a project this represents projectId. If the project is already taken, the workload creation will fail. For KeyRing, this represents the keyring_id. For a folder, don't set this value as folder_id is assigned by Google.
+
+* `resource_type` -
+  (Optional)
+  Indicates the type of resource. 
This field should be specified to correspond the id to the right project type (CONSUMER_PROJECT or ENCRYPTION_KEYS_PROJECT) Possible values: RESOURCE_TYPE_UNSPECIFIED, CONSUMER_PROJECT, ENCRYPTION_KEYS_PROJECT, KEYRING, CONSUMER_FOLDER
+
+The `workload_options` block supports:
+
+* `kaj_enrollment_type` -
+  (Optional)
+  Indicates type of KAJ enrollment for the workload. Currently, only specifying KEY_ACCESS_TRANSPARENCY_OFF is implemented to not enroll in KAT-level KAJ enrollment for Regional Controls workloads. Possible values: KAJ_ENROLLMENT_TYPE_UNSPECIFIED, FULL_KAJ, EKM_ONLY, KEY_ACCESS_TRANSPARENCY_OFF
+
+## Attributes Reference
+
+In addition to the arguments listed above, the following computed attributes are exported:
+
+* `id` - an identifier for the resource with format `organizations/{{organization}}/locations/{{location}}/workloads/{{name}}`
+
+* `compliance_status` -
+  Output only. Count of active Violations in the Workload.
+
+* `compliant_but_disallowed_services` -
+  Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke workloads.restrictAllowedResources endpoint to allow your project developers to use these services in their environment.
+
+* `create_time` -
+  Output only. Immutable. The Workload creation timestamp.
+
+* `effective_labels` -
+  All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.
+
+* `ekm_provisioning_response` -
+  Optional. Represents the Ekm Provisioning State of the given workload.
+
+* `kaj_enrollment_state` -
+  Output only. Represents the KAJ enrollment state of the given workload. Possible values: KAJ_ENROLLMENT_STATE_UNSPECIFIED, KAJ_ENROLLMENT_STATE_PENDING, KAJ_ENROLLMENT_STATE_COMPLETE
+
+* `name` -
+  Output only. The resource name of the workload.
+
+* `resources` -
+  Output only. 
The resources associated with this workload. These resources will be created when creating the workload. If any of the projects already exist, the workload creation will fail. Always read only. + +* `saa_enrollment_response` - + Output only. Represents the SAA enrollment response of the given workload. SAA enrollment response is queried during workloads.get call. In failure cases, user friendly error message is shown in SAA details page. + +* `terraform_labels` - + The combination of labels configured directly on the resource and default labels configured on the provider. + +## Timeouts + +This resource provides the following +[Timeouts](https://developer.hashicorp.com/terraform/plugin/sdkv2/resources/retries-and-customizable-timeouts) configuration options: + +- `create` - Default is 20 minutes. +- `update` - Default is 20 minutes. +- `delete` - Default is 20 minutes. + +## Import + +Workload can be imported using any of these accepted formats: +* `organizations/{{organization}}/locations/{{location}}/workloads/{{name}}` +* `{{organization}}/{{location}}/{{name}}` + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Workload using one of the formats above. For example: + + +```tf +import { + id = "organizations/{{organization}}/locations/{{location}}/workloads/{{name}}" + to = google_assured_workloads_workload.default +} +``` + +When using the [`terraform import` command](https://developer.hashicorp.com/terraform/cli/commands/import), Workload can be imported using one of the formats above. 
For example: + +``` +$ terraform import google_assured_workloads_workload.default organizations/{{organization}}/locations/{{location}}/workloads/{{name}} +$ terraform import google_assured_workloads_workload.default {{organization}}/{{location}}/{{name}} +``` + + + diff --git a/mmv1/third_party/terraform/website/docs/r/clouddeploy_delivery_pipeline.html.markdown b/mmv1/third_party/terraform/website/docs/r/clouddeploy_delivery_pipeline.html.markdown new file mode 100644 index 000000000000..7cb4d99ae05c --- /dev/null +++ b/mmv1/third_party/terraform/website/docs/r/clouddeploy_delivery_pipeline.html.markdown @@ -0,0 +1,599 @@ +--- +subcategory: "Cloud Deploy" +description: |- + The Cloud Deploy `DeliveryPipeline` resource +--- + +# google_clouddeploy_delivery_pipeline + +The Cloud Deploy `DeliveryPipeline` resource + +## Example Usage - canary_delivery_pipeline +Creates a basic Cloud Deploy delivery pipeline +```hcl +resource "google_clouddeploy_delivery_pipeline" "primary" { + location = "us-west1" + name = "pipeline" + description = "basic description" + project = "my-project-name" + + serial_pipeline { + stages { + deploy_parameters { + values = { + deployParameterKey = "deployParameterValue" + } + + match_target_labels = {} + } + + profiles = ["example-profile-one", "example-profile-two"] + target_id = "example-target-one" + } + + stages { + profiles = [] + target_id = "example-target-two" + } + } + + annotations = { + my_first_annotation = "example-annotation-1" + + my_second_annotation = "example-annotation-2" + } + + labels = { + my_first_label = "example-label-1" + + my_second_label = "example-label-2" + } + provider = google-beta +} + +``` +## Example Usage - canary_service_networking_delivery_pipeline +Creates a basic Cloud Deploy delivery pipeline +```hcl +resource "google_clouddeploy_delivery_pipeline" "primary" { + location = "us-west1" + name = "pipeline" + description = "basic description" + project = "my-project-name" + + serial_pipeline { + stages { 
+ deploy_parameters { + values = { + deployParameterKey = "deployParameterValue" + } + + match_target_labels = {} + } + + profiles = ["example-profile-one", "example-profile-two"] + target_id = "example-target-one" + } + + stages { + profiles = [] + target_id = "example-target-two" + } + } + + annotations = { + my_first_annotation = "example-annotation-1" + + my_second_annotation = "example-annotation-2" + } + + labels = { + my_first_label = "example-label-1" + + my_second_label = "example-label-2" + } + provider = google-beta +} + +``` +## Example Usage - canaryrun_delivery_pipeline +Creates a basic Cloud Deploy delivery pipeline +```hcl +resource "google_clouddeploy_delivery_pipeline" "primary" { + location = "us-west1" + name = "pipeline" + description = "basic description" + project = "my-project-name" + + serial_pipeline { + stages { + deploy_parameters { + values = { + deployParameterKey = "deployParameterValue" + } + + match_target_labels = {} + } + + profiles = ["example-profile-one", "example-profile-two"] + target_id = "example-target-one" + } + + stages { + profiles = [] + target_id = "example-target-two" + } + } + + annotations = { + my_first_annotation = "example-annotation-1" + + my_second_annotation = "example-annotation-2" + } + + labels = { + my_first_label = "example-label-1" + + my_second_label = "example-label-2" + } + provider = google-beta +} + +``` +## Example Usage - delivery_pipeline +Creates a basic Cloud Deploy delivery pipeline +```hcl +resource "google_clouddeploy_delivery_pipeline" "primary" { + location = "us-west1" + name = "pipeline" + description = "basic description" + project = "my-project-name" + + serial_pipeline { + stages { + deploy_parameters { + values = { + deployParameterKey = "deployParameterValue" + } + + match_target_labels = {} + } + + profiles = ["example-profile-one", "example-profile-two"] + target_id = "example-target-one" + } + + stages { + profiles = [] + target_id = "example-target-two" + } + } + + annotations 
= { + my_first_annotation = "example-annotation-1" + + my_second_annotation = "example-annotation-2" + } + + labels = { + my_first_label = "example-label-1" + + my_second_label = "example-label-2" + } +} + + +``` +## Example Usage - verify_delivery_pipeline +tests creating and updating a delivery pipeline with deployment verification strategy +```hcl +resource "google_clouddeploy_delivery_pipeline" "primary" { + location = "us-west1" + name = "pipeline" + description = "basic description" + project = "my-project-name" + + serial_pipeline { + stages { + deploy_parameters { + values = { + deployParameterKey = "deployParameterValue" + } + + match_target_labels = {} + } + + profiles = ["example-profile-one", "example-profile-two"] + target_id = "example-target-one" + } + + stages { + profiles = [] + target_id = "example-target-two" + } + } + + annotations = { + my_first_annotation = "example-annotation-1" + + my_second_annotation = "example-annotation-2" + } + + labels = { + my_first_label = "example-label-1" + + my_second_label = "example-label-2" + } + provider = google-beta +} + +``` + +## Argument Reference + +The following arguments are supported: + +* `location` - + (Required) + The location for the resource + +* `name` - + (Required) + Name of the `DeliveryPipeline`. Format is `[a-z]([a-z0-9-]{0,61}[a-z0-9])?`. + + + +- - - + +* `annotations` - + (Optional) + User annotations. These attributes can only be set and used by the user, and not by Google Cloud Deploy. See https://google.aip.dev/128#annotations for more details such as format and size limitations. + +**Note**: This field is non-authoritative, and will only manage the annotations present in your configuration. +Please refer to the field `effective_annotations` for all of the annotations present on the resource. + +* `description` - + (Optional) + Description of the `DeliveryPipeline`. Max length is 255 characters. 
+ +* `labels` - + (Optional) + Labels are attributes that can be set and used by both the user and by Google Cloud Deploy. Labels must meet the following constraints: * Keys and values can contain only lowercase letters, numeric characters, underscores, and dashes. * All characters must use UTF-8 encoding, and international characters are allowed. * Keys must start with a lowercase letter or international character. * Each resource is limited to a maximum of 64 labels. Both keys and values are additionally constrained to be <= 128 bytes. + +**Note**: This field is non-authoritative, and will only manage the labels present in your configuration. +Please refer to the field `effective_labels` for all of the labels present on the resource. + +* `project` - + (Optional) + The project for the resource + +* `serial_pipeline` - + (Optional) + SerialPipeline defines a sequential set of stages for a `DeliveryPipeline`. + +* `suspended` - + (Optional) + When suspended, no new releases or rollouts can be created, but in-progress ones will complete. + + + +The `serial_pipeline` block supports: + +* `stages` - + (Optional) + Each stage specifies configuration for a `Target`. The ordering of this list defines the promotion flow. + +The `stages` block supports: + +* `deploy_parameters` - + (Optional) + Optional. The deploy parameters to use for the target in this stage. + +* `profiles` - + (Optional) + Skaffold profiles to use when rendering the manifest for this stage's `Target`. + +* `strategy` - + (Optional) + Optional. The strategy to use for a `Rollout` to this stage. + +* `target_id` - + (Optional) + The target_id to which this stage points. This field refers exclusively to the last segment of a target name. For example, this field would just be `my-target` (rather than `projects/project/locations/location/targets/my-target`). The location of the `Target` is inferred to be the same as the location of the `DeliveryPipeline` that contains this `Stage`. 
+ +The `deploy_parameters` block supports: + +* `match_target_labels` - + (Optional) + Optional. Deploy parameters are applied to targets with match labels. If unspecified, deploy parameters are applied to all targets (including child targets of a multi-target). + +* `values` - + (Required) + Required. Values are deploy parameters in key-value pairs. + +The `strategy` block supports: + +* `canary` - + (Optional) + Canary deployment strategy provides progressive percentage based deployments to a Target. + +* `standard` - + (Optional) + Standard deployment strategy executes a single deploy and allows verifying the deployment. + +The `canary` block supports: + +* `canary_deployment` - + (Optional) + Configures the progressive based deployment for a Target. + +* `custom_canary_deployment` - + (Optional) + Configures the progressive based deployment for a Target, but allows customizing at the phase level where a phase represents each of the percentage deployments. + +* `runtime_config` - + (Optional) + Optional. Runtime specific configurations for the deployment strategy. The runtime configuration is used to determine how Cloud Deploy will split traffic to enable a progressive deployment. + +The `canary_deployment` block supports: + +* `percentages` - + (Required) + Required. The percentage based deployments that will occur as a part of a `Rollout`. List is expected in ascending order and each integer n is 0 <= n < 100. + +* `postdeploy` - + (Optional) + Optional. Configuration for the postdeploy job of the last phase. If this is not configured, postdeploy job will not be present. + +* `predeploy` - + (Optional) + Optional. Configuration for the predeploy job of the first phase. If this is not configured, predeploy job will not be present. + +* `verify` - + (Optional) + Whether to run verify tests after each percentage deployment. + +The `postdeploy` block supports: + +* `actions` - + (Optional) + Optional. 
A sequence of skaffold custom actions to invoke during execution of the postdeploy job. + +The `predeploy` block supports: + +* `actions` - + (Optional) + Optional. A sequence of skaffold custom actions to invoke during execution of the predeploy job. + +The `custom_canary_deployment` block supports: + +* `phase_configs` - + (Required) + Required. Configuration for each phase in the canary deployment in the order executed. + +The `phase_configs` block supports: + +* `percentage` - + (Required) + Required. Percentage deployment for the phase. + +* `phase_id` - + (Required) + Required. The ID to assign to the `Rollout` phase. This value must consist of lower-case letters, numbers, and hyphens, start with a letter and end with a letter or a number, and have a max length of 63 characters. In other words, it must match the following regex: `^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$`. + +* `postdeploy` - + (Optional) + Optional. Configuration for the postdeploy job of this phase. If this is not configured, postdeploy job will not be present for this phase. + +* `predeploy` - + (Optional) + Optional. Configuration for the predeploy job of this phase. If this is not configured, predeploy job will not be present for this phase. + +* `profiles` - + (Optional) + Skaffold profiles to use when rendering the manifest for this phase. These are in addition to the profiles list specified in the `DeliveryPipeline` stage. + +* `verify` - + (Optional) + Whether to run verify tests after the deployment. + +The `postdeploy` block supports: + +* `actions` - + (Optional) + Optional. A sequence of skaffold custom actions to invoke during execution of the postdeploy job. + +The `predeploy` block supports: + +* `actions` - + (Optional) + Optional. A sequence of skaffold custom actions to invoke during execution of the predeploy job. + +The `runtime_config` block supports: + +* `cloud_run` - + (Optional) + Cloud Run runtime configuration. 
+ +* `kubernetes` - + (Optional) + Kubernetes runtime configuration. + +The `cloud_run` block supports: + +* `automatic_traffic_control` - + (Optional) + Whether Cloud Deploy should update the traffic stanza in a Cloud Run Service on the user's behalf to facilitate traffic splitting. This is required to be true for CanaryDeployments, but optional for CustomCanaryDeployments. + +* `canary_revision_tags` - + (Optional) + Optional. A list of tags that are added to the canary revision while the canary phase is in progress. + +* `prior_revision_tags` - + (Optional) + Optional. A list of tags that are added to the prior revision while the canary phase is in progress. + +* `stable_revision_tags` - + (Optional) + Optional. A list of tags that are added to the final stable revision when the stable phase is applied. + +The `kubernetes` block supports: + +* `gateway_service_mesh` - + (Optional) + Kubernetes Gateway API service mesh configuration. + +* `service_networking` - + (Optional) + Kubernetes Service networking configuration. + +The `gateway_service_mesh` block supports: + +* `deployment` - + (Required) + Required. Name of the Kubernetes Deployment whose traffic is managed by the specified HTTPRoute and Service. + +* `http_route` - + (Required) + Required. Name of the Gateway API HTTPRoute. + +* `pod_selector_label` - + (Optional) + Optional. The label to use when selecting Pods for the Deployment and Service resources. This label must already be present in both resources. + +* `route_destinations` - + (Optional) + Optional. Route destinations allow configuring the Gateway API HTTPRoute to be deployed to additional clusters. This option is available for multi-cluster service mesh set ups that require the route to exist in the clusters that call the service. If unspecified, the HTTPRoute will only be deployed to the Target cluster. + +* `route_update_wait_time` - + (Optional) + Optional. The time to wait for route updates to propagate. 
The maximum configurable time is 3 hours, in seconds format. If unspecified, there is no wait time.
+
+* `service` -
+  (Required)
+  Required. Name of the Kubernetes Service.
+
+* `stable_cutback_duration` -
+  (Optional)
+  Optional. The amount of time to migrate traffic back from the canary Service to the original Service during the stable phase deployment. If specified, must be between 15s and 3600s. If unspecified, there is no cutback time.
+
+The `route_destinations` block supports:
+
+* `destination_ids` -
+  (Required)
+  Required. The clusters where the Gateway API HTTPRoute resource will be deployed to. Valid entries include the associated entities IDs configured in the Target resource and "@self" to include the Target cluster.
+
+* `propagate_service` -
+  (Optional)
+  Optional. Whether to propagate the Kubernetes Service to the route destination clusters. The Service will always be deployed to the Target cluster even if the HTTPRoute is not. This option may be used to facilitate successful DNS lookup in the route destination clusters. Can only be set to true if destinations are specified.
+
+The `service_networking` block supports:
+
+* `deployment` -
+  (Required)
+  Required. Name of the Kubernetes Deployment whose traffic is managed by the specified Service.
+
+* `disable_pod_overprovisioning` -
+  (Optional)
+  Optional. Whether to disable Pod overprovisioning. If Pod overprovisioning is disabled then Cloud Deploy will limit the number of total Pods used for the deployment strategy to the number of Pods the Deployment has on the cluster.
+
+* `pod_selector_label` -
+  (Optional)
+  Optional. The label to use when selecting Pods for the Deployment resource. This label must already be present in the Deployment.
+
+* `service` -
+  (Required)
+  Required. Name of the Kubernetes Service.
+
+The `standard` block supports:
+
+* `postdeploy` -
+  (Optional)
+  Optional. Configuration for the postdeploy job. If this is not configured, postdeploy job will not be present. 
+ +* `predeploy` - + (Optional) + Optional. Configuration for the predeploy job. If this is not configured, predeploy job will not be present. + +* `verify` - + (Optional) + Whether to verify a deployment. + +The `postdeploy` block supports: + +* `actions` - + (Optional) + Optional. A sequence of skaffold custom actions to invoke during execution of the postdeploy job. + +The `predeploy` block supports: + +* `actions` - + (Optional) + Optional. A sequence of skaffold custom actions to invoke during execution of the predeploy job. + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `id` - an identifier for the resource with format `projects/{{project}}/locations/{{location}}/deliveryPipelines/{{name}}` + +* `condition` - + Output only. Information around the state of the Delivery Pipeline. + +* `create_time` - + Output only. Time at which the pipeline was created. + +* `effective_annotations` - + All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services. + +* `effective_labels` - + All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services. + +* `etag` - + This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. + +* `terraform_labels` - + The combination of labels configured directly on the resource and default labels configured on the provider. + +* `uid` - + Output only. Unique identifier of the `DeliveryPipeline`. + +* `update_time` - + Output only. Most recent time at which the pipeline was updated. 
+ +## Timeouts + +This resource provides the following +[Timeouts](https://developer.hashicorp.com/terraform/plugin/sdkv2/resources/retries-and-customizable-timeouts) configuration options: + +- `create` - Default is 20 minutes. +- `update` - Default is 20 minutes. +- `delete` - Default is 20 minutes. + +## Import + +DeliveryPipeline can be imported using any of these accepted formats: +* `projects/{{project}}/locations/{{location}}/deliveryPipelines/{{name}}` +* `{{project}}/{{location}}/{{name}}` +* `{{location}}/{{name}}` + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import DeliveryPipeline using one of the formats above. For example: + + +```tf +import { + id = "projects/{{project}}/locations/{{location}}/deliveryPipelines/{{name}}" + to = google_clouddeploy_delivery_pipeline.default +} +``` + +When using the [`terraform import` command](https://developer.hashicorp.com/terraform/cli/commands/import), DeliveryPipeline can be imported using one of the formats above. 
For example: + +``` +$ terraform import google_clouddeploy_delivery_pipeline.default projects/{{project}}/locations/{{location}}/deliveryPipelines/{{name}} +$ terraform import google_clouddeploy_delivery_pipeline.default {{project}}/{{location}}/{{name}} +$ terraform import google_clouddeploy_delivery_pipeline.default {{location}}/{{name}} +``` + + + diff --git a/mmv1/third_party/terraform/website/docs/r/clouddeploy_target.html.markdown b/mmv1/third_party/terraform/website/docs/r/clouddeploy_target.html.markdown new file mode 100644 index 000000000000..f756bbb18144 --- /dev/null +++ b/mmv1/third_party/terraform/website/docs/r/clouddeploy_target.html.markdown @@ -0,0 +1,362 @@ +--- +subcategory: "Cloud Deploy" +description: |- + The Cloud Deploy `Target` resource +--- + +# google_clouddeploy_target + +The Cloud Deploy `Target` resource + +## Example Usage - multi_target +tests creating and updating a multi-target +```hcl +resource "google_clouddeploy_target" "primary" { + location = "us-west1" + name = "target" + deploy_parameters = {} + description = "multi-target description" + + execution_configs { + usages = ["RENDER", "DEPLOY"] + execution_timeout = "3600s" + } + + multi_target { + target_ids = ["1", "2"] + } + + project = "my-project-name" + require_approval = false + + annotations = { + my_first_annotation = "example-annotation-1" + + my_second_annotation = "example-annotation-2" + } + + labels = { + my_first_label = "example-label-1" + + my_second_label = "example-label-2" + } + provider = google-beta +} + +``` +## Example Usage - run_target +tests creating and updating a cloud run target +```hcl +resource "google_clouddeploy_target" "primary" { + location = "us-west1" + name = "target" + deploy_parameters = {} + description = "basic description" + + execution_configs { + usages = ["RENDER", "DEPLOY"] + execution_timeout = "3600s" + } + + project = "my-project-name" + require_approval = false + + run { + location = 
"projects/my-project-name/locations/us-west1" + } + + annotations = { + my_first_annotation = "example-annotation-1" + + my_second_annotation = "example-annotation-2" + } + + labels = { + my_first_label = "example-label-1" + + my_second_label = "example-label-2" + } + provider = google-beta +} + +``` +## Example Usage - target +Creates a basic Cloud Deploy target +```hcl +resource "google_clouddeploy_target" "primary" { + location = "us-west1" + name = "target" + + deploy_parameters = { + deployParameterKey = "deployParameterValue" + } + + description = "basic description" + + gke { + cluster = "projects/my-project-name/locations/us-west1/clusters/example-cluster-name" + } + + project = "my-project-name" + require_approval = false + + annotations = { + my_first_annotation = "example-annotation-1" + + my_second_annotation = "example-annotation-2" + } + + labels = { + my_first_label = "example-label-1" + + my_second_label = "example-label-2" + } +} + + +``` + +## Argument Reference + +The following arguments are supported: + +* `location` - + (Required) + The location for the resource + +* `name` - + (Required) + Name of the `Target`. Format is `[a-z]([a-z0-9-]{0,61}[a-z0-9])?`. + + + +- - - + +* `annotations` - + (Optional) + Optional. User annotations. These attributes can only be set and used by the user, and not by Google Cloud Deploy. See https://google.aip.dev/128#annotations for more details such as format and size limitations. + +**Note**: This field is non-authoritative, and will only manage the annotations present in your configuration. +Please refer to the field `effective_annotations` for all of the annotations present on the resource. + +* `anthos_cluster` - + (Optional) + Information specifying an Anthos Cluster. + +* `associated_entities` - + (Optional) + Optional. Map of entity IDs to their associated entities. Associated entities allows specifying places other than the deployment target for specific features. 
For example, the Gateway API canary can be configured to deploy the HTTPRoute to a different cluster(s) than the deployment cluster using associated entities. An entity ID must consist of lower-case letters, numbers, and hyphens, start with a letter and end with a letter or a number, and have a max length of 63 characters. In other words, it must match the following regex: `^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$`. + +* `custom_target` - + (Optional) + Optional. Information specifying a Custom Target. + +* `deploy_parameters` - + (Optional) + Optional. The deploy parameters to use for this target. + +* `description` - + (Optional) + Optional. Description of the `Target`. Max length is 255 characters. + +* `execution_configs` - + (Optional) + Configurations for all execution that relates to this `Target`. Each `ExecutionEnvironmentUsage` value may only be used in a single configuration; using the same value multiple times is an error. When one or more configurations are specified, they must include the `RENDER` and `DEPLOY` `ExecutionEnvironmentUsage` values. When no configurations are specified, execution will use the default specified in `DefaultPool`. + +* `gke` - + (Optional) + Information specifying a GKE Cluster. + +* `labels` - + (Optional) + Optional. Labels are attributes that can be set and used by both the user and by Google Cloud Deploy. Labels must meet the following constraints: * Keys and values can contain only lowercase letters, numeric characters, underscores, and dashes. * All characters must use UTF-8 encoding, and international characters are allowed. * Keys must start with a lowercase letter or international character. * Each resource is limited to a maximum of 64 labels. Both keys and values are additionally constrained to be <= 128 bytes. + +**Note**: This field is non-authoritative, and will only manage the labels present in your configuration. +Please refer to the field `effective_labels` for all of the labels present on the resource. 
+ +* `multi_target` - + (Optional) + Information specifying a multiTarget. + +* `project` - + (Optional) + The project for the resource + +* `require_approval` - + (Optional) + Optional. Whether or not the `Target` requires approval. + +* `run` - + (Optional) + Information specifying a Cloud Run deployment target. + + + +The `anthos_cluster` block supports: + +* `membership` - + (Optional) + Membership of the GKE Hub-registered cluster to which to apply the Skaffold configuration. Format is `projects/{project}/locations/{location}/memberships/{membership_name}`. + +The `associated_entities` block supports: + +* `anthos_clusters` - + (Optional) + Optional. Information specifying Anthos clusters as associated entities. + +* `entity_id` - + (Required) + The name for the key in the map for which this object is mapped to in the API + +* `gke_clusters` - + (Optional) + Optional. Information specifying GKE clusters as associated entities. + +The `anthos_clusters` block supports: + +* `membership` - + (Optional) + Optional. Membership of the GKE Hub-registered cluster to which to apply the Skaffold configuration. Format is `projects/{project}/locations/{location}/memberships/{membership_name}`. + +The `gke_clusters` block supports: + +* `cluster` - + (Optional) + Optional. Information specifying a GKE Cluster. Format is `projects/{project_id}/locations/{location_id}/clusters/{cluster_id}`. + +* `internal_ip` - + (Optional) + Optional. If true, `cluster` is accessed using the private IP address of the control plane endpoint. Otherwise, the default IP address of the control plane endpoint is used. The default IP address is the private IP address for clusters with private control-plane endpoints and the public IP address otherwise. Only specify this option when `cluster` is a [private GKE cluster](https://cloud.google.com/kubernetes-engine/docs/concepts/private-cluster-concept). + +* `proxy_url` - + (Optional) + Optional. 
Information specifying a GKE Cluster. Format is `projects/{project_id}/locations/{location_id}/clusters/{cluster_id}`.
If true, `cluster` is accessed using the private IP address of the control plane endpoint. Otherwise, the default IP address of the control plane endpoint is used. The default IP address is the private IP address for clusters with private control-plane endpoints and the public IP address otherwise. Only specify this option when `cluster` is a [private GKE cluster](https://cloud.google.com/kubernetes-engine/docs/concepts/private-cluster-concept). + +* `proxy_url` - + (Optional) + Optional. If set, used to configure a [proxy](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/#proxy) to the Kubernetes server. + +The `multi_target` block supports: + +* `target_ids` - + (Required) + Required. The target_ids of this multiTarget. + +The `run` block supports: + +* `location` - + (Required) + Required. The location where the Cloud Run Service should be located. Format is `projects/{project}/locations/{location}`. + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `id` - an identifier for the resource with format `projects/{{project}}/locations/{{location}}/targets/{{name}}` + +* `create_time` - + Output only. Time at which the `Target` was created. + +* `effective_annotations` - + All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services. + +* `effective_labels` - + All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services. + +* `etag` - + Optional. This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. + +* `target_id` - + Output only. Resource id of the `Target`. 
+ +* `terraform_labels` - + The combination of labels configured directly on the resource and default labels configured on the provider. + +* `uid` - + Output only. Unique identifier of the `Target`. + +* `update_time` - + Output only. Most recent time at which the `Target` was updated. + +## Timeouts + +This resource provides the following +[Timeouts](https://developer.hashicorp.com/terraform/plugin/sdkv2/resources/retries-and-customizable-timeouts) configuration options: + +- `create` - Default is 20 minutes. +- `update` - Default is 20 minutes. +- `delete` - Default is 20 minutes. + +## Import + +Target can be imported using any of these accepted formats: +* `projects/{{project}}/locations/{{location}}/targets/{{name}}` +* `{{project}}/{{location}}/{{name}}` +* `{{location}}/{{name}}` + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Target using one of the formats above. For example: + + +```tf +import { + id = "projects/{{project}}/locations/{{location}}/targets/{{name}}" + to = google_clouddeploy_target.default +} +``` + +When using the [`terraform import` command](https://developer.hashicorp.com/terraform/cli/commands/import), Target can be imported using one of the formats above. 
For example: + +``` +$ terraform import google_clouddeploy_target.default projects/{{project}}/locations/{{location}}/targets/{{name}} +$ terraform import google_clouddeploy_target.default {{project}}/{{location}}/{{name}} +$ terraform import google_clouddeploy_target.default {{location}}/{{name}} +``` + + + diff --git a/mmv1/third_party/terraform/website/docs/r/container_aws_cluster.html.markdown b/mmv1/third_party/terraform/website/docs/r/container_aws_cluster.html.markdown new file mode 100644 index 000000000000..66fbf2744973 --- /dev/null +++ b/mmv1/third_party/terraform/website/docs/r/container_aws_cluster.html.markdown @@ -0,0 +1,638 @@ +--- +subcategory: "ContainerAws" +description: |- + An Anthos cluster running on AWS. +--- + +# google_container_aws_cluster + +An Anthos cluster running on AWS. + +For more information, see: +* [Multicloud overview](https://cloud.google.com/kubernetes-engine/multi-cloud/docs) +## Example Usage - basic_aws_cluster +A basic example of a containeraws cluster +```hcl +data "google_container_aws_versions" "versions" { + project = "my-project-name" + location = "us-west1" +} + +resource "google_container_aws_cluster" "primary" { + authorization { + admin_users { + username = "my@service-account.com" + } + admin_groups { + group = "group@domain.com" + } + } + + aws_region = "my-aws-region" + + control_plane { + aws_services_authentication { + role_arn = "arn:aws:iam::012345678910:role/my--1p-dev-oneplatform" + role_session_name = "my--1p-dev-session" + } + + config_encryption { + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + } + + database_encryption { + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + } + + iam_instance_profile = "my--1p-dev-controlplane" + subnet_ids = ["subnet-00000000000000000"] + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + instance_type = "t3.medium" + + main_volume { + iops = 
3000 + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + size_gib = 10 + volume_type = "GP3" + } + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + size_gib = 10 + volume_type = "GP3" + } + + security_group_ids = ["sg-00000000000000000"] + + ssh_config { + ec2_key_pair = "my--1p-dev-ssh" + } + + tags = { + owner = "my@service-account.com" + } + } + + fleet { + project = "my-project-number" + } + + location = "us-west1" + name = "name" + + networking { + pod_address_cidr_blocks = ["10.2.0.0/16"] + service_address_cidr_blocks = ["10.1.0.0/16"] + vpc_id = "vpc-00000000000000000" + } + + annotations = { + label-one = "value-one" + } + + description = "A sample aws cluster" + project = "my-project-name" +} + + +``` +## Example Usage - basic_enum_aws_cluster +A basic example of a containeraws cluster with lowercase enums +```hcl +data "google_container_aws_versions" "versions" { + project = "my-project-name" + location = "us-west1" +} + +resource "google_container_aws_cluster" "primary" { + authorization { + admin_users { + username = "my@service-account.com" + } + } + + aws_region = "my-aws-region" + + control_plane { + aws_services_authentication { + role_arn = "arn:aws:iam::012345678910:role/my--1p-dev-oneplatform" + role_session_name = "my--1p-dev-session" + } + + config_encryption { + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + } + + database_encryption { + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + } + + iam_instance_profile = "my--1p-dev-controlplane" + subnet_ids = ["subnet-00000000000000000"] + version = 
"${data.google_container_aws_versions.versions.valid_versions[0]}" + instance_type = "t3.medium" + + main_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + size_gib = 10 + volume_type = "gp3" + } + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + size_gib = 10 + volume_type = "gp3" + } + + security_group_ids = ["sg-00000000000000000"] + + ssh_config { + ec2_key_pair = "my--1p-dev-ssh" + } + + tags = { + owner = "my@service-account.com" + } + } + + fleet { + project = "my-project-number" + } + + location = "us-west1" + name = "name" + + networking { + pod_address_cidr_blocks = ["10.2.0.0/16"] + service_address_cidr_blocks = ["10.1.0.0/16"] + vpc_id = "vpc-00000000000000000" + } + + annotations = { + label-one = "value-one" + } + + description = "A sample aws cluster" + project = "my-project-name" +} + + +``` +## Example Usage - beta_basic_enum_aws_cluster +A basic example of a containeraws cluster with lowercase enums (beta) +```hcl +data "google_container_aws_versions" "versions" { + provider = google-beta + project = "my-project-name" + location = "us-west1" +} + +resource "google_container_aws_cluster" "primary" { + provider = google-beta + authorization { + admin_users { + username = "my@service-account.com" + } + } + + aws_region = "my-aws-region" + + control_plane { + aws_services_authentication { + role_arn = "arn:aws:iam::012345678910:role/my--1p-dev-oneplatform" + role_session_name = "my--1p-dev-session" + } + + config_encryption { + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + } + + database_encryption { + kms_key_arn = 
The AWS region where the cluster runs. Each Google Cloud region supports a subset of nearby AWS regions. You can call `GetAwsServerConfig` to list all supported AWS regions within a given Google Cloud region.
The name of the AWS IAM instance profile to assign to each control plane replica.
The Kubernetes version to run on control plane replicas (e.g. `1.19.10-gke.1000`). You can list all supported versions on a given Google Cloud region by calling `GetAwsServerConfig`.
+ +The `networking` block supports: + +* `per_node_pool_sg_rules_disabled` - + (Optional) + Disable the per node pool subnet security group rules on the control plane security group. When set to true, you must also provide one or more security groups that ensure node pools are able to send requests to the control plane on TCP/443 and TCP/8132. Failure to do so may result in unavailable node pools. + +* `pod_address_cidr_blocks` - + (Required) + All pods in the cluster are assigned an RFC1918 IPv4 address from these ranges. Only a single range is supported. This field cannot be changed after creation. + +* `service_address_cidr_blocks` - + (Required) + All services in the cluster are assigned an RFC1918 IPv4 address from these ranges. Only a single range is supported. This field cannot be changed after creation. + +* `vpc_id` - + (Required) + The VPC associated with the cluster. All component clusters (i.e. control plane and node pools) run on a single VPC. This field cannot be changed after creation. + +- - - + +* `annotations` - + (Optional) + Optional. Annotations on the cluster. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. Key can have 2 segments: prefix (optional) and name (required), separated by a slash (/). Prefix must be a DNS subdomain. Name must be 63 characters or less, begin and end with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics between. + +**Note**: This field is non-authoritative, and will only manage the annotations present in your configuration. +Please refer to the field `effective_annotations` for all of the annotations present on the resource. + +* `binary_authorization` - + (Optional) + Configuration options for the Binary Authorization feature. + +* `description` - + (Optional) + Optional. A human readable description of this cluster. Cannot be longer than 255 UTF-8 encoded bytes. 
+ +* `logging_config` - + (Optional) + (Beta only) Logging configuration. + +* `project` - + (Optional) + The project for the resource + + + +The `admin_groups` block supports: + +* `group` - + (Required) + The name of the group, e.g. `my-group@domain.com`. + +The `admin_users` block supports: + +* `username` - + (Required) + The name of the user, e.g. `my-gcp-id@gmail.com`. + +The `binary_authorization` block supports: + +* `evaluation_mode` - + (Optional) + Mode of operation for Binary Authorization policy evaluation. Possible values: DISABLED, PROJECT_SINGLETON_POLICY_ENFORCE + +The `aws_services_authentication` block supports: + +* `role_arn` - + (Required) + The Amazon Resource Name (ARN) of the role that the Anthos Multi-Cloud API will assume when managing AWS resources on your account. + +* `role_session_name` - + (Optional) + Optional. An identifier for the assumed role session. When unspecified, it defaults to `multicloud-service-agent`. + +The `config_encryption` block supports: + +* `kms_key_arn` - + (Required) + The ARN of the AWS KMS key used to encrypt cluster configuration. + +The `database_encryption` block supports: + +* `kms_key_arn` - + (Required) + The ARN of the AWS KMS key used to encrypt cluster secrets. + +The `instance_placement` block supports: + +* `tenancy` - + (Optional) + The tenancy for the instance. Possible values: TENANCY_UNSPECIFIED, DEFAULT, DEDICATED, HOST + +The `main_volume` block supports: + +* `iops` - + (Optional) + Optional. The number of I/O operations per second (IOPS) to provision for GP3 volume. + +* `kms_key_arn` - + (Optional) + Optional. The Amazon Resource Name (ARN) of the Customer Managed Key (CMK) used to encrypt AWS EBS volumes. If not specified, the default Amazon managed key associated to the AWS region where this cluster runs will be used. + +* `size_gib` - + (Optional) + Optional. The size of the volume, in GiBs. When unspecified, a default value is provided. 
Optional. The throughput to provision for the volume, in MiB/s. Only valid if the volume type is GP3. If the volume type is GP3 and throughput is not specified, the throughput will default to 125.
+ +The `component_config` block supports: + +* `enable_components` - + (Optional) + Components of the logging configuration to be enabled. + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `id` - an identifier for the resource with format `projects/{{project}}/locations/{{location}}/awsClusters/{{name}}` + +* `create_time` - + Output only. The time at which this cluster was created. + +* `effective_annotations` - + All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services. + +* `endpoint` - + Output only. The endpoint of the cluster's API server. + +* `etag` - + Allows clients to perform consistent read-modify-writes through optimistic concurrency control. May be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. + +* `reconciling` - + Output only. If set, there are currently changes in flight to the cluster. + +* `state` - + Output only. The current state of the cluster. Possible values: STATE_UNSPECIFIED, PROVISIONING, RUNNING, RECONCILING, STOPPING, ERROR, DEGRADED + +* `uid` - + Output only. A globally unique identifier for the cluster. + +* `update_time` - + Output only. The time at which this cluster was last updated. + +* `workload_identity_config` - + Output only. Workload Identity settings. + +## Timeouts + +This resource provides the following +[Timeouts](https://developer.hashicorp.com/terraform/plugin/sdkv2/resources/retries-and-customizable-timeouts) configuration options: + +- `create` - Default is 20 minutes. +- `update` - Default is 20 minutes. +- `delete` - Default is 20 minutes. 
+ +## Import + +Cluster can be imported using any of these accepted formats: +* `projects/{{project}}/locations/{{location}}/awsClusters/{{name}}` +* `{{project}}/{{location}}/{{name}}` +* `{{location}}/{{name}}` + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Cluster using one of the formats above. For example: + + +```tf +import { + id = "projects/{{project}}/locations/{{location}}/awsClusters/{{name}}" + to = google_container_aws_cluster.default +} +``` + +When using the [`terraform import` command](https://developer.hashicorp.com/terraform/cli/commands/import), Cluster can be imported using one of the formats above. For example: + +``` +$ terraform import google_container_aws_cluster.default projects/{{project}}/locations/{{location}}/awsClusters/{{name}} +$ terraform import google_container_aws_cluster.default {{project}}/{{location}}/{{name}} +$ terraform import google_container_aws_cluster.default {{location}}/{{name}} +``` + + + diff --git a/mmv1/third_party/terraform/website/docs/r/container_aws_node_pool.html.markdown b/mmv1/third_party/terraform/website/docs/r/container_aws_node_pool.html.markdown new file mode 100644 index 000000000000..986f1f4b164d --- /dev/null +++ b/mmv1/third_party/terraform/website/docs/r/container_aws_node_pool.html.markdown @@ -0,0 +1,822 @@ +--- +subcategory: "ContainerAws" +description: |- + An Anthos node pool running on AWS. +--- + +# google_container_aws_node_pool + +An Anthos node pool running on AWS. 
+ +For more information, see: +* [Multicloud overview](https://cloud.google.com/kubernetes-engine/multi-cloud/docs) +## Example Usage - basic_aws_cluster +A basic example of a containeraws node pool +```hcl +data "google_container_aws_versions" "versions" { + project = "my-project-name" + location = "us-west1" +} + +resource "google_container_aws_cluster" "primary" { + authorization { + admin_users { + username = "my@service-account.com" + } + } + + aws_region = "my-aws-region" + + control_plane { + aws_services_authentication { + role_arn = "arn:aws:iam::012345678910:role/my--1p-dev-oneplatform" + role_session_name = "my--1p-dev-session" + } + + config_encryption { + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + } + + database_encryption { + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + } + + iam_instance_profile = "my--1p-dev-controlplane" + subnet_ids = ["subnet-00000000000000000"] + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + instance_type = "t3.medium" + + main_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + size_gib = 10 + volume_type = "GP3" + } + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + size_gib = 10 + volume_type = "GP3" + } + + security_group_ids = ["sg-00000000000000000"] + + ssh_config { + ec2_key_pair = "my--1p-dev-ssh" + } + + tags = { + owner = "my@service-account.com" + } + } + + fleet { + project = "my-project-number" + } + + location = "us-west1" + name = "name" + + networking { + pod_address_cidr_blocks = ["10.2.0.0/16"] + service_address_cidr_blocks = 
["10.1.0.0/16"] + vpc_id = "vpc-00000000000000000" + } + + annotations = { + label-one = "value-one" + } + + description = "A sample aws cluster" + project = "my-project-name" +} + + +resource "google_container_aws_node_pool" "primary" { + autoscaling { + max_node_count = 5 + min_node_count = 1 + } + + cluster = google_container_aws_cluster.primary.name + + config { + config_encryption { + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + } + + iam_instance_profile = "my--1p-dev-nodepool" + instance_type = "t3.medium" + + labels = { + label-one = "value-one" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + size_gib = 10 + volume_type = "GP3" + } + + security_group_ids = ["sg-00000000000000000"] + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + ssh_config { + ec2_key_pair = "my--1p-dev-ssh" + } + + tags = { + tag-one = "value-one" + } + + taints { + effect = "PREFER_NO_SCHEDULE" + key = "taint-key" + value = "taint-value" + } + } + + location = "us-west1" + + max_pods_constraint { + max_pods_per_node = 110 + } + + name = "node-pool-name" + subnet_id = "subnet-00000000000000000" + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + + annotations = { + label-one = "value-one" + } + + management { + auto_repair = true + } + + kubelet_config { + cpu_manager_policy = "none" + cpu_cfs_quota = true + cpu_cfs_quota_period = "100ms" + pod_pids_limit = 1024 + } + + project = "my-project-name" +} + +``` +## Example Usage - basic_enum_aws_cluster +A basic example of a containeraws node pool with lowercase enums +```hcl +data "google_container_aws_versions" "versions" { + project = "my-project-name" + location = "us-west1" +} + +resource "google_container_aws_cluster" 
"primary" { + authorization { + admin_users { + username = "my@service-account.com" + } + } + + aws_region = "my-aws-region" + + control_plane { + aws_services_authentication { + role_arn = "arn:aws:iam::012345678910:role/my--1p-dev-oneplatform" + role_session_name = "my--1p-dev-session" + } + + config_encryption { + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + } + + database_encryption { + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + } + + iam_instance_profile = "my--1p-dev-controlplane" + subnet_ids = ["subnet-00000000000000000"] + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + instance_type = "t3.medium" + + main_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + size_gib = 10 + volume_type = "GP3" + } + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + size_gib = 10 + volume_type = "GP3" + } + + security_group_ids = ["sg-00000000000000000"] + + ssh_config { + ec2_key_pair = "my--1p-dev-ssh" + } + + tags = { + owner = "my@service-account.com" + } + } + + fleet { + project = "my-project-number" + } + + location = "us-west1" + name = "name" + + networking { + pod_address_cidr_blocks = ["10.2.0.0/16"] + service_address_cidr_blocks = ["10.1.0.0/16"] + vpc_id = "vpc-00000000000000000" + } + + annotations = { + label-one = "value-one" + } + + description = "A sample aws cluster" + project = "my-project-name" +} + + +resource "google_container_aws_node_pool" "primary" { + autoscaling { + max_node_count = 5 + min_node_count = 1 + } + + cluster = google_container_aws_cluster.primary.name + + 
config { + config_encryption { + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + } + + iam_instance_profile = "my--1p-dev-nodepool" + instance_type = "t3.medium" + + labels = { + label-one = "value-one" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + size_gib = 10 + volume_type = "gp3" + } + + security_group_ids = ["sg-00000000000000000"] + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + ssh_config { + ec2_key_pair = "my--1p-dev-ssh" + } + + tags = { + tag-one = "value-one" + } + + taints { + effect = "prefer_no_schedule" + key = "taint-key" + value = "taint-value" + } + } + + location = "us-west1" + + max_pods_constraint { + max_pods_per_node = 110 + } + + name = "node-pool-name" + subnet_id = "subnet-00000000000000000" + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + + annotations = { + label-one = "value-one" + } + + project = "my-project-name" +} + + +``` +## Example Usage - beta_basic_enum_aws_cluster +A basic example of a containeraws node pool with lowercase enums (beta) +```hcl +data "google_container_aws_versions" "versions" { + provider = google-beta + project = "my-project-name" + location = "us-west1" +} + +resource "google_container_aws_cluster" "primary" { + provider = google-beta + authorization { + admin_users { + username = "my@service-account.com" + } + } + + aws_region = "my-aws-region" + + control_plane { + aws_services_authentication { + role_arn = "arn:aws:iam::012345678910:role/my--1p-dev-oneplatform" + role_session_name = "my--1p-dev-session" + } + + config_encryption { + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + } + + database_encryption { + kms_key_arn = 
"arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + } + + iam_instance_profile = "my--1p-dev-controlplane" + subnet_ids = ["subnet-00000000000000000"] + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + instance_type = "t3.medium" + + main_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + size_gib = 10 + volume_type = "GP3" + } + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + size_gib = 10 + volume_type = "GP3" + } + + security_group_ids = ["sg-00000000000000000"] + + ssh_config { + ec2_key_pair = "my--1p-dev-ssh" + } + + tags = { + owner = "my@service-account.com" + } + } + + fleet { + project = "my-project-number" + } + + location = "us-west1" + name = "name" + + networking { + pod_address_cidr_blocks = ["10.2.0.0/16"] + service_address_cidr_blocks = ["10.1.0.0/16"] + vpc_id = "vpc-00000000000000000" + } + + annotations = { + label-one = "value-one" + } + + description = "A sample aws cluster" + project = "my-project-name" +} + + +resource "google_container_aws_node_pool" "primary" { + provider = google-beta + autoscaling { + max_node_count = 5 + min_node_count = 1 + } + + cluster = google_container_aws_cluster.primary.name + + config { + config_encryption { + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + } + + iam_instance_profile = "my--1p-dev-nodepool" + instance_type = "t3.medium" + + labels = { + label-one = "value-one" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + size_gib = 10 + volume_type = "gp3" + } + + 
security_group_ids = ["sg-00000000000000000"] + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + ssh_config { + ec2_key_pair = "my--1p-dev-ssh" + } + + tags = { + tag-one = "value-one" + } + + taints { + effect = "prefer_no_schedule" + key = "taint-key" + value = "taint-value" + } + + instance_placement { + tenancy = "dedicated" + } + + image_type = "ubuntu" + } + + location = "us-west1" + + max_pods_constraint { + max_pods_per_node = 110 + } + + name = "node-pool-name" + subnet_id = "subnet-00000000000000000" + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + + annotations = { + label-one = "value-one" + } + + project = "my-project-name" +} + + +``` + +## Argument Reference + +The following arguments are supported: + +* `autoscaling` - + (Required) + Autoscaler configuration for this node pool. + +* `cluster` - + (Required) + The awsCluster for the resource + +* `config` - + (Required) + The configuration of the node pool. + +* `location` - + (Required) + The location for the resource + +* `max_pods_constraint` - + (Required) + The constraint on the maximum number of pods that can be run simultaneously on a node in the node pool. + +* `name` - + (Required) + The name of this resource. + +* `subnet_id` - + (Required) + The subnet where the node pool node run. + +* `version` - + (Required) + The Kubernetes version to run on this node pool (e.g. `1.19.10-gke.1000`). You can list all supported versions on a given Google Cloud region by calling GetAwsServerConfig. + + + +The `autoscaling` block supports: + +* `max_node_count` - + (Required) + Maximum number of nodes in the NodePool. Must be >= min_node_count. + +* `min_node_count` - + (Required) + Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count. 
+ +The `config` block supports: + +* `autoscaling_metrics_collection` - + (Optional) + Optional. Configuration related to CloudWatch metrics collection on the Auto Scaling group of the node pool. When unspecified, metrics collection is disabled. + +* `config_encryption` - + (Required) + The ARN of the AWS KMS key used to encrypt node pool configuration. + +* `iam_instance_profile` - + (Required) + The name of the AWS IAM role assigned to nodes in the pool. + +* `image_type` - + (Optional) + (Beta only) The OS image type to use on node pool instances. + +* `instance_placement` - + (Optional) + (Beta only) Details of placement information for an instance. + +* `instance_type` - + (Optional) + Optional. The AWS instance type. When unspecified, it defaults to `m5.large`. + +* `labels` - + (Optional) + Optional. The initial labels assigned to nodes of this node pool. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. + +* `proxy_config` - + (Optional) + Proxy configuration for outbound HTTP(S) traffic. + +* `root_volume` - + (Optional) + Optional. Template for the root volume provisioned for node pool nodes. Volumes will be provisioned in the availability zone assigned to the node pool subnet. When unspecified, it defaults to 32 GiB with the GP2 volume type. + +* `security_group_ids` - + (Optional) + Optional. The IDs of additional security groups to add to nodes in this pool. The manager will automatically create security groups with minimum rules needed for a functioning cluster. + +* `spot_config` - + (Optional) + (Beta only) Optional. When specified, the node pool will provision Spot instances from the set of spot_config.instance_types. This field is mutually exclusive with `instance_type` + +* `ssh_config` - + (Optional) + Optional. The SSH configuration. + +* `tags` - + (Optional) + Optional. Key/value metadata to assign to each underlying AWS resource. 
Specify at most 50 pairs containing alphanumerics, spaces, and symbols (.+-=_:@/). Keys can be up to 127 Unicode characters. Values can be up to 255 Unicode characters. + +* `taints` - + (Optional) + Optional. The initial taints assigned to nodes of this node pool. + +The `max_pods_constraint` block supports: + +* `max_pods_per_node` - + (Required) + The maximum number of pods to schedule on a single node. + +- - - + +* `annotations` - + (Optional) + Optional. Annotations on the node pool. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. Key can have 2 segments: prefix (optional) and name (required), separated by a slash (/). Prefix must be a DNS subdomain. Name must be 63 characters or less, begin and end with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics between. + +**Note**: This field is non-authoritative, and will only manage the annotations present in your configuration. +Please refer to the field `effective_annotations` for all of the annotations present on the resource. + +* `kubelet_config` - + (Optional) + The kubelet configuration for the node pool. + +* `management` - + (Optional) + The Management configuration for this node pool. + +* `project` - + (Optional) + The project for the resource + +* `update_settings` - + (Optional) + Optional. Update settings control the speed and disruption of the node pool update. + + + +The `autoscaling_metrics_collection` block supports: + +* `granularity` - + (Required) + The frequency at which EC2 Auto Scaling sends aggregated data to AWS CloudWatch. The only valid value is "1Minute". + +* `metrics` - + (Optional) + The metrics to enable. For a list of valid metrics, see https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_EnableMetricsCollection.html. If you specify granularity and don't specify any metrics, all metrics are enabled. 
+ +The `config_encryption` block supports: + +* `kms_key_arn` - + (Required) + The ARN of the AWS KMS key used to encrypt node pool configuration. + +The `instance_placement` block supports: + +* `tenancy` - + (Optional) + The tenancy for the instance. Possible values: TENANCY_UNSPECIFIED, DEFAULT, DEDICATED, HOST + +The `proxy_config` block supports: + +* `secret_arn` - + (Required) + The ARN of the AWS Secret Manager secret that contains the HTTP(S) proxy configuration. + +* `secret_version` - + (Required) + The version string of the AWS Secret Manager secret that contains the HTTP(S) proxy configuration. + +The `root_volume` block supports: + +* `iops` - + (Optional) + Optional. The number of I/O operations per second (IOPS) to provision for GP3 volume. + +* `kms_key_arn` - + (Optional) + Optional. The Amazon Resource Name (ARN) of the Customer Managed Key (CMK) used to encrypt AWS EBS volumes. If not specified, the default Amazon managed key associated to the AWS region where this cluster runs will be used. + +* `size_gib` - + (Optional) + Optional. The size of the volume, in GiBs. When unspecified, a default value is provided. See the specific reference in the parent resource. + +* `throughput` - + (Optional) + Optional. The throughput to provision for the volume, in MiB/s. Only valid if the volume type is GP3. If volume type is gp3 and throughput is not specified, the throughput will default to 125. + +* `volume_type` - + (Optional) + Optional. Type of the EBS volume. When unspecified, it defaults to GP2 volume. Possible values: VOLUME_TYPE_UNSPECIFIED, GP2, GP3 + +The `spot_config` block supports: + +* `instance_types` - + (Required) + List of AWS EC2 instance types for creating a spot node pool's nodes. The specified instance types must have the same number of CPUs and memory. 
You can use the Amazon EC2 Instance Selector tool (https://github.com/aws/amazon-ec2-instance-selector) to choose instance types with matching CPU and memory + +The `ssh_config` block supports: + +* `ec2_key_pair` - + (Required) + The name of the EC2 key pair used to login into cluster machines. + +The `taints` block supports: + +* `effect` - + (Required) + The taint effect. Possible values: EFFECT_UNSPECIFIED, NO_SCHEDULE, PREFER_NO_SCHEDULE, NO_EXECUTE + +* `key` - + (Required) + Key for the taint. + +* `value` - + (Required) + Value for the taint. + +The `kubelet_config` block supports: + +* `cpu_cfs_quota` - + (Optional) + Whether or not to enable CPU CFS quota. Defaults to true. + +* `cpu_cfs_quota_period` - + (Optional) + Optional. The CPU CFS quota period to use for the node. Defaults to "100ms". + +* `cpu_manager_policy` - + (Optional) + The CpuManagerPolicy to use for the node. Defaults to "none". + +* `pod_pids_limit` - + (Optional) + Optional. The maximum number of PIDs in each pod running on the node. The limit scales automatically based on underlying machine size if left unset. + +The `management` block supports: + +* `auto_repair` - + (Optional) + Optional. Whether or not the nodes will be automatically repaired. + +The `update_settings` block supports: + +* `surge_settings` - + (Optional) + Optional. Settings for surge update. + +The `surge_settings` block supports: + +* `max_surge` - + (Optional) + Optional. The maximum number of nodes that can be created beyond the current size of the node pool during the update process. + +* `max_unavailable` - + (Optional) + Optional. The maximum number of nodes that can be simultaneously unavailable during the update process. A node is considered unavailable if its status is not Ready. 
+ +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `id` - an identifier for the resource with format `projects/{{project}}/locations/{{location}}/awsClusters/{{cluster}}/awsNodePools/{{name}}` + +* `create_time` - + Output only. The time at which this node pool was created. + +* `effective_annotations` - + All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services. + +* `etag` - + Allows clients to perform consistent read-modify-writes through optimistic concurrency control. May be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. + +* `reconciling` - + Output only. If set, there are currently changes in flight to the node pool. + +* `state` - + Output only. The lifecycle state of the node pool. Possible values: STATE_UNSPECIFIED, PROVISIONING, RUNNING, RECONCILING, STOPPING, ERROR, DEGRADED + +* `uid` - + Output only. A globally unique identifier for the node pool. + +* `update_time` - + Output only. The time at which this node pool was last updated. + +## Timeouts + +This resource provides the following +[Timeouts](https://developer.hashicorp.com/terraform/plugin/sdkv2/resources/retries-and-customizable-timeouts) configuration options: + +- `create` - Default is 20 minutes. +- `update` - Default is 20 minutes. +- `delete` - Default is 20 minutes. + +## Import + +NodePool can be imported using any of these accepted formats: +* `projects/{{project}}/locations/{{location}}/awsClusters/{{cluster}}/awsNodePools/{{name}}` +* `{{project}}/{{location}}/{{cluster}}/{{name}}` +* `{{location}}/{{cluster}}/{{name}}` + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import NodePool using one of the formats above. 
For example: + + +```tf +import { + id = "projects/{{project}}/locations/{{location}}/awsClusters/{{cluster}}/awsNodePools/{{name}}" + to = google_container_aws_node_pool.default +} +``` + +When using the [`terraform import` command](https://developer.hashicorp.com/terraform/cli/commands/import), NodePool can be imported using one of the formats above. For example: + +``` +$ terraform import google_container_aws_node_pool.default projects/{{project}}/locations/{{location}}/awsClusters/{{cluster}}/awsNodePools/{{name}} +$ terraform import google_container_aws_node_pool.default {{project}}/{{location}}/{{cluster}}/{{name}} +$ terraform import google_container_aws_node_pool.default {{location}}/{{cluster}}/{{name}} +``` + + + diff --git a/mmv1/third_party/terraform/website/docs/r/container_azure_client.html.markdown b/mmv1/third_party/terraform/website/docs/r/container_azure_client.html.markdown new file mode 100644 index 000000000000..fce523a2c3e2 --- /dev/null +++ b/mmv1/third_party/terraform/website/docs/r/container_azure_client.html.markdown @@ -0,0 +1,105 @@ +--- +subcategory: "ContainerAzure" +description: |- + AzureClient resources hold client authentication information needed by the Anthos Multi-Cloud API to manage Azure resources on your Azure subscription.When an AzureCluster is created, an AzureClient resource needs to be provided and all operations on Azure resources associated to that cluster will authenticate to Azure services using the given client.AzureClient resources are immutable and cannot be modified upon creation.Each AzureClient resource is bound to a single Azure Active Directory Application and tenant. 
+ +--- + +# google_container_azure_client + +AzureClient resources hold client authentication information needed by the Anthos Multi-Cloud API to manage Azure resources on your Azure subscription. When an AzureCluster is created, an AzureClient resource needs to be provided and all operations on Azure resources associated to that cluster will authenticate to Azure services using the given client. AzureClient resources are immutable and cannot be modified upon creation. Each AzureClient resource is bound to a single Azure Active Directory Application and tenant. + +For more information, see: +* [Multicloud overview](https://cloud.google.com/kubernetes-engine/multi-cloud/docs) +## Example Usage - basic_azure_client +A basic example of a containerazure azure client +```hcl +resource "google_container_azure_client" "primary" { + application_id = "12345678-1234-1234-1234-123456789111" + location = "us-west1" + name = "client-name" + tenant_id = "12345678-1234-1234-1234-123456789111" + project = "my-project-name" +} + +``` + +## Argument Reference + +The following arguments are supported: + +* `application_id` - + (Required) + The Azure Active Directory Application ID. + +* `location` - + (Required) + The location for the resource + +* `name` - + (Required) + The name of this resource. + +* `tenant_id` - + (Required) + The Azure Active Directory Tenant ID. + + + +- - - + +* `project` - + (Optional) + The project for the resource + + + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `id` - an identifier for the resource with format `projects/{{project}}/locations/{{location}}/azureClients/{{name}}` + +* `certificate` - + Output only. The PEM encoded x509 certificate. + +* `create_time` - + Output only. The time at which this resource was created. + +* `uid` - + Output only. A globally unique identifier for the client. 
+ +## Timeouts + +This resource provides the following +[Timeouts](https://developer.hashicorp.com/terraform/plugin/sdkv2/resources/retries-and-customizable-timeouts) configuration options: + +- `create` - Default is 20 minutes. +- `delete` - Default is 20 minutes. + +## Import + +Client can be imported using any of these accepted formats: +* `projects/{{project}}/locations/{{location}}/azureClients/{{name}}` +* `{{project}}/{{location}}/{{name}}` +* `{{location}}/{{name}}` + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Client using one of the formats above. For example: + + +```tf +import { + id = "projects/{{project}}/locations/{{location}}/azureClients/{{name}}" + to = google_container_azure_client.default +} +``` + +When using the [`terraform import` command](https://developer.hashicorp.com/terraform/cli/commands/import), Client can be imported using one of the formats above. For example: + +``` +$ terraform import google_container_azure_client.default projects/{{project}}/locations/{{location}}/azureClients/{{name}} +$ terraform import google_container_azure_client.default {{project}}/{{location}}/{{name}} +$ terraform import google_container_azure_client.default {{location}}/{{name}} +``` + + + diff --git a/mmv1/third_party/terraform/website/docs/r/container_azure_cluster.html.markdown b/mmv1/third_party/terraform/website/docs/r/container_azure_cluster.html.markdown new file mode 100644 index 000000000000..091b0c590615 --- /dev/null +++ b/mmv1/third_party/terraform/website/docs/r/container_azure_cluster.html.markdown @@ -0,0 +1,425 @@ +--- +subcategory: "ContainerAzure" +description: |- + An Anthos cluster running on Azure. +--- + +# google_container_azure_cluster + +An Anthos cluster running on Azure. 
+ +For more information, see: +* [Multicloud overview](https://cloud.google.com/kubernetes-engine/multi-cloud/docs) +## Example Usage - basic_azure_cluster +A basic example of a containerazure azure cluster +```hcl +data "google_container_azure_versions" "versions" { + project = "my-project-name" + location = "us-west1" +} + +resource "google_container_azure_cluster" "primary" { + authorization { + admin_users { + username = "mmv2@google.com" + } + admin_groups { + group = "group@domain.com" + } + } + + azure_region = "westus2" + client = "projects/my-project-number/locations/us-west1/azureClients/${google_container_azure_client.basic.name}" + + control_plane { + ssh_config { + authorized_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8yaayO6lnb2v+SedxUMa2c8vtIEzCzBjM3EJJsv8Vm9zUDWR7dXWKoNGARUb2mNGXASvI6mFIDXTIlkQ0poDEPpMaXR0g2cb5xT8jAAJq7fqXL3+0rcJhY/uigQ+MrT6s+ub0BFVbsmGHNrMQttXX9gtmwkeAEvj3mra9e5pkNf90qlKnZz6U0SVArxVsLx07vHPHDIYrl0OPG4zUREF52igbBPiNrHJFDQJT/4YlDMJmo/QT/A1D6n9ocemvZSzhRx15/Arjowhr+VVKSbaxzPtEfY0oIg2SrqJnnr/l3Du5qIefwh5VmCZe4xopPUaDDoOIEFriZ88sB+3zz8ib8sk8zJJQCgeP78tQvXCgS+4e5W3TUg9mxjB6KjXTyHIVhDZqhqde0OI3Fy1UuVzRUwnBaLjBnAwP5EoFQGRmDYk/rEYe7HTmovLeEBUDQocBQKT4Ripm/xJkkWY7B07K/tfo56dGUCkvyIVXKBInCh+dLK7gZapnd4UWkY0xBYcwo1geMLRq58iFTLA2j/JmpmHXp7m0l7jJii7d44uD3tTIFYThn7NlOnvhLim/YcBK07GMGIN7XwrrKZKmxXaspw6KBWVhzuw1UPxctxshYEaMLfFg/bwOw8HvMPr9VtrElpSB7oiOh91PDIPdPBgHCi7N2QgQ5l/ZDBHieSpNrQ== thomasrodgers" + } + + subnet_id = "/subscriptions/12345678-1234-1234-1234-123456789111/resourceGroups/my--dev-byo/providers/Microsoft.Network/virtualNetworks/my--dev-vnet/subnets/default" + version = "${data.google_container_azure_versions.versions.valid_versions[0]}" + } + + fleet { + project = "my-project-number" + } + + location = "us-west1" + name = "name" + + networking { + pod_address_cidr_blocks = ["10.200.0.0/16"] + service_address_cidr_blocks = ["10.32.0.0/24"] + virtual_network_id = 
"/subscriptions/12345678-1234-1234-1234-123456789111/resourceGroups/my--dev-byo/providers/Microsoft.Network/virtualNetworks/my--dev-vnet" + } + + resource_group_id = "/subscriptions/12345678-1234-1234-1234-123456789111/resourceGroups/my--dev-cluster" + project = "my-project-name" +} + +resource "google_container_azure_client" "basic" { + application_id = "12345678-1234-1234-1234-123456789111" + location = "us-west1" + name = "client-name" + tenant_id = "12345678-1234-1234-1234-123456789111" + project = "my-project-name" +} + + +``` +## Example Usage - beta_basic_enum_azure_cluster +A basic example of a containerazure azure cluster with lowercase enums (beta) +```hcl +data "google_container_azure_versions" "versions" { + project = "my-project-name" + location = "us-west1" + provider = google-beta +} + +resource "google_container_azure_cluster" "primary" { + provider = google-beta + authorization { + admin_users { + username = "mmv2@google.com" + } + } + + azure_region = "westus2" + client = "projects/my-project-number/locations/us-west1/azureClients/${google_container_azure_client.basic.name}" + + control_plane { + ssh_config { + authorized_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8yaayO6lnb2v+SedxUMa2c8vtIEzCzBjM3EJJsv8Vm9zUDWR7dXWKoNGARUb2mNGXASvI6mFIDXTIlkQ0poDEPpMaXR0g2cb5xT8jAAJq7fqXL3+0rcJhY/uigQ+MrT6s+ub0BFVbsmGHNrMQttXX9gtmwkeAEvj3mra9e5pkNf90qlKnZz6U0SVArxVsLx07vHPHDIYrl0OPG4zUREF52igbBPiNrHJFDQJT/4YlDMJmo/QT/A1D6n9ocemvZSzhRx15/Arjowhr+VVKSbaxzPtEfY0oIg2SrqJnnr/l3Du5qIefwh5VmCZe4xopPUaDDoOIEFriZ88sB+3zz8ib8sk8zJJQCgeP78tQvXCgS+4e5W3TUg9mxjB6KjXTyHIVhDZqhqde0OI3Fy1UuVzRUwnBaLjBnAwP5EoFQGRmDYk/rEYe7HTmovLeEBUDQocBQKT4Ripm/xJkkWY7B07K/tfo56dGUCkvyIVXKBInCh+dLK7gZapnd4UWkY0xBYcwo1geMLRq58iFTLA2j/JmpmHXp7m0l7jJii7d44uD3tTIFYThn7NlOnvhLim/YcBK07GMGIN7XwrrKZKmxXaspw6KBWVhzuw1UPxctxshYEaMLfFg/bwOw8HvMPr9VtrElpSB7oiOh91PDIPdPBgHCi7N2QgQ5l/ZDBHieSpNrQ== thomasrodgers" + } + + subnet_id = 
"/subscriptions/12345678-1234-1234-1234-123456789111/resourceGroups/my--dev-byo/providers/Microsoft.Network/virtualNetworks/my--dev-vnet/subnets/default" + version = "${data.google_container_azure_versions.versions.valid_versions[0]}" + } + + fleet { + project = "my-project-number" + } + + location = "us-west1" + name = "name" + + networking { + pod_address_cidr_blocks = ["10.200.0.0/16"] + service_address_cidr_blocks = ["10.32.0.0/24"] + virtual_network_id = "/subscriptions/12345678-1234-1234-1234-123456789111/resourceGroups/my--dev-byo/providers/Microsoft.Network/virtualNetworks/my--dev-vnet" + } + + resource_group_id = "/subscriptions/12345678-1234-1234-1234-123456789111/resourceGroups/my--dev-cluster" + project = "my-project-name" + + logging_config { + component_config { + enable_components = ["system_components", "workloads"] + } + } + +} + +resource "google_container_azure_client" "basic" { + provider = google-beta + application_id = "12345678-1234-1234-1234-123456789111" + location = "us-west1" + name = "client-name" + tenant_id = "12345678-1234-1234-1234-123456789111" + project = "my-project-name" +} + + +``` + +## Argument Reference + +The following arguments are supported: + +* `authorization` - + (Required) + Configuration related to the cluster RBAC settings. + +* `azure_region` - + (Required) + The Azure region where the cluster runs. Each Google Cloud region supports a subset of nearby Azure regions. You can call to list all supported Azure regions within a given Google Cloud region. + +* `control_plane` - + (Required) + Configuration related to the cluster control plane. + +* `fleet` - + (Required) + Fleet configuration. + +* `location` - + (Required) + The location for the resource + +* `name` - + (Required) + The name of this resource. + +* `networking` - + (Required) + Cluster-wide networking configuration. + +* `resource_group_id` - + (Required) + The ARM ID of the resource group where the cluster resources are deployed. 
For example: `/subscriptions/*/resourceGroups/*` + + + +The `authorization` block supports: + +* `admin_groups` - + (Optional) + Groups of users that can perform operations as a cluster admin. A managed ClusterRoleBinding will be created to grant the `cluster-admin` ClusterRole to the groups. Up to ten admin groups can be provided. For more info on RBAC, see https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles + +* `admin_users` - + (Required) + Users that can perform operations as a cluster admin. A new ClusterRoleBinding will be created to grant the cluster-admin ClusterRole to the users. Up to ten admin users can be provided. For more info on RBAC, see https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles + +The `control_plane` block supports: + +* `database_encryption` - + (Optional) + Optional. Configuration related to application-layer secrets encryption. + +* `main_volume` - + (Optional) + Optional. Configuration related to the main volume provisioned for each control plane replica. The main volume is in charge of storing all of the cluster's etcd state. When unspecified, it defaults to a 8-GiB Azure Disk. + +* `proxy_config` - + (Optional) + Proxy configuration for outbound HTTP(S) traffic. + +* `replica_placements` - + (Optional) + Configuration for where to place the control plane replicas. Up to three replica placement instances can be specified. If replica_placements is set, the replica placement instances will be applied to the three control plane replicas as evenly as possible. + +* `root_volume` - + (Optional) + Optional. Configuration related to the root volume provisioned for each control plane replica. When unspecified, it defaults to 32-GiB Azure Disk. + +* `ssh_config` - + (Required) + SSH configuration for how to access the underlying control plane machines. + +* `subnet_id` - + (Required) + The ARM ID of the subnet where the control plane VMs are deployed. 
Example: `/subscriptions//resourceGroups//providers/Microsoft.Network/virtualNetworks//subnets/default`. + +* `tags` - + (Optional) + Optional. A set of tags to apply to all underlying control plane Azure resources. + +* `version` - + (Required) + The Kubernetes version to run on control plane replicas (e.g. `1.19.10-gke.1000`). You can list all supported versions on a given Google Cloud region by calling GetAzureServerConfig. + +* `vm_size` - + (Optional) + Optional. The Azure VM size name. Example: `Standard_DS2_v2`. For available VM sizes, see https://docs.microsoft.com/en-us/azure/virtual-machines/vm-naming-conventions. When unspecified, it defaults to `Standard_DS2_v2`. + +The `fleet` block supports: + +* `membership` - + The name of the managed Hub Membership resource associated to this cluster. Membership names are formatted as projects//locations/global/membership/. + +* `project` - + (Optional) + The number of the Fleet host project where this cluster will be registered. + +The `networking` block supports: + +* `pod_address_cidr_blocks` - + (Required) + The IP address range of the pods in this cluster, in CIDR notation (e.g. `10.96.0.0/14`). All pods in the cluster get assigned a unique RFC1918 IPv4 address from these ranges. Only a single range is supported. This field cannot be changed after creation. + +* `service_address_cidr_blocks` - + (Required) + The IP address range for services in this cluster, in CIDR notation (e.g. `10.96.0.0/14`). All services in the cluster get assigned a unique RFC1918 IPv4 address from these ranges. Only a single range is supported. This field cannot be changed after creating a cluster. + +* `virtual_network_id` - + (Required) + The Azure Resource Manager (ARM) ID of the VNet associated with your cluster. All components in the cluster (i.e. control plane and node pools) run on a single VNet. 
Example: `/subscriptions/*/resourceGroups/*/providers/Microsoft.Network/virtualNetworks/*` This field cannot be changed after creation. + +- - - + +* `annotations` - + (Optional) + Optional. Annotations on the cluster. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. Keys can have 2 segments: prefix (optional) and name (required), separated by a slash (/). Prefix must be a DNS subdomain. Name must be 63 characters or less, begin and end with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics between. + +**Note**: This field is non-authoritative, and will only manage the annotations present in your configuration. +Please refer to the field `effective_annotations` for all of the annotations present on the resource. + +* `azure_services_authentication` - + (Optional) + Azure authentication configuration for management of Azure resources + +* `client` - + (Optional) + Name of the AzureClient. The `AzureClient` resource must reside on the same GCP project and region as the `AzureCluster`. `AzureClient` names are formatted as `projects//locations//azureClients/`. See Resource Names (https://cloud.google.com/apis/design/resource_names) for more details on Google Cloud resource names. + +* `description` - + (Optional) + Optional. A human readable description of this cluster. Cannot be longer than 255 UTF-8 encoded bytes. + +* `logging_config` - + (Optional) + (Beta only) Logging configuration. + +* `project` - + (Optional) + The project for the resource + + + +The `admin_groups` block supports: + +* `group` - + (Required) + The name of the group, e.g. `my-group@domain.com`. + +The `admin_users` block supports: + +* `username` - + (Required) + The name of the user, e.g. `my-gcp-id@gmail.com`. + +The `azure_services_authentication` block supports: + +* `application_id` - + (Required) + The Azure Active Directory Application ID for Authentication configuration. 
+ +* `tenant_id` - + (Required) + The Azure Active Directory Tenant ID for Authentication configuration. + +The `database_encryption` block supports: + +* `key_id` - + (Required) + The ARM ID of the Azure Key Vault key to encrypt / decrypt data. For example: `/subscriptions//resourceGroups//providers/Microsoft.KeyVault/vaults//keys/` Encryption will always take the latest version of the key and hence specific version is not supported. + +The `main_volume` block supports: + +* `size_gib` - + (Optional) + Optional. The size of the disk, in GiBs. When unspecified, a default value is provided. See the specific reference in the parent resource. + +The `proxy_config` block supports: + +* `resource_group_id` - + (Required) + The ARM ID of the resource group containing proxy keyvault. Resource group ids are formatted as `/subscriptions//resourceGroups/` + +* `secret_id` - + (Required) + The URL of the proxy setting secret with its version. Secret ids are formatted as `https:.vault.azure.net/secrets//`. + +The `replica_placements` block supports: + +* `azure_availability_zone` - + (Required) + For a given replica, the Azure availability zone where to provision the control plane VM and the ETCD disk. + +* `subnet_id` - + (Required) + For a given replica, the ARM ID of the subnet where the control plane VM is deployed. Make sure it's a subnet under the virtual network in the cluster configuration. + +The `root_volume` block supports: + +* `size_gib` - + (Optional) + Optional. The size of the disk, in GiBs. When unspecified, a default value is provided. See the specific reference in the parent resource. + +The `ssh_config` block supports: + +* `authorized_key` - + (Required) + The SSH public key data for VMs managed by Anthos. This accepts the authorized_keys file format used in OpenSSH according to the sshd(8) manual page. + +The `logging_config` block supports: + +* `component_config` - + (Optional) + Configuration of the logging components. 
+ +The `component_config` block supports: + +* `enable_components` - + (Optional) + Components of the logging configuration to be enabled. + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `id` - an identifier for the resource with format `projects/{{project}}/locations/{{location}}/azureClusters/{{name}}` + +* `create_time` - + Output only. The time at which this cluster was created. + +* `effective_annotations` - + All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services. + +* `endpoint` - + Output only. The endpoint of the cluster's API server. + +* `etag` - + Allows clients to perform consistent read-modify-writes through optimistic concurrency control. May be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. + +* `reconciling` - + Output only. If set, there are currently changes in flight to the cluster. + +* `state` - + Output only. The current state of the cluster. Possible values: STATE_UNSPECIFIED, PROVISIONING, RUNNING, RECONCILING, STOPPING, ERROR, DEGRADED + +* `uid` - + Output only. A globally unique identifier for the cluster. + +* `update_time` - + Output only. The time at which this cluster was last updated. + +* `workload_identity_config` - + Output only. Workload Identity settings. + +## Timeouts + +This resource provides the following +[Timeouts](https://developer.hashicorp.com/terraform/plugin/sdkv2/resources/retries-and-customizable-timeouts) configuration options: + +- `create` - Default is 20 minutes. +- `update` - Default is 20 minutes. +- `delete` - Default is 20 minutes. 
+ +## Import + +Cluster can be imported using any of these accepted formats: +* `projects/{{project}}/locations/{{location}}/azureClusters/{{name}}` +* `{{project}}/{{location}}/{{name}}` +* `{{location}}/{{name}}` + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Cluster using one of the formats above. For example: + + +```tf +import { + id = "projects/{{project}}/locations/{{location}}/azureClusters/{{name}}" + to = google_container_azure_cluster.default +} +``` + +When using the [`terraform import` command](https://developer.hashicorp.com/terraform/cli/commands/import), Cluster can be imported using one of the formats above. For example: + +``` +$ terraform import google_container_azure_cluster.default projects/{{project}}/locations/{{location}}/azureClusters/{{name}} +$ terraform import google_container_azure_cluster.default {{project}}/{{location}}/{{name}} +$ terraform import google_container_azure_cluster.default {{location}}/{{name}} +``` + + + diff --git a/mmv1/third_party/terraform/website/docs/r/container_azure_node_pool.html.markdown b/mmv1/third_party/terraform/website/docs/r/container_azure_node_pool.html.markdown new file mode 100644 index 000000000000..127c6371f325 --- /dev/null +++ b/mmv1/third_party/terraform/website/docs/r/container_azure_node_pool.html.markdown @@ -0,0 +1,319 @@ +--- +subcategory: "ContainerAzure" +description: |- + An Anthos node pool running on Azure. +--- + +# google_container_azure_node_pool + +An Anthos node pool running on Azure. 
+ +For more information, see: +* [Multicloud overview](https://cloud.google.com/kubernetes-engine/multi-cloud/docs) +## Example Usage - basic_azure_node_pool +A basic example of a containerazure azure node pool +```hcl +data "google_container_azure_versions" "versions" { + project = "my-project-name" + location = "us-west1" +} + +resource "google_container_azure_cluster" "primary" { + authorization { + admin_users { + username = "mmv2@google.com" + } + } + + azure_region = "westus2" + client = "projects/my-project-number/locations/us-west1/azureClients/${google_container_azure_client.basic.name}" + + control_plane { + ssh_config { + authorized_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8yaayO6lnb2v+SedxUMa2c8vtIEzCzBjM3EJJsv8Vm9zUDWR7dXWKoNGARUb2mNGXASvI6mFIDXTIlkQ0poDEPpMaXR0g2cb5xT8jAAJq7fqXL3+0rcJhY/uigQ+MrT6s+ub0BFVbsmGHNrMQttXX9gtmwkeAEvj3mra9e5pkNf90qlKnZz6U0SVArxVsLx07vHPHDIYrl0OPG4zUREF52igbBPiNrHJFDQJT/4YlDMJmo/QT/A1D6n9ocemvZSzhRx15/Arjowhr+VVKSbaxzPtEfY0oIg2SrqJnnr/l3Du5qIefwh5VmCZe4xopPUaDDoOIEFriZ88sB+3zz8ib8sk8zJJQCgeP78tQvXCgS+4e5W3TUg9mxjB6KjXTyHIVhDZqhqde0OI3Fy1UuVzRUwnBaLjBnAwP5EoFQGRmDYk/rEYe7HTmovLeEBUDQocBQKT4Ripm/xJkkWY7B07K/tfo56dGUCkvyIVXKBInCh+dLK7gZapnd4UWkY0xBYcwo1geMLRq58iFTLA2j/JmpmHXp7m0l7jJii7d44uD3tTIFYThn7NlOnvhLim/YcBK07GMGIN7XwrrKZKmxXaspw6KBWVhzuw1UPxctxshYEaMLfFg/bwOw8HvMPr9VtrElpSB7oiOh91PDIPdPBgHCi7N2QgQ5l/ZDBHieSpNrQ== thomasrodgers" + } + + subnet_id = "/subscriptions/12345678-1234-1234-1234-123456789111/resourceGroups/my--dev-byo/providers/Microsoft.Network/virtualNetworks/my--dev-vnet/subnets/default" + version = "${data.google_container_azure_versions.versions.valid_versions[0]}" + } + + fleet { + project = "my-project-number" + } + + location = "us-west1" + name = "name" + + networking { + pod_address_cidr_blocks = ["10.200.0.0/16"] + service_address_cidr_blocks = ["10.32.0.0/24"] + virtual_network_id = 
"/subscriptions/12345678-1234-1234-1234-123456789111/resourceGroups/my--dev-byo/providers/Microsoft.Network/virtualNetworks/my--dev-vnet" + } + + resource_group_id = "/subscriptions/12345678-1234-1234-1234-123456789111/resourceGroups/my--dev-cluster" + project = "my-project-name" +} + +resource "google_container_azure_client" "basic" { + application_id = "12345678-1234-1234-1234-123456789111" + location = "us-west1" + name = "client-name" + tenant_id = "12345678-1234-1234-1234-123456789111" + project = "my-project-name" +} + +resource "google_container_azure_node_pool" "primary" { + autoscaling { + max_node_count = 3 + min_node_count = 2 + } + + cluster = google_container_azure_cluster.primary.name + + config { + ssh_config { + authorized_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8yaayO6lnb2v+SedxUMa2c8vtIEzCzBjM3EJJsv8Vm9zUDWR7dXWKoNGARUb2mNGXASvI6mFIDXTIlkQ0poDEPpMaXR0g2cb5xT8jAAJq7fqXL3+0rcJhY/uigQ+MrT6s+ub0BFVbsmGHNrMQttXX9gtmwkeAEvj3mra9e5pkNf90qlKnZz6U0SVArxVsLx07vHPHDIYrl0OPG4zUREF52igbBPiNrHJFDQJT/4YlDMJmo/QT/A1D6n9ocemvZSzhRx15/Arjowhr+VVKSbaxzPtEfY0oIg2SrqJnnr/l3Du5qIefwh5VmCZe4xopPUaDDoOIEFriZ88sB+3zz8ib8sk8zJJQCgeP78tQvXCgS+4e5W3TUg9mxjB6KjXTyHIVhDZqhqde0OI3Fy1UuVzRUwnBaLjBnAwP5EoFQGRmDYk/rEYe7HTmovLeEBUDQocBQKT4Ripm/xJkkWY7B07K/tfo56dGUCkvyIVXKBInCh+dLK7gZapnd4UWkY0xBYcwo1geMLRq58iFTLA2j/JmpmHXp7m0l7jJii7d44uD3tTIFYThn7NlOnvhLim/YcBK07GMGIN7XwrrKZKmxXaspw6KBWVhzuw1UPxctxshYEaMLfFg/bwOw8HvMPr9VtrElpSB7oiOh91PDIPdPBgHCi7N2QgQ5l/ZDBHieSpNrQ== thomasrodgers" + } + + proxy_config { + resource_group_id = "/subscriptions/12345678-1234-1234-1234-123456789111/resourceGroups/my--dev-cluster" + secret_id = "https://my--dev-keyvault.vault.azure.net/secrets/my--dev-secret/0000000000000000000000000000000000" + } + + root_volume { + size_gib = 32 + } + + tags = { + owner = "mmv2" + } + + labels = { + key_one = "label_one" + } + + vm_size = "Standard_DS2_v2" + } + + location = "us-west1" + + max_pods_constraint { + max_pods_per_node = 110 + } + + name = 
"node-pool-name" + subnet_id = "/subscriptions/12345678-1234-1234-1234-123456789111/resourceGroups/my--dev-byo/providers/Microsoft.Network/virtualNetworks/my--dev-vnet/subnets/default" + version = "${data.google_container_azure_versions.versions.valid_versions[0]}" + + annotations = { + annotation-one = "value-one" + } + + management { + auto_repair = true + } + + project = "my-project-name" +} + + +``` + +## Argument Reference + +The following arguments are supported: + +* `autoscaling` - + (Required) + Autoscaler configuration for this node pool. + +* `cluster` - + (Required) + The azureCluster for the resource + +* `config` - + (Required) + The node configuration of the node pool. + +* `location` - + (Required) + The location for the resource + +* `max_pods_constraint` - + (Required) + The constraint on the maximum number of pods that can be run simultaneously on a node in the node pool. + +* `name` - + (Required) + The name of this resource. + +* `subnet_id` - + (Required) + The ARM ID of the subnet where the node pool VMs run. Make sure it's a subnet under the virtual network in the cluster configuration. + +* `version` - + (Required) + The Kubernetes version (e.g. `1.19.10-gke.1000`) running on this node pool. + + + +The `autoscaling` block supports: + +* `max_node_count` - + (Required) + Maximum number of nodes in the node pool. Must be >= min_node_count. + +* `min_node_count` - + (Required) + Minimum number of nodes in the node pool. Must be >= 1 and <= max_node_count. + +The `config` block supports: + +* `image_type` - + (Optional) + (Beta only) The OS image type to use on node pool instances. + +* `labels` - + (Optional) + Optional. The initial labels assigned to nodes of this node pool. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. + +* `proxy_config` - + (Optional) + Proxy configuration for outbound HTTP(S) traffic. + +* `root_volume` - + (Optional) + Optional. 
Configuration related to the root volume provisioned for each node pool machine. When unspecified, it defaults to a 32-GiB Azure Disk. + +* `ssh_config` - + (Required) + SSH configuration for how to access the node pool machines. + +* `tags` - + (Optional) + Optional. A set of tags to apply to all underlying Azure resources for this node pool. This currently only includes Virtual Machine Scale Sets. Specify at most 50 pairs containing alphanumerics, spaces, and symbols (.+-=_:@/). Keys can be up to 127 Unicode characters. Values can be up to 255 Unicode characters. + +* `vm_size` - + (Optional) + Optional. The Azure VM size name. Example: `Standard_DS2_v2`. See (/anthos/clusters/docs/azure/reference/supported-vms) for options. When unspecified, it defaults to `Standard_DS2_v2`. + +The `max_pods_constraint` block supports: + +* `max_pods_per_node` - + (Required) + The maximum number of pods to schedule on a single node. + +- - - + +* `annotations` - + (Optional) + Optional. Annotations on the node pool. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. Keys can have 2 segments: prefix (optional) and name (required), separated by a slash (/). Prefix must be a DNS subdomain. Name must be 63 characters or less, begin and end with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics between. + +**Note**: This field is non-authoritative, and will only manage the annotations present in your configuration. +Please refer to the field `effective_annotations` for all of the annotations present on the resource. + +* `azure_availability_zone` - + (Optional) + Optional. The Azure availability zone of the nodes in this nodepool. When unspecified, it defaults to `1`. + +* `management` - + (Optional) + The Management configuration for this node pool. 
+ +* `project` - + (Optional) + The project for the resource + + + +The `proxy_config` block supports: + +* `resource_group_id` - + (Required) + The ARM ID of the resource group containing proxy keyvault. Resource group ids are formatted as `/subscriptions//resourceGroups/` + +* `secret_id` - + (Required) + The URL of the proxy setting secret with its version. Secret ids are formatted as `https:.vault.azure.net/secrets//`. + +The `root_volume` block supports: + +* `size_gib` - + (Optional) + Optional. The size of the disk, in GiBs. When unspecified, a default value is provided. See the specific reference in the parent resource. + +The `ssh_config` block supports: + +* `authorized_key` - + (Required) + The SSH public key data for VMs managed by Anthos. This accepts the authorized_keys file format used in OpenSSH according to the sshd(8) manual page. + +The `management` block supports: + +* `auto_repair` - + (Optional) + Optional. Whether or not the nodes will be automatically repaired. + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `id` - an identifier for the resource with format `projects/{{project}}/locations/{{location}}/azureClusters/{{cluster}}/azureNodePools/{{name}}` + +* `create_time` - + Output only. The time at which this node pool was created. + +* `effective_annotations` - + All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services. + +* `etag` - + Allows clients to perform consistent read-modify-writes through optimistic concurrency control. May be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. + +* `reconciling` - + Output only. If set, there are currently pending changes to the node pool. + +* `state` - + Output only. The current state of the node pool. 
Possible values: STATE_UNSPECIFIED, PROVISIONING, RUNNING, RECONCILING, STOPPING, ERROR, DEGRADED + +* `uid` - + Output only. A globally unique identifier for the node pool. + +* `update_time` - + Output only. The time at which this node pool was last updated. + +## Timeouts + +This resource provides the following +[Timeouts](https://developer.hashicorp.com/terraform/plugin/sdkv2/resources/retries-and-customizable-timeouts) configuration options: + +- `create` - Default is 20 minutes. +- `update` - Default is 20 minutes. +- `delete` - Default is 20 minutes. + +## Import + +NodePool can be imported using any of these accepted formats: +* `projects/{{project}}/locations/{{location}}/azureClusters/{{cluster}}/azureNodePools/{{name}}` +* `{{project}}/{{location}}/{{cluster}}/{{name}}` +* `{{location}}/{{cluster}}/{{name}}` + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import NodePool using one of the formats above. For example: + + +```tf +import { + id = "projects/{{project}}/locations/{{location}}/azureClusters/{{cluster}}/azureNodePools/{{name}}" + to = google_container_azure_node_pool.default +} +``` + +When using the [`terraform import` command](https://developer.hashicorp.com/terraform/cli/commands/import), NodePool can be imported using one of the formats above. 
For example: + +``` +$ terraform import google_container_azure_node_pool.default projects/{{project}}/locations/{{location}}/azureClusters/{{cluster}}/azureNodePools/{{name}} +$ terraform import google_container_azure_node_pool.default {{project}}/{{location}}/{{cluster}}/{{name}} +$ terraform import google_container_azure_node_pool.default {{location}}/{{cluster}}/{{name}} +``` + + + diff --git a/mmv1/third_party/terraform/website/docs/r/dataplex_asset.html.markdown b/mmv1/third_party/terraform/website/docs/r/dataplex_asset.html.markdown new file mode 100644 index 000000000000..847fcf9d7e4c --- /dev/null +++ b/mmv1/third_party/terraform/website/docs/r/dataplex_asset.html.markdown @@ -0,0 +1,270 @@ +--- +subcategory: "Dataplex" +description: |- + The Dataplex Asset resource +--- + +# google_dataplex_asset + +The Dataplex Asset resource + +## Example Usage - basic_asset +```hcl +resource "google_storage_bucket" "basic_bucket" { + name = "bucket" + location = "us-west1" + uniform_bucket_level_access = true + lifecycle { + ignore_changes = [ + labels + ] + } + + project = "my-project-name" +} + +resource "google_dataplex_lake" "basic_lake" { + name = "lake" + location = "us-west1" + project = "my-project-name" +} + + +resource "google_dataplex_zone" "basic_zone" { + name = "zone" + location = "us-west1" + lake = google_dataplex_lake.basic_lake.name + type = "RAW" + + discovery_spec { + enabled = false + } + + + resource_spec { + location_type = "SINGLE_REGION" + } + + project = "my-project-name" +} + + +resource "google_dataplex_asset" "primary" { + name = "asset" + location = "us-west1" + + lake = google_dataplex_lake.basic_lake.name + dataplex_zone = google_dataplex_zone.basic_zone.name + + discovery_spec { + enabled = false + } + + resource_spec { + name = "projects/my-project-name/buckets/bucket" + type = "STORAGE_BUCKET" + } + + labels = { + env = "foo" + my-asset = "exists" + } + + + project = "my-project-name" + depends_on = [ + 
google_storage_bucket.basic_bucket + ] +} +``` + +## Argument Reference + +The following arguments are supported: + +* `dataplex_zone` - + (Required) + The zone for the resource + +* `discovery_spec` - + (Required) + Required. Specification of the discovery feature applied to data referenced by this asset. When this spec is left unset, the asset will use the spec set on the parent zone. + +* `lake` - + (Required) + The lake for the resource + +* `location` - + (Required) + The location for the resource + +* `name` - + (Required) + The name of the asset. + +* `resource_spec` - + (Required) + Required. Immutable. Specification of the resource that is referenced by this asset. + + + +The `discovery_spec` block supports: + +* `csv_options` - + (Optional) + Optional. Configuration for CSV data. + +* `enabled` - + (Required) + Required. Whether discovery is enabled. + +* `exclude_patterns` - + (Optional) + Optional. The list of patterns to apply for selecting data to exclude during discovery. For Cloud Storage bucket assets, these are interpreted as glob patterns used to match object names. For BigQuery dataset assets, these are interpreted as patterns to match table names. + +* `include_patterns` - + (Optional) + Optional. The list of patterns to apply for selecting data to include during discovery if only a subset of the data should be considered. For Cloud Storage bucket assets, these are interpreted as glob patterns used to match object names. For BigQuery dataset assets, these are interpreted as patterns to match table names. + +* `json_options` - + (Optional) + Optional. Configuration for Json data. + +* `schedule` - + (Optional) + Optional. Cron schedule (https://en.wikipedia.org/wiki/Cron) for running discovery periodically. Successive discovery runs must be scheduled at least 60 minutes apart. The default value is to run discovery every 60 minutes. 
To explicitly set a timezone to the cron tab, apply a prefix in the cron tab: "CRON_TZ=${IANA_TIME_ZONE}" or TZ=${IANA_TIME_ZONE}". The ${IANA_TIME_ZONE} may only be a valid string from IANA time zone database. For example, "CRON_TZ=America/New_York 1 * * * *", or "TZ=America/New_York 1 * * * *". + +The `resource_spec` block supports: + +* `name` - + (Optional) + Immutable. Relative name of the cloud resource that contains the data that is being managed within a lake. For example: `projects/{project_number}/buckets/{bucket_id}` `projects/{project_number}/datasets/{dataset_id}` + +* `read_access_mode` - + (Optional) + Optional. Determines how read permissions are handled for each asset and their associated tables. Only available to storage buckets assets. Possible values: DIRECT, MANAGED + +* `type` - + (Required) + Required. Immutable. Type of resource. Possible values: STORAGE_BUCKET, BIGQUERY_DATASET + +- - - + +* `description` - + (Optional) + Optional. Description of the asset. + +* `display_name` - + (Optional) + Optional. User friendly display name. + +* `labels` - + (Optional) + Optional. User defined labels for the asset. + +**Note**: This field is non-authoritative, and will only manage the labels present in your configuration. +Please refer to the field `effective_labels` for all of the labels present on the resource. + +* `project` - + (Optional) + The project for the resource + + + +The `csv_options` block supports: + +* `delimiter` - + (Optional) + Optional. The delimiter being used to separate values. This defaults to ','. + +* `disable_type_inference` - + (Optional) + Optional. Whether to disable the inference of data type for CSV data. If true, all columns will be registered as strings. + +* `encoding` - + (Optional) + Optional. The character encoding of the data. The default is UTF-8. + +* `header_rows` - + (Optional) + Optional. The number of rows to interpret as header rows that should be skipped when reading data rows. 
+ +The `json_options` block supports: + +* `disable_type_inference` - + (Optional) + Optional. Whether to disable the inference of data type for Json data. If true, all columns will be registered as their primitive types (strings, number or boolean). + +* `encoding` - + (Optional) + Optional. The character encoding of the data. The default is UTF-8. + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `id` - an identifier for the resource with format `projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{dataplex_zone}}/assets/{{name}}` + +* `create_time` - + Output only. The time when the asset was created. + +* `discovery_status` - + Output only. Status of the discovery feature applied to data referenced by this asset. + +* `effective_labels` - + All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services. + +* `resource_status` - + Output only. Status of the resource referenced by this asset. + +* `security_status` - + Output only. Status of the security policy applied to resource referenced by this asset. + +* `state` - + Output only. Current state of the asset. Possible values: STATE_UNSPECIFIED, ACTIVE, CREATING, DELETING, ACTION_REQUIRED + +* `terraform_labels` - + The combination of labels configured directly on the resource and default labels configured on the provider. + +* `uid` - + Output only. System generated globally unique ID for the asset. This ID will be different if the asset is deleted and re-created with the same name. + +* `update_time` - + Output only. The time when the asset was last updated. + +## Timeouts + +This resource provides the following +[Timeouts](https://developer.hashicorp.com/terraform/plugin/sdkv2/resources/retries-and-customizable-timeouts) configuration options: + +- `create` - Default is 20 minutes. +- `update` - Default is 20 minutes. 
+- `delete` - Default is 20 minutes. + +## Import + +Asset can be imported using any of these accepted formats: +* `projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{dataplex_zone}}/assets/{{name}}` +* `{{project}}/{{location}}/{{lake}}/{{dataplex_zone}}/{{name}}` +* `{{location}}/{{lake}}/{{dataplex_zone}}/{{name}}` + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Asset using one of the formats above. For example: + + +```tf +import { + id = "projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{dataplex_zone}}/assets/{{name}}" + to = google_dataplex_asset.default +} +``` + +When using the [`terraform import` command](https://developer.hashicorp.com/terraform/cli/commands/import), Asset can be imported using one of the formats above. For example: + +``` +$ terraform import google_dataplex_asset.default projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{dataplex_zone}}/assets/{{name}} +$ terraform import google_dataplex_asset.default {{project}}/{{location}}/{{lake}}/{{dataplex_zone}}/{{name}} +$ terraform import google_dataplex_asset.default {{location}}/{{lake}}/{{dataplex_zone}}/{{name}} +``` + + + diff --git a/mmv1/third_party/terraform/website/docs/r/dataplex_lake.html.markdown b/mmv1/third_party/terraform/website/docs/r/dataplex_lake.html.markdown new file mode 100644 index 000000000000..aa9140250af2 --- /dev/null +++ b/mmv1/third_party/terraform/website/docs/r/dataplex_lake.html.markdown @@ -0,0 +1,144 @@ +--- +subcategory: "Dataplex" +description: |- + The Dataplex Lake resource +--- + +# google_dataplex_lake + +The Dataplex Lake resource + +## Example Usage - basic_lake +A basic example of a dataplex lake +```hcl +resource "google_dataplex_lake" "primary" { + location = "us-west1" + name = "lake" + description = "Lake for DCL" + display_name = "Lake for DCL" + project = "my-project-name" + + labels = { + my-lake = "exists" + } +} + + 
+``` + +## Argument Reference + +The following arguments are supported: + +* `location` - + (Required) + The location for the resource + +* `name` - + (Required) + The name of the lake. + + + +- - - + +* `description` - + (Optional) + Optional. Description of the lake. + +* `display_name` - + (Optional) + Optional. User friendly display name. + +* `labels` - + (Optional) + Optional. User-defined labels for the lake. + +**Note**: This field is non-authoritative, and will only manage the labels present in your configuration. +Please refer to the field `effective_labels` for all of the labels present on the resource. + +* `metastore` - + (Optional) + Optional. Settings to manage lake and Dataproc Metastore service instance association. + +* `project` - + (Optional) + The project for the resource + + + +The `metastore` block supports: + +* `service` - + (Optional) + Optional. A relative reference to the Dataproc Metastore (https://cloud.google.com/dataproc-metastore/docs) service associated with the lake: `projects/{project_id}/locations/{location_id}/services/{service_id}` + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `id` - an identifier for the resource with format `projects/{{project}}/locations/{{location}}/lakes/{{name}}` + +* `asset_status` - + Output only. Aggregated status of the underlying assets of the lake. + +* `create_time` - + Output only. The time when the lake was created. + +* `effective_labels` - + All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services. + +* `metastore_status` - + Output only. Metastore status of the lake. + +* `service_account` - + Output only. Service account associated with this lake. This service account must be authorized to access or operate on resources managed by the lake. + +* `state` - + Output only. Current state of the lake. 
Possible values: STATE_UNSPECIFIED, ACTIVE, CREATING, DELETING, ACTION_REQUIRED + +* `terraform_labels` - + The combination of labels configured directly on the resource and default labels configured on the provider. + +* `uid` - + Output only. System generated globally unique ID for the lake. This ID will be different if the lake is deleted and re-created with the same name. + +* `update_time` - + Output only. The time when the lake was last updated. + +## Timeouts + +This resource provides the following +[Timeouts](https://developer.hashicorp.com/terraform/plugin/sdkv2/resources/retries-and-customizable-timeouts) configuration options: + +- `create` - Default is 20 minutes. +- `update` - Default is 20 minutes. +- `delete` - Default is 20 minutes. + +## Import + +Lake can be imported using any of these accepted formats: +* `projects/{{project}}/locations/{{location}}/lakes/{{name}}` +* `{{project}}/{{location}}/{{name}}` +* `{{location}}/{{name}}` + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Lake using one of the formats above. For example: + + +```tf +import { + id = "projects/{{project}}/locations/{{location}}/lakes/{{name}}" + to = google_dataplex_lake.default +} +``` + +When using the [`terraform import` command](https://developer.hashicorp.com/terraform/cli/commands/import), Lake can be imported using one of the formats above. 
For example: + +``` +$ terraform import google_dataplex_lake.default projects/{{project}}/locations/{{location}}/lakes/{{name}} +$ terraform import google_dataplex_lake.default {{project}}/{{location}}/{{name}} +$ terraform import google_dataplex_lake.default {{location}}/{{name}} +``` + + + diff --git a/mmv1/third_party/terraform/website/docs/r/dataplex_zone.html.markdown b/mmv1/third_party/terraform/website/docs/r/dataplex_zone.html.markdown new file mode 100644 index 000000000000..80977fc6d17b --- /dev/null +++ b/mmv1/third_party/terraform/website/docs/r/dataplex_zone.html.markdown @@ -0,0 +1,224 @@ +--- +subcategory: "Dataplex" +description: |- + The Dataplex Zone resource +--- + +# google_dataplex_zone + +The Dataplex Zone resource + +## Example Usage - basic_zone +A basic example of a dataplex zone +```hcl +resource "google_dataplex_zone" "primary" { + discovery_spec { + enabled = false + } + + lake = google_dataplex_lake.basic.name + location = "us-west1" + name = "zone" + + resource_spec { + location_type = "MULTI_REGION" + } + + type = "RAW" + description = "Zone for DCL" + display_name = "Zone for DCL" + project = "my-project-name" + labels = {} +} + +resource "google_dataplex_lake" "basic" { + location = "us-west1" + name = "lake" + description = "Lake for DCL" + display_name = "Lake for DCL" + project = "my-project-name" + + labels = { + my-lake = "exists" + } +} + + +``` + +## Argument Reference + +The following arguments are supported: + +* `discovery_spec` - + (Required) + Required. Specification of the discovery feature applied to data in this zone. + +* `lake` - + (Required) + The lake for the resource + +* `location` - + (Required) + The location for the resource + +* `name` - + (Required) + The name of the zone. + +* `resource_spec` - + (Required) + Required. Immutable. Specification of the resources that are referenced by the assets within this zone. + +* `type` - + (Required) + Required. Immutable. The type of the zone. 
Possible values: TYPE_UNSPECIFIED, RAW, CURATED + + + +The `discovery_spec` block supports: + +* `csv_options` - + (Optional) + Optional. Configuration for CSV data. + +* `enabled` - + (Required) + Required. Whether discovery is enabled. + +* `exclude_patterns` - + (Optional) + Optional. The list of patterns to apply for selecting data to exclude during discovery. For Cloud Storage bucket assets, these are interpreted as glob patterns used to match object names. For BigQuery dataset assets, these are interpreted as patterns to match table names. + +* `include_patterns` - + (Optional) + Optional. The list of patterns to apply for selecting data to include during discovery if only a subset of the data should be considered. For Cloud Storage bucket assets, these are interpreted as glob patterns used to match object names. For BigQuery dataset assets, these are interpreted as patterns to match table names. + +* `json_options` - + (Optional) + Optional. Configuration for Json data. + +* `schedule` - + (Optional) + Optional. Cron schedule (https://en.wikipedia.org/wiki/Cron) for running discovery periodically. Successive discovery runs must be scheduled at least 60 minutes apart. The default value is to run discovery every 60 minutes. To explicitly set a timezone to the cron tab, apply a prefix in the cron tab: "CRON_TZ=${IANA_TIME_ZONE}" or "TZ=${IANA_TIME_ZONE}". The ${IANA_TIME_ZONE} may only be a valid string from IANA time zone database. For example, "CRON_TZ=America/New_York 1 * * * *", or "TZ=America/New_York 1 * * * *". + +The `resource_spec` block supports: + +* `location_type` - + (Required) + Required. Immutable. The location type of the resources that are allowed to be attached to the assets within this zone. Possible values: LOCATION_TYPE_UNSPECIFIED, SINGLE_REGION, MULTI_REGION + +- - - + +* `description` - + (Optional) + Optional. Description of the zone. + +* `display_name` - + (Optional) + Optional. User friendly display name. 
+ +* `labels` - + (Optional) + Optional. User defined labels for the zone. + +**Note**: This field is non-authoritative, and will only manage the labels present in your configuration. +Please refer to the field `effective_labels` for all of the labels present on the resource. + +* `project` - + (Optional) + The project for the resource + + + +The `csv_options` block supports: + +* `delimiter` - + (Optional) + Optional. The delimiter being used to separate values. This defaults to ','. + +* `disable_type_inference` - + (Optional) + Optional. Whether to disable the inference of data type for CSV data. If true, all columns will be registered as strings. + +* `encoding` - + (Optional) + Optional. The character encoding of the data. The default is UTF-8. + +* `header_rows` - + (Optional) + Optional. The number of rows to interpret as header rows that should be skipped when reading data rows. + +The `json_options` block supports: + +* `disable_type_inference` - + (Optional) + Optional. Whether to disable the inference of data type for Json data. If true, all columns will be registered as their primitive types (strings, number or boolean). + +* `encoding` - + (Optional) + Optional. The character encoding of the data. The default is UTF-8. + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `id` - an identifier for the resource with format `projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{name}}` + +* `asset_status` - + Output only. Aggregated status of the underlying assets of the zone. + +* `create_time` - + Output only. The time when the zone was created. + +* `effective_labels` - + All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services. + +* `state` - + Output only. Current state of the zone. 
Possible values: STATE_UNSPECIFIED, ACTIVE, CREATING, DELETING, ACTION_REQUIRED + +* `terraform_labels` - + The combination of labels configured directly on the resource and default labels configured on the provider. + +* `uid` - + Output only. System generated globally unique ID for the zone. This ID will be different if the zone is deleted and re-created with the same name. + +* `update_time` - + Output only. The time when the zone was last updated. + +## Timeouts + +This resource provides the following +[Timeouts](https://developer.hashicorp.com/terraform/plugin/sdkv2/resources/retries-and-customizable-timeouts) configuration options: + +- `create` - Default is 20 minutes. +- `update` - Default is 20 minutes. +- `delete` - Default is 20 minutes. + +## Import + +Zone can be imported using any of these accepted formats: +* `projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{name}}` +* `{{project}}/{{location}}/{{lake}}/{{name}}` +* `{{location}}/{{lake}}/{{name}}` + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Zone using one of the formats above. For example: + + +```tf +import { + id = "projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{name}}" + to = google_dataplex_zone.default +} +``` + +When using the [`terraform import` command](https://developer.hashicorp.com/terraform/cli/commands/import), Zone can be imported using one of the formats above. 
For example: + +``` +$ terraform import google_dataplex_zone.default projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{name}} +$ terraform import google_dataplex_zone.default {{project}}/{{location}}/{{lake}}/{{name}} +$ terraform import google_dataplex_zone.default {{location}}/{{lake}}/{{name}} +``` + + + diff --git a/mmv1/third_party/terraform/website/docs/r/firebaserules_release.html.markdown b/mmv1/third_party/terraform/website/docs/r/firebaserules_release.html.markdown new file mode 100644 index 000000000000..815a3e1adda8 --- /dev/null +++ b/mmv1/third_party/terraform/website/docs/r/firebaserules_release.html.markdown @@ -0,0 +1,171 @@ +--- +subcategory: "Firebaserules" +description: |- + +--- + +# google_firebaserules_release + + + +For more information, see: +* [Get started with Firebase Security Rules](https://firebase.google.com/docs/rules/get-started) +## Example Usage - firestore_release +Creates a Firebase Rules Release to the default Cloud Firestore instance +```hcl +resource "google_firebaserules_release" "primary" { + name = "cloud.firestore" + project = "my-project-name" + ruleset_name = "projects/my-project-name/rulesets/${google_firebaserules_ruleset.firestore.name}" +} + +resource "google_firebaserules_ruleset" "firestore" { + project = "my-project-name" + + source { + files { + content = "service cloud.firestore {match /databases/{database}/documents { match /{document=**} { allow read, write: if false; } } }" + name = "firestore.rules" + } + } +} + +``` +## Example Usage - firestore_release_additional +Creates a Firebase Rules Release to an additional Cloud Firestore instance +```hcl +resource "google_firebaserules_release" "primary" { + name = "cloud.firestore/database" + project = "my-project-name" + ruleset_name = "projects/my-project-name/rulesets/${google_firebaserules_ruleset.firestore.name}" +} + +resource "google_firebaserules_ruleset" "firestore" { + project = "my-project-name" + + source { + files { + content = 
"service cloud.firestore {match /databases/{database}/documents { match /{document=**} { allow read, write: if false; } } }" + name = "firestore.rules" + } + } +} + +``` +## Example Usage - storage_release +Creates a Firebase Rules Release for a Storage bucket +```hcl +resource "google_firebaserules_release" "primary" { + provider = google-beta + name = "firebase.storage/${google_storage_bucket.bucket.name}" + ruleset_name = "projects/my-project-name/rulesets/${google_firebaserules_ruleset.storage.name}" + project = "my-project-name" + + lifecycle { + replace_triggered_by = [ + google_firebaserules_ruleset.storage + ] + } +} + +# Provision a non-default Cloud Storage bucket. +resource "google_storage_bucket" "bucket" { + provider = google-beta + project = "my-project-name" + name = "bucket" + location = "us-west1" +} + +# Make the Storage bucket accessible for Firebase SDKs, authentication, and Firebase Security Rules. +resource "google_firebase_storage_bucket" "bucket" { + provider = google-beta + project = "my-project-name" + bucket_id = google_storage_bucket.bucket.name +} + +# Create a ruleset of Firebase Security Rules from a local file. +resource "google_firebaserules_ruleset" "storage" { + provider = google-beta + project = "my-project-name" + source { + files { + name = "storage.rules" + content = "service firebase.storage {match /b/{bucket}/o {match /{allPaths=**} {allow read, write: if request.auth != null;}}}" + } + } + + depends_on = [ + google_firebase_storage_bucket.bucket + ] +} + +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - + (Required) + Format: `projects/{project_id}/releases/{release_id}`. Firestore Rules Releases will **always** have the name 'cloud.firestore' + +* `ruleset_name` - + (Required) + Name of the `Ruleset` referred to by this `Release`. The `Ruleset` must exist for the `Release` to be created. 
+ + + +- - - + +* `project` - + (Optional) + The project for the resource + + + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `id` - an identifier for the resource with format `projects/{{project}}/releases/{{name}}` + +* `create_time` - + Output only. Time the release was created. + +* `disabled` - + Disable the release to keep it from being served. The response code of NOT_FOUND will be given for executables generated from this Release. + +* `update_time` - + Output only. Time the release was updated. + +## Timeouts + +This resource provides the following +[Timeouts](https://developer.hashicorp.com/terraform/plugin/sdkv2/resources/retries-and-customizable-timeouts) configuration options: + +- `create` - Default is 20 minutes. +- `delete` - Default is 20 minutes. + +## Import + +Release can be imported using any of these accepted formats: +* `projects/{{project}}/releases/{{name}}` + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Release using one of the formats above. For example: + + +```tf +import { + id = "projects/{{project}}/releases/{{name}}" + to = google_firebaserules_release.default +} +``` + +When using the [`terraform import` command](https://developer.hashicorp.com/terraform/cli/commands/import), Release can be imported using one of the formats above. 
For example: + +``` +$ terraform import google_firebaserules_release.default projects/{{project}}/releases/{{name}} +``` + + + diff --git a/mmv1/third_party/terraform/website/docs/r/firebaserules_ruleset.html.markdown b/mmv1/third_party/terraform/website/docs/r/firebaserules_ruleset.html.markdown new file mode 100644 index 000000000000..f1b3ff4a0887 --- /dev/null +++ b/mmv1/third_party/terraform/website/docs/r/firebaserules_ruleset.html.markdown @@ -0,0 +1,140 @@ +--- +subcategory: "Firebaserules" +description: |- + +--- + +# google_firebaserules_ruleset + + + +For more information, see: +* [Get started with Firebase Security Rules](https://firebase.google.com/docs/rules/get-started) +## Example Usage - basic_ruleset +Creates a basic Firestore ruleset +```hcl +resource "google_firebaserules_ruleset" "primary" { + source { + files { + content = "service cloud.firestore {match /databases/{database}/documents { match /{document=**} { allow read, write: if false; } } }" + name = "firestore.rules" + fingerprint = "" + } + + language = "" + } + + project = "my-project-name" +} + + +``` +## Example Usage - minimal_ruleset +Creates a minimal Firestore ruleset +```hcl +resource "google_firebaserules_ruleset" "primary" { + source { + files { + content = "service cloud.firestore {match /databases/{database}/documents { match /{document=**} { allow read, write: if false; } } }" + name = "firestore.rules" + } + } + + project = "my-project-name" +} + + +``` + +## Argument Reference + +The following arguments are supported: + +* `source` - + (Required) + `Source` for the `Ruleset`. + + + +The `source` block supports: + +* `files` - + (Required) + `File` set constituting the `Source` bundle. + +* `language` - + (Optional) + `Language` of the `Source` bundle. If unspecified, the language will default to `FIREBASE_RULES`. 
Possible values: LANGUAGE_UNSPECIFIED, FIREBASE_RULES, EVENT_FLOW_TRIGGERS + +- - - + +* `project` - + (Optional) + The project for the resource + + + +The `files` block supports: + +* `content` - + (Required) + Textual Content. + +* `fingerprint` - + (Optional) + Fingerprint (e.g. github sha) associated with the `File`. + +* `name` - + (Required) + File name. + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `id` - an identifier for the resource with format `projects/{{project}}/rulesets/{{name}}` + +* `create_time` - + Output only. Time the `Ruleset` was created. + +* `metadata` - + Output only. The metadata for this ruleset. + +* `name` - + Output only. Name of the `Ruleset`. The ruleset_id is auto generated by the service. Format: `projects/{project_id}/rulesets/{ruleset_id}` + +## Timeouts + +This resource provides the following +[Timeouts](https://developer.hashicorp.com/terraform/plugin/sdkv2/resources/retries-and-customizable-timeouts) configuration options: + +- `create` - Default is 20 minutes. +- `delete` - Default is 20 minutes. + +## Import + +Ruleset can be imported using any of these accepted formats: +* `projects/{{project}}/rulesets/{{name}}` +* `{{project}}/{{name}}` +* `{{name}}` + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Ruleset using one of the formats above. For example: + + +```tf +import { + id = "projects/{{project}}/rulesets/{{name}}" + to = google_firebaserules_ruleset.default +} +``` + +When using the [`terraform import` command](https://developer.hashicorp.com/terraform/cli/commands/import), Ruleset can be imported using one of the formats above. 
For example: + +``` +$ terraform import google_firebaserules_ruleset.default projects/{{project}}/rulesets/{{name}} +$ terraform import google_firebaserules_ruleset.default {{project}}/{{name}} +$ terraform import google_firebaserules_ruleset.default {{name}} +``` + + + diff --git a/mmv1/third_party/terraform/website/docs/r/recaptcha_enterprise_key.html.markdown b/mmv1/third_party/terraform/website/docs/r/recaptcha_enterprise_key.html.markdown new file mode 100644 index 000000000000..9f050594d1ad --- /dev/null +++ b/mmv1/third_party/terraform/website/docs/r/recaptcha_enterprise_key.html.markdown @@ -0,0 +1,319 @@ +--- +subcategory: "RecaptchaEnterprise" +description: |- + The RecaptchaEnterprise Key resource +--- + +# google_recaptcha_enterprise_key + +The RecaptchaEnterprise Key resource + +## Example Usage - android_key +A basic test of recaptcha enterprise key that can be used by Android apps +```hcl +resource "google_recaptcha_enterprise_key" "primary" { + display_name = "display-name-one" + + android_settings { + allow_all_package_names = true + allowed_package_names = [] + } + + project = "my-project-name" + + testing_options { + testing_score = 0.8 + } + + labels = { + label-one = "value-one" + } +} + + +``` +## Example Usage - ios_key +A basic test of recaptcha enterprise key that can be used by iOS apps +```hcl +resource "google_recaptcha_enterprise_key" "primary" { + display_name = "display-name-one" + + ios_settings { + allow_all_bundle_ids = true + allowed_bundle_ids = [] + } + + project = "my-project-name" + + testing_options { + testing_score = 1 + } + + labels = { + label-one = "value-one" + } +} + + +``` +## Example Usage - minimal_key +A minimal test of recaptcha enterprise key +```hcl +resource "google_recaptcha_enterprise_key" "primary" { + display_name = "display-name-one" + project = "my-project-name" + + web_settings { + integration_type = "SCORE" + allow_all_domains = true + } + + labels = {} +} + + +``` +## Example Usage - waf_key +A basic 
test of recaptcha enterprise key that includes WAF settings +```hcl +resource "google_recaptcha_enterprise_key" "primary" { + display_name = "display-name-one" + project = "my-project-name" + + testing_options { + testing_challenge = "NOCAPTCHA" + testing_score = 0.5 + } + + waf_settings { + waf_feature = "CHALLENGE_PAGE" + waf_service = "CA" + } + + web_settings { + integration_type = "INVISIBLE" + allow_all_domains = true + allowed_domains = [] + challenge_security_preference = "USABILITY" + } + + labels = { + label-one = "value-one" + } +} + + +``` +## Example Usage - web_key +A basic test of recaptcha enterprise key that can be used by websites +```hcl +resource "google_recaptcha_enterprise_key" "primary" { + display_name = "display-name-one" + project = "my-project-name" + + testing_options { + testing_challenge = "NOCAPTCHA" + testing_score = 0.5 + } + + web_settings { + integration_type = "CHECKBOX" + allow_all_domains = true + allowed_domains = [] + challenge_security_preference = "USABILITY" + } + + labels = { + label-one = "value-one" + } +} + + +``` +## Example Usage - web_score_key +A basic test of recaptcha enterprise key with score integration type that can be used by websites +```hcl +resource "google_recaptcha_enterprise_key" "primary" { + display_name = "display-name-one" + project = "my-project-name" + + testing_options { + testing_score = 0.5 + } + + web_settings { + integration_type = "SCORE" + allow_all_domains = true + allow_amp_traffic = false + allowed_domains = [] + } + + labels = { + label-one = "value-one" + } +} + + +``` + +## Argument Reference + +The following arguments are supported: + +* `display_name` - + (Required) + Human-readable display name of this key. Modifiable by user. + + + +- - - + +* `android_settings` - + (Optional) + Settings for keys that can be used by Android apps. + +* `ios_settings` - + (Optional) + Settings for keys that can be used by iOS apps. 
+ +* `labels` - + (Optional) + See [Creating and managing labels](https://cloud.google.com/recaptcha-enterprise/docs/labels). + +**Note**: This field is non-authoritative, and will only manage the labels present in your configuration. +Please refer to the field `effective_labels` for all of the labels present on the resource. + +* `project` - + (Optional) + The project for the resource + +* `testing_options` - + (Optional) + Options for user acceptance testing. + +* `waf_settings` - + (Optional) + Settings specific to keys that can be used for WAF (Web Application Firewall). + +* `web_settings` - + (Optional) + Settings for keys that can be used by websites. + + + +The `android_settings` block supports: + +* `allow_all_package_names` - + (Optional) + If set to true, it means allowed_package_names will not be enforced. + +* `allowed_package_names` - + (Optional) + Android package names of apps allowed to use the key. Example: 'com.companyname.appname' + +The `ios_settings` block supports: + +* `allow_all_bundle_ids` - + (Optional) + If set to true, it means allowed_bundle_ids will not be enforced. + +* `allowed_bundle_ids` - + (Optional) + iOS bundle ids of apps allowed to use the key. Example: 'com.companyname.productname.appname' + +The `testing_options` block supports: + +* `testing_challenge` - + (Optional) + For challenge-based keys only (CHECKBOX, INVISIBLE), all challenge requests for this site will return nocaptcha if NOCAPTCHA, or an unsolvable challenge if UNSOLVABLE_CHALLENGE. Possible values: TESTING_CHALLENGE_UNSPECIFIED, NOCAPTCHA, UNSOLVABLE_CHALLENGE + +* `testing_score` - + (Optional) + All assessments for this Key will return this score. Must be between 0 (likely not legitimate) and 1 (likely legitimate) inclusive. + +The `waf_settings` block supports: + +* `waf_feature` - + (Required) + Supported WAF features. For more information, see https://cloud.google.com/recaptcha-enterprise/docs/usecase#comparison_of_features. 
Possible values: CHALLENGE_PAGE, SESSION_TOKEN, ACTION_TOKEN, EXPRESS + +* `waf_service` - + (Required) + The WAF service that uses this key. Possible values: CA, FASTLY + +The `web_settings` block supports: + +* `allow_all_domains` - + (Optional) + If set to true, it means allowed_domains will not be enforced. + +* `allow_amp_traffic` - + (Optional) + If set to true, the key can be used on AMP (Accelerated Mobile Pages) websites. This is supported only for the SCORE integration type. + +* `allowed_domains` - + (Optional) + Domains or subdomains of websites allowed to use the key. All subdomains of an allowed domain are automatically allowed. A valid domain requires a host and must not include any path, port, query or fragment. Examples: 'example.com' or 'subdomain.example.com' + +* `challenge_security_preference` - + (Optional) + Settings for the frequency and difficulty at which this key triggers captcha challenges. This should only be specified for IntegrationTypes CHECKBOX and INVISIBLE. Possible values: CHALLENGE_SECURITY_PREFERENCE_UNSPECIFIED, USABILITY, BALANCE, SECURITY + +* `integration_type` - + (Required) + Required. Describes how this key is integrated with the website. Possible values: SCORE, CHECKBOX, INVISIBLE + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `id` - an identifier for the resource with format `projects/{{project}}/keys/{{name}}` + +* `create_time` - + The timestamp corresponding to the creation of this Key. + +* `effective_labels` - + All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services. + +* `name` - + The resource id for the Key, which is the same as the Site Key itself. + +* `terraform_labels` - + The combination of labels configured directly on the resource and default labels configured on the provider. 
+ +## Timeouts + +This resource provides the following +[Timeouts](https://developer.hashicorp.com/terraform/plugin/sdkv2/resources/retries-and-customizable-timeouts) configuration options: + +- `create` - Default is 20 minutes. +- `update` - Default is 20 minutes. +- `delete` - Default is 20 minutes. + +## Import + +Key can be imported using any of these accepted formats: +* `projects/{{project}}/keys/{{name}}` +* `{{project}}/{{name}}` +* `{{name}}` + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Key using one of the formats above. For example: + + +```tf +import { + id = "projects/{{project}}/keys/{{name}}" + to = google_recaptcha_enterprise_key.default +} +``` + +When using the [`terraform import` command](https://developer.hashicorp.com/terraform/cli/commands/import), Key can be imported using one of the formats above. For example: + +``` +$ terraform import google_recaptcha_enterprise_key.default projects/{{project}}/keys/{{name}} +$ terraform import google_recaptcha_enterprise_key.default {{project}}/{{name}} +$ terraform import google_recaptcha_enterprise_key.default {{name}} +``` + + + From 791e8d7d966f53db60de8d1b5f100c604616c86b Mon Sep 17 00:00:00 2001 From: Chris Hawk Date: Wed, 18 Mar 2026 13:28:41 -0700 Subject: [PATCH 04/13] Simplify a test template --- ..._clouddeploy_target_generated_test.go.tmpl | 412 +++++++++--------- 1 file changed, 204 insertions(+), 208 deletions(-) diff --git a/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_target_generated_test.go.tmpl b/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_target_generated_test.go.tmpl index 89bf127f12d2..635b54feeef3 100644 --- a/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_target_generated_test.go.tmpl +++ b/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_target_generated_test.go.tmpl @@ -15,48 +15,7 @@ import ( dcl 
"github.com/hashicorp/terraform-provider-google/google/tpgdclresource" ) -{{- if eq $.TargetVersionName "ga" }} func TestAccClouddeployTarget_Target(t *testing.T) { -{{- else }} -func TestAccClouddeployTarget_MultiTarget(t *testing.T) { -{{- end }} - t.Parallel() - - context := map[string]interface{}{ - "project_name": envvar.GetTestProjectFromEnv(), - "region": envvar.GetTestRegionFromEnv(), - "random_suffix": acctest.RandString(t, 10), - } - - acctest.VcrTest(t, resource.TestCase{ -{{- if ne $.TargetVersionName "ga" }} - PreCheck: func() { acctest.AccTestPreCheck(t) }, - - ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), - CheckDestroy: testAccCheckClouddeployTargetDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccClouddeployTarget_MultiTarget(context), - }, - { - ResourceName: "google_clouddeploy_target.primary", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"labels", "terraform_labels", "annotations"}, - }, - { - Config: testAccClouddeployTarget_MultiTargetUpdate0(context), - }, - { - ResourceName: "google_clouddeploy_target.primary", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"labels", "terraform_labels", "annotations"}, - }, - }, - }) -} -func TestAccClouddeployTarget_RunTarget(t *testing.T) { t.Parallel() context := map[string]interface{}{ @@ -66,43 +25,6 @@ func TestAccClouddeployTarget_RunTarget(t *testing.T) { } acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - - ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), - CheckDestroy: testAccCheckClouddeployTargetDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccClouddeployTarget_RunTarget(context), - }, - { - ResourceName: "google_clouddeploy_target.primary", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"labels", "terraform_labels", "annotations"}, - }, - { - Config: 
testAccClouddeployTarget_RunTargetUpdate0(context), - }, - { - ResourceName: "google_clouddeploy_target.primary", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"labels", "terraform_labels", "annotations"}, - }, - }, - }) -} -func TestAccClouddeployTarget_Target(t *testing.T) { - t.Parallel() - - context := map[string]interface{}{ - "project_name": envvar.GetTestProjectFromEnv(), - "region": envvar.GetTestRegionFromEnv(), - "random_suffix": acctest.RandString(t, 10), - } - - acctest.VcrTest(t, resource.TestCase{ -{{- end }} PreCheck: func() { acctest.AccTestPreCheck(t) }, ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckClouddeployTargetDestroyProducer(t), @@ -154,24 +76,22 @@ func TestAccClouddeployTarget_Target(t *testing.T) { }, }, }) -{{- if ne $.TargetVersionName "ga" }} } -func testAccClouddeployTarget_MultiTarget(context map[string]interface{}) string { +func testAccClouddeployTarget_Target(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_clouddeploy_target" "primary" { - location = "%{region}" - name = "tf-test-target%{random_suffix}" - deploy_parameters = {} - description = "multi-target description" + location = "%{region}" + name = "tf-test-target%{random_suffix}" - execution_configs { - usages = ["RENDER", "DEPLOY"] - execution_timeout = "3600s" + deploy_parameters = { + deployParameterKey = "deployParameterValue" } - multi_target { - target_ids = ["1", "2"] + description = "basic description" + + gke { + cluster = "projects/%{project_name}/locations/%{region}/clusters/example-cluster-name" } project = "%{project_name}" @@ -188,22 +108,23 @@ resource "google_clouddeploy_target" "primary" { my_second_label = "example-label-2" } - provider = google-beta } + `, context) } -func testAccClouddeployTarget_MultiTargetUpdate0(context map[string]interface{}) string { +func testAccClouddeployTarget_TargetUpdate0(context map[string]interface{}) string { 
return acctest.Nprintf(` resource "google_clouddeploy_target" "primary" { location = "%{region}" name = "tf-test-target%{random_suffix}" deploy_parameters = {} - description = "updated mutli-target description" + description = "updated description" - multi_target { - target_ids = ["1", "2", "3"] + gke { + cluster = "projects/%{project_name}/locations/%{region}/clusters/different-example-cluster-name" + internal_ip = true } project = "%{project_name}" @@ -216,116 +137,94 @@ resource "google_clouddeploy_target" "primary" { } labels = { - my_second_label = "example-label-2" + my_second_label = "updated-example-label-2" my_third_label = "example-label-3" } - provider = google-beta } + `, context) } -func testAccClouddeployTarget_RunTarget(context map[string]interface{}) string { +func testAccClouddeployTarget_TargetUpdate1(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_clouddeploy_target" "primary" { location = "%{region}" name = "tf-test-target%{random_suffix}" deploy_parameters = {} - description = "basic description" + description = "updated description" execution_configs { - usages = ["RENDER", "DEPLOY"] - execution_timeout = "3600s" + usages = ["RENDER", "DEPLOY"] + artifact_storage = "gs://my-bucket/my-dir" + service_account = "pool-owner@%{project_name}.iam.gserviceaccount.com" } - project = "%{project_name}" - require_approval = false - - run { - location = "projects/%{project_name}/locations/%{region}" + gke { + cluster = "projects/%{project_name}/locations/%{region}/clusters/different-example-cluster-name" + internal_ip = true } + project = "%{project_name}" + require_approval = true + annotations = { - my_first_annotation = "example-annotation-1" + my_second_annotation = "updated-example-annotation-2" - my_second_annotation = "example-annotation-2" + my_third_annotation = "example-annotation-3" } labels = { - my_first_label = "example-label-1" + my_second_label = "updated-example-label-2" - my_second_label = 
"example-label-2" + my_third_label = "example-label-3" } - provider = google-beta } + `, context) } -func testAccClouddeployTarget_RunTargetUpdate0(context map[string]interface{}) string { +func testAccClouddeployTarget_TargetUpdate2(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_clouddeploy_target" "primary" { location = "%{region}" name = "tf-test-target%{random_suffix}" deploy_parameters = {} - description = "basic description" - project = "%{project_name}" - require_approval = true - - run { - location = "projects/%{project_name}/locations/%{region}" - } - - annotations = { - my_first_annotation = "example-annotation-1" - - my_second_annotation = "example-annotation-2" - - my_third_annotation = "example-annotation-3" - } - - labels = { - my_first_label = "example-label-1" + description = "updated description" - my_second_label = "example-label-2" + execution_configs { + usages = ["RENDER"] + artifact_storage = "gs://my-bucket/my-dir" + service_account = "pool-owner@%{project_name}.iam.gserviceaccount.com" } - provider = google-beta -} -`, context) -{{- end }} -} - -func testAccClouddeployTarget_Target(context map[string]interface{}) string { - return acctest.Nprintf(` -resource "google_clouddeploy_target" "primary" { - location = "%{region}" - name = "tf-test-target%{random_suffix}" - - deploy_parameters = { - deployParameterKey = "deployParameterValue" + execution_configs { + usages = ["DEPLOY"] + artifact_storage = "gs://deploy-bucket/deploy-dir" + service_account = "deploy-pool-owner@%{project_name}.iam.gserviceaccount.com" + worker_pool = "projects/%{project_name}/locations/%{region}/workerPools/my-deploy-pool" } - description = "basic description" - gke { - cluster = "projects/%{project_name}/locations/%{region}/clusters/example-cluster-name" + cluster = "projects/%{project_name}/locations/%{region}/clusters/different-example-cluster-name" + internal_ip = true } project = "%{project_name}" - require_approval = false + 
require_approval = true annotations = { - my_first_annotation = "example-annotation-1" + my_second_annotation = "updated-example-annotation-2" - my_second_annotation = "example-annotation-2" + my_third_annotation = "example-annotation-3" } labels = { - my_first_label = "example-label-1" + my_second_label = "updated-example-label-2" - my_second_label = "example-label-2" + my_third_label = "example-label-3" } } @@ -333,7 +232,7 @@ resource "google_clouddeploy_target" "primary" { `, context) } -func testAccClouddeployTarget_TargetUpdate0(context map[string]interface{}) string { +func testAccClouddeployTarget_TargetUpdate3(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_clouddeploy_target" "primary" { location = "%{region}" @@ -341,9 +240,24 @@ resource "google_clouddeploy_target" "primary" { deploy_parameters = {} description = "updated description" + execution_configs { + usages = ["RENDER"] + artifact_storage = "gs://other-bucket/other-dir" + service_account = "other-owner@%{project_name}.iam.gserviceaccount.com" + verbose = true + } + + execution_configs { + usages = ["DEPLOY"] + artifact_storage = "gs://deploy-bucket/deploy-dir" + service_account = "deploy-pool-owner@%{project_name}.iam.gserviceaccount.com" + worker_pool = "projects/%{project_name}/locations/%{region}/workerPools/my-deploy-pool" + } + gke { cluster = "projects/%{project_name}/locations/%{region}/clusters/different-example-cluster-name" internal_ip = true + proxy_url = "http://10.0.0.1" } project = "%{project_name}" @@ -366,69 +280,128 @@ resource "google_clouddeploy_target" "primary" { `, context) } -func testAccClouddeployTarget_TargetUpdate1(context map[string]interface{}) string { +{{- if ne $.TargetVersionName "ga" }} +func TestAccClouddeployTarget_MultiTarget(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": envvar.GetTestProjectFromEnv(), + "region": envvar.GetTestRegionFromEnv(), + "random_suffix": 
acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckClouddeployTargetDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccClouddeployTarget_MultiTarget(context), + }, + { + ResourceName: "google_clouddeploy_target.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels", "annotations"}, + }, + { + Config: testAccClouddeployTarget_MultiTargetUpdate0(context), + }, + { + ResourceName: "google_clouddeploy_target.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels", "annotations"}, + }, + }, + }) +} + +func TestAccClouddeployTarget_RunTarget(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": envvar.GetTestProjectFromEnv(), + "region": envvar.GetTestRegionFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckClouddeployTargetDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccClouddeployTarget_RunTarget(context), + }, + { + ResourceName: "google_clouddeploy_target.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels", "annotations"}, + }, + { + Config: testAccClouddeployTarget_RunTargetUpdate0(context), + }, + { + ResourceName: "google_clouddeploy_target.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels", "annotations"}, + }, + }, + }) +} + +func testAccClouddeployTarget_MultiTarget(context map[string]interface{}) string { return acctest.Nprintf(` resource 
"google_clouddeploy_target" "primary" { location = "%{region}" name = "tf-test-target%{random_suffix}" deploy_parameters = {} - description = "updated description" + description = "multi-target description" execution_configs { - usages = ["RENDER", "DEPLOY"] - artifact_storage = "gs://my-bucket/my-dir" - service_account = "pool-owner@%{project_name}.iam.gserviceaccount.com" + usages = ["RENDER", "DEPLOY"] + execution_timeout = "3600s" } - gke { - cluster = "projects/%{project_name}/locations/%{region}/clusters/different-example-cluster-name" - internal_ip = true + multi_target { + target_ids = ["1", "2"] } project = "%{project_name}" - require_approval = true + require_approval = false annotations = { - my_second_annotation = "updated-example-annotation-2" + my_first_annotation = "example-annotation-1" - my_third_annotation = "example-annotation-3" + my_second_annotation = "example-annotation-2" } labels = { - my_second_label = "updated-example-label-2" + my_first_label = "example-label-1" - my_third_label = "example-label-3" + my_second_label = "example-label-2" } + provider = google-beta } - `, context) } -func testAccClouddeployTarget_TargetUpdate2(context map[string]interface{}) string { +func testAccClouddeployTarget_MultiTargetUpdate0(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_clouddeploy_target" "primary" { location = "%{region}" name = "tf-test-target%{random_suffix}" deploy_parameters = {} - description = "updated description" - - execution_configs { - usages = ["RENDER"] - artifact_storage = "gs://my-bucket/my-dir" - service_account = "pool-owner@%{project_name}.iam.gserviceaccount.com" - } - - execution_configs { - usages = ["DEPLOY"] - artifact_storage = "gs://deploy-bucket/deploy-dir" - service_account = "deploy-pool-owner@%{project_name}.iam.gserviceaccount.com" - worker_pool = "projects/%{project_name}/locations/%{region}/workerPools/my-deploy-pool" - } + description = "updated mutli-target description" - gke { 
- cluster = "projects/%{project_name}/locations/%{region}/clusters/different-example-cluster-name" - internal_ip = true + multi_target { + target_ids = ["1", "2", "3"] } project = "%{project_name}" @@ -441,63 +414,86 @@ resource "google_clouddeploy_target" "primary" { } labels = { - my_second_label = "updated-example-label-2" + my_second_label = "example-label-2" my_third_label = "example-label-3" } + provider = google-beta } - `, context) } -func testAccClouddeployTarget_TargetUpdate3(context map[string]interface{}) string { +func testAccClouddeployTarget_RunTarget(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_clouddeploy_target" "primary" { location = "%{region}" name = "tf-test-target%{random_suffix}" deploy_parameters = {} - description = "updated description" + description = "basic description" execution_configs { - usages = ["RENDER"] - artifact_storage = "gs://other-bucket/other-dir" - service_account = "other-owner@%{project_name}.iam.gserviceaccount.com" - verbose = true + usages = ["RENDER", "DEPLOY"] + execution_timeout = "3600s" } - execution_configs { - usages = ["DEPLOY"] - artifact_storage = "gs://deploy-bucket/deploy-dir" - service_account = "deploy-pool-owner@%{project_name}.iam.gserviceaccount.com" - worker_pool = "projects/%{project_name}/locations/%{region}/workerPools/my-deploy-pool" + project = "%{project_name}" + require_approval = false + + run { + location = "projects/%{project_name}/locations/%{region}" } - gke { - cluster = "projects/%{project_name}/locations/%{region}/clusters/different-example-cluster-name" - internal_ip = true - proxy_url = "http://10.0.0.1" + annotations = { + my_first_annotation = "example-annotation-1" + + my_second_annotation = "example-annotation-2" } - project = "%{project_name}" - require_approval = true + labels = { + my_first_label = "example-label-1" + + my_second_label = "example-label-2" + } + provider = google-beta +} + +`, context) +} + +func 
testAccClouddeployTarget_RunTargetUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_clouddeploy_target" "primary" { + location = "%{region}" + name = "tf-test-target%{random_suffix}" + deploy_parameters = {} + description = "basic description" + project = "%{project_name}" + require_approval = true + + run { + location = "projects/%{project_name}/locations/%{region}" + } annotations = { - my_second_annotation = "updated-example-annotation-2" + my_first_annotation = "example-annotation-1" + + my_second_annotation = "example-annotation-2" my_third_annotation = "example-annotation-3" } labels = { - my_second_label = "updated-example-label-2" + my_first_label = "example-label-1" - my_third_label = "example-label-3" + my_second_label = "example-label-2" } + provider = google-beta } - `, context) } +{{- end }} func testAccCheckClouddeployTargetDestroyProducer(t *testing.T) func(s *terraform.State) error { return func(s *terraform.State) error { From 95b37ed8b4cd345255c9dff98f90ab050c4eae62 Mon Sep 17 00:00:00 2001 From: Chris Hawk Date: Wed, 18 Mar 2026 15:45:53 -0700 Subject: [PATCH 05/13] Untemplatize files that don't need to be templates; update docs; update meta.yaml attribution --- docs/content/_index.md | 14 +------------- docs/content/code-review/release-notes.md | 4 ++-- docs/content/code-review/review-pr.md | 1 - docs/content/reference/make-commands.md | 5 ++--- docs/content/reference/metadata.md | 4 ++-- .../{apikeys_utils.go.tmpl => apikeys_utils.go} | 2 +- .../services/apikeys/{client.go.tmpl => client.go} | 0 .../{key_internal.go.tmpl => key_internal.go} | 10 +++++----- .../apikeys/{key_schema.go.tmpl => key_schema.go} | 2 +- ...tmpl => resource_apikeys_key_generated_test.go} | 0 .../apikeys/resource_apikeys_key_meta.yaml | 2 +- ...ads_utils.go.tmpl => assuredworkloads_utils.go} | 4 ++-- .../assuredworkloads/{client.go.tmpl => client.go} | 0 ...ource_assured_workloads_workload_meta.yaml.tmpl | 2 +- 
...{workload_schema.go.tmpl => workload_schema.go} | 2 +- .../resource_cloudbuild_worker_pool_meta.yaml | 2 +- .../clouddeploy/{client.go.tmpl => client.go} | 0 ...ernal.go.tmpl => delivery_pipeline_internal.go} | 10 +++++----- ..._schema.go.tmpl => delivery_pipeline_schema.go} | 2 +- ...esource_clouddeploy_delivery_pipeline_meta.yaml | 2 +- .../resource_clouddeploy_target_meta.yaml | 2 +- ...{target_internal.go.tmpl => target_internal.go} | 10 +++++----- .../{target_schema.go.tmpl => target_schema.go} | 2 +- .../containeraws/{client.go.tmpl => client.go} | 0 .../resource_container_aws_cluster_meta.yaml.tmpl | 2 +- ...resource_container_aws_node_pool_meta.yaml.tmpl | 2 +- ...t_internal.go.tmpl => azure_client_internal.go} | 10 +++++----- ...lient_schema.go.tmpl => azure_client_schema.go} | 2 +- .../containerazure/{client.go.tmpl => client.go} | 0 ...ource_container_azure_client_generated_test.go} | 0 .../resource_container_azure_client_meta.yaml | 2 +- ...resource_container_azure_cluster_meta.yaml.tmpl | 2 +- ...source_container_azure_node_pool_meta.yaml.tmpl | 2 +- .../{asset_internal.go.tmpl => asset_internal.go} | 10 +++++----- .../dataplex/{client.go.tmpl => client.go} | 0 .../{dataplex_utils.go.tmpl => dataplex_utils.go} | 0 ...l => resource_dataplex_asset_generated_test.go} | 0 .../dataplex/resource_dataplex_asset_meta.yaml | 2 +- ...pl => resource_dataplex_lake_generated_test.go} | 0 .../dataplex/resource_dataplex_lake_meta.yaml | 2 +- ...pl => resource_dataplex_zone_generated_test.go} | 0 .../dataplex/resource_dataplex_zone_meta.yaml | 2 +- ...ource_dataproc_workflow_template_meta.yaml.tmpl | 2 +- .../firebaserules/{client.go.tmpl => client.go} | 0 ...elease_internal.go.tmpl => release_internal.go} | 8 ++++---- .../{release_schema.go.tmpl => release_schema.go} | 2 +- .../{release_utils.go.tmpl => release_utils.go} | 0 .../resource_firebaserules_release_meta.yaml | 2 +- ...source_firebaserules_ruleset_generated_test.go} | 0 
.../resource_firebaserules_ruleset_meta.yaml | 2 +- ...uleset_internal.go.tmpl => ruleset_internal.go} | 8 ++++---- .../{ruleset_schema.go.tmpl => ruleset_schema.go} | 2 +- ...ource_gke_hub_feature_membership_meta.yaml.tmpl | 2 +- .../{client.go.tmpl => client.go} | 0 .../{key_internal.go.tmpl => key_internal.go} | 10 +++++----- .../{key_schema.go.tmpl => key_schema.go} | 2 +- ...rce_recaptcha_enterprise_key_generated_test.go} | 0 .../resource_recaptcha_enterprise_key_meta.yaml | 2 +- 58 files changed, 74 insertions(+), 88 deletions(-) rename mmv1/third_party/terraform/services/apikeys/{apikeys_utils.go.tmpl => apikeys_utils.go} (92%) rename mmv1/third_party/terraform/services/apikeys/{client.go.tmpl => client.go} (100%) rename mmv1/third_party/terraform/services/apikeys/{key_internal.go.tmpl => key_internal.go} (99%) rename mmv1/third_party/terraform/services/apikeys/{key_schema.go.tmpl => key_schema.go} (99%) rename mmv1/third_party/terraform/services/apikeys/{resource_apikeys_key_generated_test.go.tmpl => resource_apikeys_key_generated_test.go} (100%) rename mmv1/third_party/terraform/services/assuredworkloads/{assuredworkloads_utils.go.tmpl => assuredworkloads_utils.go} (94%) rename mmv1/third_party/terraform/services/assuredworkloads/{client.go.tmpl => client.go} (100%) rename mmv1/third_party/terraform/services/assuredworkloads/{workload_schema.go.tmpl => workload_schema.go} (99%) rename mmv1/third_party/terraform/services/clouddeploy/{client.go.tmpl => client.go} (100%) rename mmv1/third_party/terraform/services/clouddeploy/{delivery_pipeline_internal.go.tmpl => delivery_pipeline_internal.go} (99%) rename mmv1/third_party/terraform/services/clouddeploy/{delivery_pipeline_schema.go.tmpl => delivery_pipeline_schema.go} (99%) rename mmv1/third_party/terraform/services/clouddeploy/{target_internal.go.tmpl => target_internal.go} (99%) rename mmv1/third_party/terraform/services/clouddeploy/{target_schema.go.tmpl => target_schema.go} (99%) rename 
mmv1/third_party/terraform/services/containeraws/{client.go.tmpl => client.go} (100%) rename mmv1/third_party/terraform/services/containerazure/{azure_client_internal.go.tmpl => azure_client_internal.go} (96%) rename mmv1/third_party/terraform/services/containerazure/{azure_client_schema.go.tmpl => azure_client_schema.go} (97%) rename mmv1/third_party/terraform/services/containerazure/{client.go.tmpl => client.go} (100%) rename mmv1/third_party/terraform/services/containerazure/{resource_container_azure_client_generated_test.go.tmpl => resource_container_azure_client_generated_test.go} (100%) rename mmv1/third_party/terraform/services/dataplex/{asset_internal.go.tmpl => asset_internal.go} (99%) rename mmv1/third_party/terraform/services/dataplex/{client.go.tmpl => client.go} (100%) rename mmv1/third_party/terraform/services/dataplex/{dataplex_utils.go.tmpl => dataplex_utils.go} (100%) rename mmv1/third_party/terraform/services/dataplex/{resource_dataplex_asset_generated_test.go.tmpl => resource_dataplex_asset_generated_test.go} (100%) rename mmv1/third_party/terraform/services/dataplex/{resource_dataplex_lake_generated_test.go.tmpl => resource_dataplex_lake_generated_test.go} (100%) rename mmv1/third_party/terraform/services/dataplex/{resource_dataplex_zone_generated_test.go.tmpl => resource_dataplex_zone_generated_test.go} (100%) rename mmv1/third_party/terraform/services/firebaserules/{client.go.tmpl => client.go} (100%) rename mmv1/third_party/terraform/services/firebaserules/{release_internal.go.tmpl => release_internal.go} (97%) rename mmv1/third_party/terraform/services/firebaserules/{release_schema.go.tmpl => release_schema.go} (97%) rename mmv1/third_party/terraform/services/firebaserules/{release_utils.go.tmpl => release_utils.go} (100%) rename mmv1/third_party/terraform/services/firebaserules/{resource_firebaserules_ruleset_generated_test.go.tmpl => resource_firebaserules_ruleset_generated_test.go} (100%) rename 
mmv1/third_party/terraform/services/firebaserules/{ruleset_internal.go.tmpl => ruleset_internal.go} (98%) rename mmv1/third_party/terraform/services/firebaserules/{ruleset_schema.go.tmpl => ruleset_schema.go} (98%) rename mmv1/third_party/terraform/services/recaptchaenterprise/{client.go.tmpl => client.go} (100%) rename mmv1/third_party/terraform/services/recaptchaenterprise/{key_internal.go.tmpl => key_internal.go} (99%) rename mmv1/third_party/terraform/services/recaptchaenterprise/{key_schema.go.tmpl => key_schema.go} (99%) rename mmv1/third_party/terraform/services/recaptchaenterprise/{resource_recaptcha_enterprise_key_generated_test.go.tmpl => resource_recaptcha_enterprise_key_generated_test.go} (100%) diff --git a/docs/content/_index.md b/docs/content/_index.md index f85822683a53..54afbdf4bc12 100644 --- a/docs/content/_index.md +++ b/docs/content/_index.md @@ -35,11 +35,10 @@ detection. ### Resource types -There are three types of resources supported by Magic Modules: +There are two types of resources supported by Magic Modules: + MMv1 + Handwritten -+ DCL/tpgtools The following sections describe these tools in detail. @@ -96,17 +95,6 @@ In the providers, handwritten resources and datasources are stored in `PROVIDER/ is `google` or `google-beta`, `SERVICE` is the service name, and `FILENAME` is the name of the handwritten file in magic-modules. Handwritten files do not have an `AUTO GENERATED CODE` header. -#### DCL aka tpgtools (maintenance mode) - -DCL / tpgtools is similar to MMv1; however, it is in maintenance mode, which means that new resources using the DCL are not being added. 
- -DCL-based files start with the following header: - -``` -***     AUTO GENERATED CODE    ***    Type: DCL     *** -``` - - ## Other Resources + [Extending Terraform](https://www.terraform.io/plugin) diff --git a/docs/content/code-review/release-notes.md b/docs/content/code-review/release-notes.md index ef1380a84ffe..122406b22aa0 100644 --- a/docs/content/code-review/release-notes.md +++ b/docs/content/code-review/release-notes.md @@ -98,10 +98,10 @@ For each release note block, choose an appropriate type from the following list: Do | Don't -- | ----- Use past tense to describe the end state after the change is released. Start with a verb. For example, "added...", "fixed...", or "resolved...". You can use future tense to describe future changes, such as saying that a deprecated field will be removed in a future version. | Don't use present or future tense to describe changes that are included in the pull request. -Write user-focused release notes. For example, reference specific impacted terraform resource and field names, and discuss changes in behavior users will experience. | Avoid API field/resource/feature names. Avoid implementation details. Avoid language that requires understanding of provider internals. However, in case of substantial refactorings like API version changes or engine changes (tpgtools/DCL -> MMv1, handwritten <> MMv1) **do** cover the change so users can quickly identify the release if they are affected by the change. +Write user-focused release notes. For example, reference specific impacted terraform resource and field names, and discuss changes in behavior users will experience. | Avoid API field/resource/feature names. Avoid implementation details. Avoid language that requires understanding of provider internals. However, in case of substantial refactorings like API version changes or engine changes (handwritten <> MMv1) **do** cover the change so users can quickly identify the release if they are affected by the change. 
Surround resource or field names with backticks. | Don't use resource or field names without punctuation or with other punctuation like quotation marks. Use impersonal third person. | Don't use "I", "you", etc. -If the pull request impacts a specific product, begin your release note with that product name followed by a colon. Use lower case for the first letter after the colon. For example, `cloudrun: added...` For MMv1 resources, use the folder name that contains the yaml files as the product name; for handwritten or tpgtools resources, use the API subdomain; for broad cross-product changes, use `provider`. | Don't begin your release note with the full resource name. Don't add backticks around the product name. Don't capitalize the first letter after the colon. +If the pull request impacts a specific product, begin your release note with that product name followed by a colon. Use lower case for the first letter after the colon. For example, `cloudrun: added...` For MMv1 resources, use the folder name that contains the yaml files as the product name; for handwritten resources, use the API subdomain; for broad cross-product changes, use `provider`. | Don't begin your release note with the full resource name. Don't add backticks around the product name. Don't capitalize the first letter after the colon. ### Examples diff --git a/docs/content/code-review/review-pr.md b/docs/content/code-review/review-pr.md index 3a2ed0a086cf..bebbc78ed4ec 100644 --- a/docs/content/code-review/review-pr.md +++ b/docs/content/code-review/review-pr.md @@ -13,7 +13,6 @@ This page provides guidelines for reviewing a Magic Modules pull request (PR). The following types of PRs may require additional scrutiny and/or multiple reviewers. 
-- DCL to MMv1 migrations - Adding multi-actor fields (fields whose values can be altered as a side effect of changes made to a different resource) ## Review diff --git a/docs/content/reference/make-commands.md b/docs/content/reference/make-commands.md index 169f1329faa5..f9d9991015bb 100644 --- a/docs/content/reference/make-commands.md +++ b/docs/content/reference/make-commands.md @@ -34,10 +34,9 @@ make provider VERSION=ga OUTPUT_PATH="$GOPATH/src/github.com/hashicorp/terraform - `OUTPUT_PATH`: Required. The location you are generating provider code into. - `VERSION`: Required. The version of the provider you are building into. Valid values are `ga` and `beta`. -- `PRODUCT`: Limits generations to the specified folder within `mmv1/products` or `tpgtools/api`. Handwritten files from `mmv1/third_party/terraform` are always generated into the downstream regardless of this setting, so you can provide a non-existent product name to generate only handwritten code. Required if `RESOURCE` is specified. **Using `PRODUCT` skips the pre-generation cleanup step. This is considered advanced usage; recommend running a full, clean build (`make provider` without `PRODUCT`) beforehand if repositories may be out of sync.** +- `PRODUCT`: Limits generations to the specified folder within `mmv1/products`. Handwritten files from `mmv1/third_party/terraform` are always generated into the downstream regardless of this setting, so you can provide a non-existent product name to generate only handwritten code. Required if `RESOURCE` is specified. **Using `PRODUCT` skips the pre-generation cleanup step. This is considered advanced usage; recommend running a full, clean build (`make provider` without `PRODUCT`) beforehand if repositories may be out of sync.** - `SKIP_CLEAN`: If set to `true`, skips the default pre-generation cleanup of `OUTPUT_PATH` during a full provider build. Has no effect if `PRODUCT` is specified (as cleanup is already skipped). 
Example: `make provider VERSION=ga OUTPUT_PATH=... SKIP_CLEAN=true`. -- `RESOURCE`: Limits generation to the specified resource within a particular product. For `mmv1` resources, matches the resource's `name` field (set in its configuration file).For `tpgtools` resources, matches the terraform resource name. -- `ENGINE`: Modifies `make provider` to only generate code using the specified engine. Valid values are `mmv1` or `tpgtools`. (Providing `tpgtools` will still generate any prerequisite mmv1 files required for tpgtools.) +- `RESOURCE`: Limits generation to the specified resource within a particular product. For `mmv1` resources, matches the resource's `name` field (set in its configuration file). #### Cleaning up old files diff --git a/docs/content/reference/metadata.md b/docs/content/reference/metadata.md index d043eea42132..9a3c031cc818 100644 --- a/docs/content/reference/metadata.md +++ b/docs/content/reference/metadata.md @@ -9,7 +9,7 @@ This page documents the properties available in meta.yaml files. These files mak Meta.yaml files are auto-generated for MMv1 generated resources. -DCL and Handwritten resources will have handwritten meta.yaml files in the appropriate service directory in [mmv1/third_party/terraform/services/](https://github.com/GoogleCloudPlatform/magic-modules/tree/main/mmv1/third_party/terraform/services). The file name will be `resource_PRODUCT_RESOURCE_meta.yaml(.tmpl)`. For example, `resource_compute_instance_meta.yaml.tmpl` Handwritten meta.yaml files with a `.tmpl` extension can use version guards (`{{- if ne $.TargetVersionName "ga" }}...{{- else}}...{{- end}}`) to exclude beta fields from the `google` provider. +Handwritten resources will have handwritten meta.yaml files in the appropriate service directory in [mmv1/third_party/terraform/services/](https://github.com/GoogleCloudPlatform/magic-modules/tree/main/mmv1/third_party/terraform/services). The file name will be `resource_PRODUCT_RESOURCE_meta.yaml(.tmpl)`. 
For example, `resource_compute_instance_meta.yaml.tmpl` Handwritten meta.yaml files with a `.tmpl` extension can use version guards (`{{- if ne $.TargetVersionName "ga" }}...{{- else}}...{{- end}}`) to exclude beta fields from the `google` provider. All resources and fields must be present in meta.yaml files for the provider(s) they're available in. @@ -21,7 +21,7 @@ The name of the Terraform resource. For example, "google_cloudfunctions2_functio ### `generation_type` -The generation method used to create the Terraform resource. For example, "mmv1", "dcl", "handwritten". +The generation method used to create the Terraform resource. For example, "mmv1" or "handwritten". ### `api_service_name` diff --git a/mmv1/third_party/terraform/services/apikeys/apikeys_utils.go.tmpl b/mmv1/third_party/terraform/services/apikeys/apikeys_utils.go similarity index 92% rename from mmv1/third_party/terraform/services/apikeys/apikeys_utils.go.tmpl rename to mmv1/third_party/terraform/services/apikeys/apikeys_utils.go index 9fb071bfb404..7bfd2e7124b8 100644 --- a/mmv1/third_party/terraform/services/apikeys/apikeys_utils.go.tmpl +++ b/mmv1/third_party/terraform/services/apikeys/apikeys_utils.go @@ -17,7 +17,7 @@ func keyStringGetURL(userBasePath string, r *Key) (string, error) { "project": dcl.ValueOrEmptyString(nr.Project), "name": dcl.ValueOrEmptyString(nr.Name), } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/global/keys/{{ "{{" }}name{{ "}}" }}/keyString", "https://apikeys.googleapis.com/v2/", userBasePath, params), nil + return dcl.URL("projects/{{project}}/locations/global/keys/{{name}}/keyString", "https://apikeys.googleapis.com/v2/", userBasePath, params), nil } func (c *Client) getKeyStringRaw(ctx context.Context, r *Key) ([]byte, error) { diff --git a/mmv1/third_party/terraform/services/apikeys/client.go.tmpl b/mmv1/third_party/terraform/services/apikeys/client.go similarity index 100% rename from mmv1/third_party/terraform/services/apikeys/client.go.tmpl 
rename to mmv1/third_party/terraform/services/apikeys/client.go diff --git a/mmv1/third_party/terraform/services/apikeys/key_internal.go.tmpl b/mmv1/third_party/terraform/services/apikeys/key_internal.go similarity index 99% rename from mmv1/third_party/terraform/services/apikeys/key_internal.go.tmpl rename to mmv1/third_party/terraform/services/apikeys/key_internal.go index 41f67fb2bf16..559b07482fd4 100644 --- a/mmv1/third_party/terraform/services/apikeys/key_internal.go.tmpl +++ b/mmv1/third_party/terraform/services/apikeys/key_internal.go @@ -103,7 +103,7 @@ func (r *Key) getURL(userBasePath string) (string, error) { "project": dcl.ValueOrEmptyString(nr.Project), "name": dcl.ValueOrEmptyString(nr.Name), } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/global/keys/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil + return dcl.URL("projects/{{project}}/locations/global/keys/{{name}}", nr.basePath(), userBasePath, params), nil } func (r *Key) listURL(userBasePath string) (string, error) { @@ -111,7 +111,7 @@ func (r *Key) listURL(userBasePath string) (string, error) { params := map[string]interface{}{ "project": dcl.ValueOrEmptyString(nr.Project), } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/global/keys", nr.basePath(), userBasePath, params), nil + return dcl.URL("projects/{{project}}/locations/global/keys", nr.basePath(), userBasePath, params), nil } @@ -121,7 +121,7 @@ func (r *Key) createURL(userBasePath string) (string, error) { "project": dcl.ValueOrEmptyString(nr.Project), "name": dcl.ValueOrEmptyString(nr.Name), } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/global/keys?keyId={{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil + return dcl.URL("projects/{{project}}/locations/global/keys?keyId={{name}}", nr.basePath(), userBasePath, params), nil } @@ -131,7 +131,7 @@ func (r *Key) deleteURL(userBasePath string) (string, error) { "project": 
dcl.ValueOrEmptyString(nr.Project), "name": dcl.ValueOrEmptyString(nr.Name), } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/global/keys/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil + return dcl.URL("projects/{{project}}/locations/global/keys/{{name}}", nr.basePath(), userBasePath, params), nil } // keyApiOperation represents a mutable operation in the underlying REST @@ -1739,7 +1739,7 @@ func (r *Key) updateURL(userBasePath, updateName string) (string, error) { "project": dcl.ValueOrEmptyString(nr.Project), "name": dcl.ValueOrEmptyString(nr.Name), } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/global/keys/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, fields), nil + return dcl.URL("projects/{{project}}/locations/global/keys/{{name}}", nr.basePath(), userBasePath, fields), nil } diff --git a/mmv1/third_party/terraform/services/apikeys/key_schema.go.tmpl b/mmv1/third_party/terraform/services/apikeys/key_schema.go similarity index 99% rename from mmv1/third_party/terraform/services/apikeys/key_schema.go.tmpl rename to mmv1/third_party/terraform/services/apikeys/key_schema.go index d342aca956d3..ab554b48e87b 100644 --- a/mmv1/third_party/terraform/services/apikeys/key_schema.go.tmpl +++ b/mmv1/third_party/terraform/services/apikeys/key_schema.go @@ -71,7 +71,7 @@ func DCLKeySchema() *dcl.Schema { Schemas: map[string]*dcl.Component{ "Key": &dcl.Component{ Title: "Key", - ID: "projects/{{ "{{" }}project{{ "}}" }}/locations/global/keys/{{ "{{" }}name{{ "}}" }}", + ID: "projects/{{project}}/locations/global/keys/{{name}}", Locations: []string{ "global", }, diff --git a/mmv1/third_party/terraform/services/apikeys/resource_apikeys_key_generated_test.go.tmpl b/mmv1/third_party/terraform/services/apikeys/resource_apikeys_key_generated_test.go similarity index 100% rename from mmv1/third_party/terraform/services/apikeys/resource_apikeys_key_generated_test.go.tmpl rename to 
mmv1/third_party/terraform/services/apikeys/resource_apikeys_key_generated_test.go diff --git a/mmv1/third_party/terraform/services/apikeys/resource_apikeys_key_meta.yaml b/mmv1/third_party/terraform/services/apikeys/resource_apikeys_key_meta.yaml index 3e3bfe25f6be..e378775095be 100644 --- a/mmv1/third_party/terraform/services/apikeys/resource_apikeys_key_meta.yaml +++ b/mmv1/third_party/terraform/services/apikeys/resource_apikeys_key_meta.yaml @@ -1,5 +1,5 @@ resource: 'google_apikeys_key' -generation_type: 'dcl' +generation_type: 'handwritten' api_service_name: 'apikeys.googleapis.com' api_version: 'v2' api_resource_type_kind: 'Key' diff --git a/mmv1/third_party/terraform/services/assuredworkloads/assuredworkloads_utils.go.tmpl b/mmv1/third_party/terraform/services/assuredworkloads/assuredworkloads_utils.go similarity index 94% rename from mmv1/third_party/terraform/services/assuredworkloads/assuredworkloads_utils.go.tmpl rename to mmv1/third_party/terraform/services/assuredworkloads/assuredworkloads_utils.go index 8331dd43c3b4..d57fc332293e 100644 --- a/mmv1/third_party/terraform/services/assuredworkloads/assuredworkloads_utils.go.tmpl +++ b/mmv1/third_party/terraform/services/assuredworkloads/assuredworkloads_utils.go @@ -28,7 +28,7 @@ func (r *Workload) projectURL(userBasePath string, index int) (string, error) { userBasePath = fmt.Sprintf("https://cloudresourcemanager%s/v1/", matches[1]) } } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}", "https://cloudresourcemanager.googleapis.com/v1/", userBasePath, params), nil + return dcl.URL("projects/{{project}}", "https://cloudresourcemanager.googleapis.com/v1/", userBasePath, params), nil } // Returns the URL of the folder resource with the given index in the workload. 
@@ -46,7 +46,7 @@ func (r *Workload) folderURL(userBasePath string, index int) (string, error) { userBasePath = fmt.Sprintf("https://cloudresourcemanager%s/v2/", matches[1]) } } - return dcl.URL("folders/{{ "{{" }}folder{{ "}}" }}", "https://cloudresourcemanager.googleapis.com/v2/", userBasePath, params), nil + return dcl.URL("folders/{{folder}}", "https://cloudresourcemanager.googleapis.com/v2/", userBasePath, params), nil } // Returns the lifecycle state of the project or folder resource with the given url. diff --git a/mmv1/third_party/terraform/services/assuredworkloads/client.go.tmpl b/mmv1/third_party/terraform/services/assuredworkloads/client.go similarity index 100% rename from mmv1/third_party/terraform/services/assuredworkloads/client.go.tmpl rename to mmv1/third_party/terraform/services/assuredworkloads/client.go diff --git a/mmv1/third_party/terraform/services/assuredworkloads/resource_assured_workloads_workload_meta.yaml.tmpl b/mmv1/third_party/terraform/services/assuredworkloads/resource_assured_workloads_workload_meta.yaml.tmpl index cd17ac3fc915..45d8544b3e16 100644 --- a/mmv1/third_party/terraform/services/assuredworkloads/resource_assured_workloads_workload_meta.yaml.tmpl +++ b/mmv1/third_party/terraform/services/assuredworkloads/resource_assured_workloads_workload_meta.yaml.tmpl @@ -1,5 +1,5 @@ resource: 'google_assured_workloads_workload' -generation_type: 'dcl' +generation_type: 'handwritten' api_service_name: 'assuredworkloads.googleapis.com' {{- if ne $.TargetVersionName "ga" }} api_version: 'v1beta1' diff --git a/mmv1/third_party/terraform/services/assuredworkloads/workload_schema.go.tmpl b/mmv1/third_party/terraform/services/assuredworkloads/workload_schema.go similarity index 99% rename from mmv1/third_party/terraform/services/assuredworkloads/workload_schema.go.tmpl rename to mmv1/third_party/terraform/services/assuredworkloads/workload_schema.go index e56c85c5622d..682cc15e4d18 100644 --- 
a/mmv1/third_party/terraform/services/assuredworkloads/workload_schema.go.tmpl +++ b/mmv1/third_party/terraform/services/assuredworkloads/workload_schema.go @@ -85,7 +85,7 @@ func DCLWorkloadSchema() *dcl.Schema { Schemas: map[string]*dcl.Component{ "Workload": &dcl.Component{ Title: "Workload", - ID: "organizations/{{ "{{" }}organization{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/workloads/{{ "{{" }}name{{ "}}" }}", + ID: "organizations/{{organization}}/locations/{{location}}/workloads/{{name}}", UsesStateHint: true, ParentContainer: "organization", LabelsField: "labels", diff --git a/mmv1/third_party/terraform/services/cloudbuild/resource_cloudbuild_worker_pool_meta.yaml b/mmv1/third_party/terraform/services/cloudbuild/resource_cloudbuild_worker_pool_meta.yaml index 9ccad61d4411..dca7d6eaad31 100644 --- a/mmv1/third_party/terraform/services/cloudbuild/resource_cloudbuild_worker_pool_meta.yaml +++ b/mmv1/third_party/terraform/services/cloudbuild/resource_cloudbuild_worker_pool_meta.yaml @@ -1,5 +1,5 @@ resource: 'google_cloudbuild_worker_pool' -generation_type: 'dcl' +generation_type: 'handwritten' api_service_name: 'cloudbuild.googleapis.com' api_version: 'v1' api_resource_type_kind: 'WorkerPool' diff --git a/mmv1/third_party/terraform/services/clouddeploy/client.go.tmpl b/mmv1/third_party/terraform/services/clouddeploy/client.go similarity index 100% rename from mmv1/third_party/terraform/services/clouddeploy/client.go.tmpl rename to mmv1/third_party/terraform/services/clouddeploy/client.go diff --git a/mmv1/third_party/terraform/services/clouddeploy/delivery_pipeline_internal.go.tmpl b/mmv1/third_party/terraform/services/clouddeploy/delivery_pipeline_internal.go similarity index 99% rename from mmv1/third_party/terraform/services/clouddeploy/delivery_pipeline_internal.go.tmpl rename to mmv1/third_party/terraform/services/clouddeploy/delivery_pipeline_internal.go index fec0faa4833f..d788ebb3c099 100644 --- 
a/mmv1/third_party/terraform/services/clouddeploy/delivery_pipeline_internal.go.tmpl +++ b/mmv1/third_party/terraform/services/clouddeploy/delivery_pipeline_internal.go @@ -264,7 +264,7 @@ func (r *DeliveryPipeline) getURL(userBasePath string) (string, error) { "location": dcl.ValueOrEmptyString(nr.Location), "name": dcl.ValueOrEmptyString(nr.Name), } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/deliveryPipelines/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil + return dcl.URL("projects/{{project}}/locations/{{location}}/deliveryPipelines/{{name}}", nr.basePath(), userBasePath, params), nil } func (r *DeliveryPipeline) listURL(userBasePath string) (string, error) { @@ -273,7 +273,7 @@ func (r *DeliveryPipeline) listURL(userBasePath string) (string, error) { "project": dcl.ValueOrEmptyString(nr.Project), "location": dcl.ValueOrEmptyString(nr.Location), } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/deliveryPipelines", nr.basePath(), userBasePath, params), nil + return dcl.URL("projects/{{project}}/locations/{{location}}/deliveryPipelines", nr.basePath(), userBasePath, params), nil } @@ -284,7 +284,7 @@ func (r *DeliveryPipeline) createURL(userBasePath string) (string, error) { "location": dcl.ValueOrEmptyString(nr.Location), "name": dcl.ValueOrEmptyString(nr.Name), } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/deliveryPipelines?deliveryPipelineId={{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil + return dcl.URL("projects/{{project}}/locations/{{location}}/deliveryPipelines?deliveryPipelineId={{name}}", nr.basePath(), userBasePath, params), nil } @@ -295,7 +295,7 @@ func (r *DeliveryPipeline) deleteURL(userBasePath string) (string, error) { "location": dcl.ValueOrEmptyString(nr.Location), "name": dcl.ValueOrEmptyString(nr.Name), } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" 
}}/locations/{{ "{{" }}location{{ "}}" }}/deliveryPipelines/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil + return dcl.URL("projects/{{project}}/locations/{{location}}/deliveryPipelines/{{name}}", nr.basePath(), userBasePath, params), nil } func (r *DeliveryPipeline) SetPolicyURL(userBasePath string) string { @@ -5118,7 +5118,7 @@ func (r *DeliveryPipeline) updateURL(userBasePath, updateName string) (string, e "location": dcl.ValueOrEmptyString(nr.Location), "name": dcl.ValueOrEmptyString(nr.Name), } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/deliveryPipelines/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, fields), nil + return dcl.URL("projects/{{project}}/locations/{{location}}/deliveryPipelines/{{name}}", nr.basePath(), userBasePath, fields), nil } diff --git a/mmv1/third_party/terraform/services/clouddeploy/delivery_pipeline_schema.go.tmpl b/mmv1/third_party/terraform/services/clouddeploy/delivery_pipeline_schema.go similarity index 99% rename from mmv1/third_party/terraform/services/clouddeploy/delivery_pipeline_schema.go.tmpl rename to mmv1/third_party/terraform/services/clouddeploy/delivery_pipeline_schema.go index 052fa6a83496..e471067e3ae6 100644 --- a/mmv1/third_party/terraform/services/clouddeploy/delivery_pipeline_schema.go.tmpl +++ b/mmv1/third_party/terraform/services/clouddeploy/delivery_pipeline_schema.go @@ -89,7 +89,7 @@ func DCLDeliveryPipelineSchema() *dcl.Schema { Schemas: map[string]*dcl.Component{ "DeliveryPipeline": &dcl.Component{ Title: "DeliveryPipeline", - ID: "projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/deliveryPipelines/{{ "{{" }}name{{ "}}" }}", + ID: "projects/{{project}}/locations/{{location}}/deliveryPipelines/{{name}}", ParentContainer: "project", HasCreate: true, SchemaProperty: dcl.Property{ diff --git a/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_delivery_pipeline_meta.yaml 
b/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_delivery_pipeline_meta.yaml index 0ec222248960..71de656bc1a2 100644 --- a/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_delivery_pipeline_meta.yaml +++ b/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_delivery_pipeline_meta.yaml @@ -1,5 +1,5 @@ resource: 'google_clouddeploy_delivery_pipeline' -generation_type: 'dcl' +generation_type: 'handwritten' api_service_name: 'clouddeploy.googleapis.com' api_version: 'v1' api_resource_type_kind: 'DeliveryPipeline' diff --git a/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_target_meta.yaml b/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_target_meta.yaml index 647944fdd0d3..edffa3158112 100644 --- a/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_target_meta.yaml +++ b/mmv1/third_party/terraform/services/clouddeploy/resource_clouddeploy_target_meta.yaml @@ -1,5 +1,5 @@ resource: 'google_clouddeploy_target' -generation_type: 'dcl' +generation_type: 'handwritten' api_service_name: 'clouddeploy.googleapis.com' api_version: 'v1' api_resource_type_kind: 'Target' diff --git a/mmv1/third_party/terraform/services/clouddeploy/target_internal.go.tmpl b/mmv1/third_party/terraform/services/clouddeploy/target_internal.go similarity index 99% rename from mmv1/third_party/terraform/services/clouddeploy/target_internal.go.tmpl rename to mmv1/third_party/terraform/services/clouddeploy/target_internal.go index 4ead247eaeb7..52c5afa45af9 100644 --- a/mmv1/third_party/terraform/services/clouddeploy/target_internal.go.tmpl +++ b/mmv1/third_party/terraform/services/clouddeploy/target_internal.go @@ -107,7 +107,7 @@ func (r *Target) getURL(userBasePath string) (string, error) { "location": dcl.ValueOrEmptyString(nr.Location), "name": dcl.ValueOrEmptyString(nr.Name), } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/targets/{{ 
"{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil + return dcl.URL("projects/{{project}}/locations/{{location}}/targets/{{name}}", nr.basePath(), userBasePath, params), nil } func (r *Target) listURL(userBasePath string) (string, error) { @@ -116,7 +116,7 @@ func (r *Target) listURL(userBasePath string) (string, error) { "project": dcl.ValueOrEmptyString(nr.Project), "location": dcl.ValueOrEmptyString(nr.Location), } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/targets", nr.basePath(), userBasePath, params), nil + return dcl.URL("projects/{{project}}/locations/{{location}}/targets", nr.basePath(), userBasePath, params), nil } @@ -127,7 +127,7 @@ func (r *Target) createURL(userBasePath string) (string, error) { "location": dcl.ValueOrEmptyString(nr.Location), "name": dcl.ValueOrEmptyString(nr.Name), } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/targets?targetId={{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil + return dcl.URL("projects/{{project}}/locations/{{location}}/targets?targetId={{name}}", nr.basePath(), userBasePath, params), nil } @@ -138,7 +138,7 @@ func (r *Target) deleteURL(userBasePath string) (string, error) { "location": dcl.ValueOrEmptyString(nr.Location), "name": dcl.ValueOrEmptyString(nr.Name), } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/targets/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil + return dcl.URL("projects/{{project}}/locations/{{location}}/targets/{{name}}", nr.basePath(), userBasePath, params), nil } func (r *Target) SetPolicyURL(userBasePath string) string { @@ -2473,7 +2473,7 @@ func (r *Target) updateURL(userBasePath, updateName string) (string, error) { "location": dcl.ValueOrEmptyString(nr.Location), "name": dcl.ValueOrEmptyString(nr.Name), } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" 
}}location{{ "}}" }}/targets/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, fields), nil + return dcl.URL("projects/{{project}}/locations/{{location}}/targets/{{name}}", nr.basePath(), userBasePath, fields), nil } diff --git a/mmv1/third_party/terraform/services/clouddeploy/target_schema.go.tmpl b/mmv1/third_party/terraform/services/clouddeploy/target_schema.go similarity index 99% rename from mmv1/third_party/terraform/services/clouddeploy/target_schema.go.tmpl rename to mmv1/third_party/terraform/services/clouddeploy/target_schema.go index 3d18ed795cd7..e5c627cd187c 100644 --- a/mmv1/third_party/terraform/services/clouddeploy/target_schema.go.tmpl +++ b/mmv1/third_party/terraform/services/clouddeploy/target_schema.go @@ -89,7 +89,7 @@ func DCLTargetSchema() *dcl.Schema { Schemas: map[string]*dcl.Component{ "Target": &dcl.Component{ Title: "Target", - ID: "projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/targets/{{ "{{" }}name{{ "}}" }}", + ID: "projects/{{project}}/locations/{{location}}/targets/{{name}}", ParentContainer: "project", HasCreate: true, SchemaProperty: dcl.Property{ diff --git a/mmv1/third_party/terraform/services/containeraws/client.go.tmpl b/mmv1/third_party/terraform/services/containeraws/client.go similarity index 100% rename from mmv1/third_party/terraform/services/containeraws/client.go.tmpl rename to mmv1/third_party/terraform/services/containeraws/client.go diff --git a/mmv1/third_party/terraform/services/containeraws/resource_container_aws_cluster_meta.yaml.tmpl b/mmv1/third_party/terraform/services/containeraws/resource_container_aws_cluster_meta.yaml.tmpl index 49005a50735c..f25424849a69 100644 --- a/mmv1/third_party/terraform/services/containeraws/resource_container_aws_cluster_meta.yaml.tmpl +++ b/mmv1/third_party/terraform/services/containeraws/resource_container_aws_cluster_meta.yaml.tmpl @@ -1,5 +1,5 @@ resource: 'google_container_aws_cluster' -generation_type: 'dcl' +generation_type: 'handwritten' 
api_service_name: 'gkemulticloud.googleapis.com' api_version: 'v1' api_resource_type_kind: 'AwsCluster' diff --git a/mmv1/third_party/terraform/services/containeraws/resource_container_aws_node_pool_meta.yaml.tmpl b/mmv1/third_party/terraform/services/containeraws/resource_container_aws_node_pool_meta.yaml.tmpl index 68a119049c43..17c7c8cf1cbf 100644 --- a/mmv1/third_party/terraform/services/containeraws/resource_container_aws_node_pool_meta.yaml.tmpl +++ b/mmv1/third_party/terraform/services/containeraws/resource_container_aws_node_pool_meta.yaml.tmpl @@ -1,5 +1,5 @@ resource: 'google_container_aws_node_pool' -generation_type: 'dcl' +generation_type: 'handwritten' api_service_name: 'gkemulticloud.googleapis.com' api_version: 'v1' api_resource_type_kind: 'AwsNodePool' diff --git a/mmv1/third_party/terraform/services/containerazure/azure_client_internal.go.tmpl b/mmv1/third_party/terraform/services/containerazure/azure_client_internal.go similarity index 96% rename from mmv1/third_party/terraform/services/containerazure/azure_client_internal.go.tmpl rename to mmv1/third_party/terraform/services/containerazure/azure_client_internal.go index cfc4a2249bad..091124d42324 100644 --- a/mmv1/third_party/terraform/services/containerazure/azure_client_internal.go.tmpl +++ b/mmv1/third_party/terraform/services/containerazure/azure_client_internal.go @@ -35,7 +35,7 @@ func (r *AzureClient) basePath() string { params := map[string]interface{}{ "location": dcl.ValueOrEmptyString(r.Location), } - return dcl.Nprintf("https://{{ "{{" }}location{{ "}}" }}-gkemulticloud.googleapis.com/v1", params) + return dcl.Nprintf("https://{{location}}-gkemulticloud.googleapis.com/v1", params) } func (r *AzureClient) getURL(userBasePath string) (string, error) { @@ -45,7 +45,7 @@ func (r *AzureClient) getURL(userBasePath string) (string, error) { "location": dcl.ValueOrEmptyString(nr.Location), "name": dcl.ValueOrEmptyString(nr.Name), } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" 
}}/locations/{{ "{{" }}location{{ "}}" }}/azureClients/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil + return dcl.URL("projects/{{project}}/locations/{{location}}/azureClients/{{name}}", nr.basePath(), userBasePath, params), nil } func (r *AzureClient) listURL(userBasePath string) (string, error) { @@ -54,7 +54,7 @@ func (r *AzureClient) listURL(userBasePath string) (string, error) { "project": dcl.ValueOrEmptyString(nr.Project), "location": dcl.ValueOrEmptyString(nr.Location), } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/azureClients", nr.basePath(), userBasePath, params), nil + return dcl.URL("projects/{{project}}/locations/{{location}}/azureClients", nr.basePath(), userBasePath, params), nil } @@ -65,7 +65,7 @@ func (r *AzureClient) createURL(userBasePath string) (string, error) { "location": dcl.ValueOrEmptyString(nr.Location), "name": dcl.ValueOrEmptyString(nr.Name), } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/azureClients?azureClientId={{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil + return dcl.URL("projects/{{project}}/locations/{{location}}/azureClients?azureClientId={{name}}", nr.basePath(), userBasePath, params), nil } @@ -76,7 +76,7 @@ func (r *AzureClient) deleteURL(userBasePath string) (string, error) { "location": dcl.ValueOrEmptyString(nr.Location), "name": dcl.ValueOrEmptyString(nr.Name), } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/azureClients/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil + return dcl.URL("projects/{{project}}/locations/{{location}}/azureClients/{{name}}", nr.basePath(), userBasePath, params), nil } // clientApiOperation represents a mutable operation in the underlying REST diff --git a/mmv1/third_party/terraform/services/containerazure/azure_client_schema.go.tmpl 
b/mmv1/third_party/terraform/services/containerazure/azure_client_schema.go similarity index 97% rename from mmv1/third_party/terraform/services/containerazure/azure_client_schema.go.tmpl rename to mmv1/third_party/terraform/services/containerazure/azure_client_schema.go index 0ecd8512c311..cde16847588f 100644 --- a/mmv1/third_party/terraform/services/containerazure/azure_client_schema.go.tmpl +++ b/mmv1/third_party/terraform/services/containerazure/azure_client_schema.go @@ -95,7 +95,7 @@ func DCLAzureClientSchema() *dcl.Schema { Schemas: map[string]*dcl.Component{ "Client": &dcl.Component{ Title: "AzureClient", - ID: "projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/azureClients/{{ "{{" }}name{{ "}}" }}", + ID: "projects/{{project}}/locations/{{location}}/azureClients/{{name}}", ParentContainer: "project", HasCreate: true, SchemaProperty: dcl.Property{ diff --git a/mmv1/third_party/terraform/services/containerazure/client.go.tmpl b/mmv1/third_party/terraform/services/containerazure/client.go similarity index 100% rename from mmv1/third_party/terraform/services/containerazure/client.go.tmpl rename to mmv1/third_party/terraform/services/containerazure/client.go diff --git a/mmv1/third_party/terraform/services/containerazure/resource_container_azure_client_generated_test.go.tmpl b/mmv1/third_party/terraform/services/containerazure/resource_container_azure_client_generated_test.go similarity index 100% rename from mmv1/third_party/terraform/services/containerazure/resource_container_azure_client_generated_test.go.tmpl rename to mmv1/third_party/terraform/services/containerazure/resource_container_azure_client_generated_test.go diff --git a/mmv1/third_party/terraform/services/containerazure/resource_container_azure_client_meta.yaml b/mmv1/third_party/terraform/services/containerazure/resource_container_azure_client_meta.yaml index c1a24a3b786f..733b8c81196f 100644 --- 
a/mmv1/third_party/terraform/services/containerazure/resource_container_azure_client_meta.yaml +++ b/mmv1/third_party/terraform/services/containerazure/resource_container_azure_client_meta.yaml @@ -1,5 +1,5 @@ resource: 'google_container_azure_client' -generation_type: 'dcl' +generation_type: 'handwritten' api_service_name: 'gkemulticloud.googleapis.com' api_version: 'v1' api_resource_type_kind: 'AzureClient' diff --git a/mmv1/third_party/terraform/services/containerazure/resource_container_azure_cluster_meta.yaml.tmpl b/mmv1/third_party/terraform/services/containerazure/resource_container_azure_cluster_meta.yaml.tmpl index 0238e7b209e7..54598e64ffe5 100644 --- a/mmv1/third_party/terraform/services/containerazure/resource_container_azure_cluster_meta.yaml.tmpl +++ b/mmv1/third_party/terraform/services/containerazure/resource_container_azure_cluster_meta.yaml.tmpl @@ -1,5 +1,5 @@ resource: 'google_container_azure_cluster' -generation_type: 'dcl' +generation_type: 'handwritten' api_service_name: 'gkemulticloud.googleapis.com' api_version: 'v1' api_resource_type_kind: 'AzureCluster' diff --git a/mmv1/third_party/terraform/services/containerazure/resource_container_azure_node_pool_meta.yaml.tmpl b/mmv1/third_party/terraform/services/containerazure/resource_container_azure_node_pool_meta.yaml.tmpl index fa47f16b6f8c..ef92715fe513 100644 --- a/mmv1/third_party/terraform/services/containerazure/resource_container_azure_node_pool_meta.yaml.tmpl +++ b/mmv1/third_party/terraform/services/containerazure/resource_container_azure_node_pool_meta.yaml.tmpl @@ -1,5 +1,5 @@ resource: 'google_container_azure_node_pool' -generation_type: 'dcl' +generation_type: 'handwritten' api_service_name: 'gkemulticloud.googleapis.com' api_version: 'v1' api_resource_type_kind: 'AzureNodePool' diff --git a/mmv1/third_party/terraform/services/dataplex/asset_internal.go.tmpl b/mmv1/third_party/terraform/services/dataplex/asset_internal.go similarity index 99% rename from 
mmv1/third_party/terraform/services/dataplex/asset_internal.go.tmpl rename to mmv1/third_party/terraform/services/dataplex/asset_internal.go index 3176de95c68c..02db40169243 100644 --- a/mmv1/third_party/terraform/services/dataplex/asset_internal.go.tmpl +++ b/mmv1/third_party/terraform/services/dataplex/asset_internal.go @@ -121,7 +121,7 @@ func (r *Asset) getURL(userBasePath string) (string, error) { "lake": dcl.ValueOrEmptyString(nr.Lake), "name": dcl.ValueOrEmptyString(nr.Name), } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/lakes/{{ "{{" }}lake{{ "}}" }}/zones/{{ "{{" }}dataplexZone{{ "}}" }}/assets/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil + return dcl.URL("projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{dataplexZone}}/assets/{{name}}", nr.basePath(), userBasePath, params), nil } func (r *Asset) listURL(userBasePath string) (string, error) { @@ -132,7 +132,7 @@ func (r *Asset) listURL(userBasePath string) (string, error) { "dataplexZone": dcl.ValueOrEmptyString(nr.DataplexZone), "lake": dcl.ValueOrEmptyString(nr.Lake), } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/lakes/{{ "{{" }}lake{{ "}}" }}/zones/{{ "{{" }}dataplexZone{{ "}}" }}/assets", nr.basePath(), userBasePath, params), nil + return dcl.URL("projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{dataplexZone}}/assets", nr.basePath(), userBasePath, params), nil } @@ -145,7 +145,7 @@ func (r *Asset) createURL(userBasePath string) (string, error) { "lake": dcl.ValueOrEmptyString(nr.Lake), "name": dcl.ValueOrEmptyString(nr.Name), } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/lakes/{{ "{{" }}lake{{ "}}" }}/zones/{{ "{{" }}dataplexZone{{ "}}" }}/assets?assetId={{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil + return 
dcl.URL("projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{dataplexZone}}/assets?assetId={{name}}", nr.basePath(), userBasePath, params), nil } @@ -158,7 +158,7 @@ func (r *Asset) deleteURL(userBasePath string) (string, error) { "lake": dcl.ValueOrEmptyString(nr.Lake), "name": dcl.ValueOrEmptyString(nr.Name), } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/lakes/{{ "{{" }}lake{{ "}}" }}/zones/{{ "{{" }}dataplexZone{{ "}}" }}/assets/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil + return dcl.URL("projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{dataplexZone}}/assets/{{name}}", nr.basePath(), userBasePath, params), nil } func (r *Asset) SetPolicyURL(userBasePath string) string { @@ -2358,7 +2358,7 @@ func (r *Asset) updateURL(userBasePath, updateName string) (string, error) { "lake": dcl.ValueOrEmptyString(nr.Lake), "name": dcl.ValueOrEmptyString(nr.Name), } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/lakes/{{ "{{" }}lake{{ "}}" }}/zones/{{ "{{" }}dataplexZone{{ "}}" }}/assets/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, fields), nil + return dcl.URL("projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{dataplexZone}}/assets/{{name}}", nr.basePath(), userBasePath, fields), nil } diff --git a/mmv1/third_party/terraform/services/dataplex/client.go.tmpl b/mmv1/third_party/terraform/services/dataplex/client.go similarity index 100% rename from mmv1/third_party/terraform/services/dataplex/client.go.tmpl rename to mmv1/third_party/terraform/services/dataplex/client.go diff --git a/mmv1/third_party/terraform/services/dataplex/dataplex_utils.go.tmpl b/mmv1/third_party/terraform/services/dataplex/dataplex_utils.go similarity index 100% rename from mmv1/third_party/terraform/services/dataplex/dataplex_utils.go.tmpl rename to mmv1/third_party/terraform/services/dataplex/dataplex_utils.go diff --git 
a/mmv1/third_party/terraform/services/dataplex/resource_dataplex_asset_generated_test.go.tmpl b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_asset_generated_test.go similarity index 100% rename from mmv1/third_party/terraform/services/dataplex/resource_dataplex_asset_generated_test.go.tmpl rename to mmv1/third_party/terraform/services/dataplex/resource_dataplex_asset_generated_test.go diff --git a/mmv1/third_party/terraform/services/dataplex/resource_dataplex_asset_meta.yaml b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_asset_meta.yaml index 10a0f3fbfb9c..dce96244aa71 100644 --- a/mmv1/third_party/terraform/services/dataplex/resource_dataplex_asset_meta.yaml +++ b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_asset_meta.yaml @@ -1,5 +1,5 @@ resource: 'google_dataplex_asset' -generation_type: 'dcl' +generation_type: 'handwritten' api_service_name: 'dataplex.googleapis.com' api_version: 'v1' api_resource_type_kind: 'Asset' diff --git a/mmv1/third_party/terraform/services/dataplex/resource_dataplex_lake_generated_test.go.tmpl b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_lake_generated_test.go similarity index 100% rename from mmv1/third_party/terraform/services/dataplex/resource_dataplex_lake_generated_test.go.tmpl rename to mmv1/third_party/terraform/services/dataplex/resource_dataplex_lake_generated_test.go diff --git a/mmv1/third_party/terraform/services/dataplex/resource_dataplex_lake_meta.yaml b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_lake_meta.yaml index ab6d509ab284..977e7aaeb14c 100644 --- a/mmv1/third_party/terraform/services/dataplex/resource_dataplex_lake_meta.yaml +++ b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_lake_meta.yaml @@ -1,5 +1,5 @@ resource: 'google_dataplex_lake' -generation_type: 'dcl' +generation_type: 'handwritten' api_service_name: 'dataplex.googleapis.com' api_version: 'v1' api_resource_type_kind: 'Lake' diff --git 
a/mmv1/third_party/terraform/services/dataplex/resource_dataplex_zone_generated_test.go.tmpl b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_zone_generated_test.go similarity index 100% rename from mmv1/third_party/terraform/services/dataplex/resource_dataplex_zone_generated_test.go.tmpl rename to mmv1/third_party/terraform/services/dataplex/resource_dataplex_zone_generated_test.go diff --git a/mmv1/third_party/terraform/services/dataplex/resource_dataplex_zone_meta.yaml b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_zone_meta.yaml index 08a8b327ecd7..45cc3eda9aa0 100644 --- a/mmv1/third_party/terraform/services/dataplex/resource_dataplex_zone_meta.yaml +++ b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_zone_meta.yaml @@ -1,5 +1,5 @@ resource: 'google_dataplex_zone' -generation_type: 'dcl' +generation_type: 'handwritten' api_service_name: 'dataplex.googleapis.com' api_version: 'v1' api_resource_type_kind: 'Zone' diff --git a/mmv1/third_party/terraform/services/dataproc/resource_dataproc_workflow_template_meta.yaml.tmpl b/mmv1/third_party/terraform/services/dataproc/resource_dataproc_workflow_template_meta.yaml.tmpl index 671f2e291c66..35bb465f38b0 100644 --- a/mmv1/third_party/terraform/services/dataproc/resource_dataproc_workflow_template_meta.yaml.tmpl +++ b/mmv1/third_party/terraform/services/dataproc/resource_dataproc_workflow_template_meta.yaml.tmpl @@ -1,5 +1,5 @@ resource: 'google_dataproc_workflow_template' -generation_type: 'dcl' +generation_type: 'handwritten' api_service_name: 'dataproc.googleapis.com' api_version: 'v1' api_resource_type_kind: 'WorkflowTemplate' diff --git a/mmv1/third_party/terraform/services/firebaserules/client.go.tmpl b/mmv1/third_party/terraform/services/firebaserules/client.go similarity index 100% rename from mmv1/third_party/terraform/services/firebaserules/client.go.tmpl rename to mmv1/third_party/terraform/services/firebaserules/client.go diff --git 
a/mmv1/third_party/terraform/services/firebaserules/release_internal.go.tmpl b/mmv1/third_party/terraform/services/firebaserules/release_internal.go similarity index 97% rename from mmv1/third_party/terraform/services/firebaserules/release_internal.go.tmpl rename to mmv1/third_party/terraform/services/firebaserules/release_internal.go index a9cae068c615..fa75570172f2 100644 --- a/mmv1/third_party/terraform/services/firebaserules/release_internal.go.tmpl +++ b/mmv1/third_party/terraform/services/firebaserules/release_internal.go @@ -35,7 +35,7 @@ func (r *Release) getURL(userBasePath string) (string, error) { "project": dcl.ValueOrEmptyString(nr.Project), "name": dcl.ValueOrEmptyString(nr.Name), } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/releases/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil + return dcl.URL("projects/{{project}}/releases/{{name}}", nr.basePath(), userBasePath, params), nil } func (r *Release) listURL(userBasePath string) (string, error) { @@ -43,7 +43,7 @@ func (r *Release) listURL(userBasePath string) (string, error) { params := map[string]interface{}{ "project": dcl.ValueOrEmptyString(nr.Project), } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/releases", nr.basePath(), userBasePath, params), nil + return dcl.URL("projects/{{project}}/releases", nr.basePath(), userBasePath, params), nil } @@ -52,7 +52,7 @@ func (r *Release) createURL(userBasePath string) (string, error) { params := map[string]interface{}{ "project": dcl.ValueOrEmptyString(nr.Project), } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/releases", nr.basePath(), userBasePath, params), nil + return dcl.URL("projects/{{project}}/releases", nr.basePath(), userBasePath, params), nil } @@ -62,7 +62,7 @@ func (r *Release) deleteURL(userBasePath string) (string, error) { "project": dcl.ValueOrEmptyString(nr.Project), "name": dcl.ValueOrEmptyString(nr.Name), } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/releases/{{ "{{" 
}}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil + return dcl.URL("projects/{{project}}/releases/{{name}}", nr.basePath(), userBasePath, params), nil } // releaseApiOperation represents a mutable operation in the underlying REST diff --git a/mmv1/third_party/terraform/services/firebaserules/release_schema.go.tmpl b/mmv1/third_party/terraform/services/firebaserules/release_schema.go similarity index 97% rename from mmv1/third_party/terraform/services/firebaserules/release_schema.go.tmpl rename to mmv1/third_party/terraform/services/firebaserules/release_schema.go index 0a0f821c228d..95f0171e816f 100644 --- a/mmv1/third_party/terraform/services/firebaserules/release_schema.go.tmpl +++ b/mmv1/third_party/terraform/services/firebaserules/release_schema.go @@ -80,7 +80,7 @@ func DCLReleaseSchema() *dcl.Schema { Schemas: map[string]*dcl.Component{ "Release": &dcl.Component{ Title: "Release", - ID: "projects/{{ "{{" }}project{{ "}}" }}/releases/{{ "{{" }}name{{ "}}" }}", + ID: "projects/{{project}}/releases/{{name}}", ParentContainer: "project", HasCreate: true, SchemaProperty: dcl.Property{ diff --git a/mmv1/third_party/terraform/services/firebaserules/release_utils.go.tmpl b/mmv1/third_party/terraform/services/firebaserules/release_utils.go similarity index 100% rename from mmv1/third_party/terraform/services/firebaserules/release_utils.go.tmpl rename to mmv1/third_party/terraform/services/firebaserules/release_utils.go diff --git a/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_release_meta.yaml b/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_release_meta.yaml index 0f6fb0fda504..5caf910a758c 100644 --- a/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_release_meta.yaml +++ b/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_release_meta.yaml @@ -1,5 +1,5 @@ resource: 'google_firebaserules_release' -generation_type: 'dcl' +generation_type: 'handwritten' 
api_service_name: 'firebaserules.googleapis.com' api_version: 'v1' api_resource_type_kind: 'Release' diff --git a/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_ruleset_generated_test.go.tmpl b/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_ruleset_generated_test.go similarity index 100% rename from mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_ruleset_generated_test.go.tmpl rename to mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_ruleset_generated_test.go diff --git a/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_ruleset_meta.yaml b/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_ruleset_meta.yaml index be3d282611ad..32dfa7a247b7 100644 --- a/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_ruleset_meta.yaml +++ b/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_ruleset_meta.yaml @@ -1,5 +1,5 @@ resource: 'google_firebaserules_ruleset' -generation_type: 'dcl' +generation_type: 'handwritten' api_service_name: 'firebaserules.googleapis.com' api_version: 'v1' api_resource_type_kind: 'Ruleset' diff --git a/mmv1/third_party/terraform/services/firebaserules/ruleset_internal.go.tmpl b/mmv1/third_party/terraform/services/firebaserules/ruleset_internal.go similarity index 98% rename from mmv1/third_party/terraform/services/firebaserules/ruleset_internal.go.tmpl rename to mmv1/third_party/terraform/services/firebaserules/ruleset_internal.go index a0cf13b109b8..b0807590434a 100644 --- a/mmv1/third_party/terraform/services/firebaserules/ruleset_internal.go.tmpl +++ b/mmv1/third_party/terraform/services/firebaserules/ruleset_internal.go @@ -60,7 +60,7 @@ func (r *Ruleset) getURL(userBasePath string) (string, error) { "project": dcl.ValueOrEmptyString(nr.Project), "name": dcl.ValueOrEmptyString(nr.Name), } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/rulesets/{{ "{{" 
}}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil + return dcl.URL("projects/{{project}}/rulesets/{{name}}", nr.basePath(), userBasePath, params), nil } func (r *Ruleset) listURL(userBasePath string) (string, error) { @@ -68,7 +68,7 @@ func (r *Ruleset) listURL(userBasePath string) (string, error) { params := map[string]interface{}{ "project": dcl.ValueOrEmptyString(nr.Project), } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/rulesets", nr.basePath(), userBasePath, params), nil + return dcl.URL("projects/{{project}}/rulesets", nr.basePath(), userBasePath, params), nil } @@ -77,7 +77,7 @@ func (r *Ruleset) createURL(userBasePath string) (string, error) { params := map[string]interface{}{ "project": dcl.ValueOrEmptyString(nr.Project), } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/rulesets", nr.basePath(), userBasePath, params), nil + return dcl.URL("projects/{{project}}/rulesets", nr.basePath(), userBasePath, params), nil } @@ -87,7 +87,7 @@ func (r *Ruleset) deleteURL(userBasePath string) (string, error) { "project": dcl.ValueOrEmptyString(nr.Project), "name": dcl.ValueOrEmptyString(nr.Name), } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/rulesets/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil + return dcl.URL("projects/{{project}}/rulesets/{{name}}", nr.basePath(), userBasePath, params), nil } // rulesetApiOperation represents a mutable operation in the underlying REST diff --git a/mmv1/third_party/terraform/services/firebaserules/ruleset_schema.go.tmpl b/mmv1/third_party/terraform/services/firebaserules/ruleset_schema.go similarity index 98% rename from mmv1/third_party/terraform/services/firebaserules/ruleset_schema.go.tmpl rename to mmv1/third_party/terraform/services/firebaserules/ruleset_schema.go index 7e0d38fc0244..3044e24880e4 100644 --- a/mmv1/third_party/terraform/services/firebaserules/ruleset_schema.go.tmpl +++ b/mmv1/third_party/terraform/services/firebaserules/ruleset_schema.go @@ -80,7 
+80,7 @@ func DCLRulesetSchema() *dcl.Schema { Schemas: map[string]*dcl.Component{ "Ruleset": &dcl.Component{ Title: "Ruleset", - ID: "projects/{{ "{{" }}project{{ "}}" }}/rulesets/{{ "{{" }}name{{ "}}" }}", + ID: "projects/{{project}}/rulesets/{{name}}", ParentContainer: "project", HasCreate: true, SchemaProperty: dcl.Property{ diff --git a/mmv1/third_party/terraform/services/gkehub/resource_gke_hub_feature_membership_meta.yaml.tmpl b/mmv1/third_party/terraform/services/gkehub/resource_gke_hub_feature_membership_meta.yaml.tmpl index 4cb604caa2c6..834d4d26de9d 100644 --- a/mmv1/third_party/terraform/services/gkehub/resource_gke_hub_feature_membership_meta.yaml.tmpl +++ b/mmv1/third_party/terraform/services/gkehub/resource_gke_hub_feature_membership_meta.yaml.tmpl @@ -1,5 +1,5 @@ resource: 'google_gke_hub_feature_membership' -generation_type: 'dcl' +generation_type: 'handwritten' api_service_name: 'gkehub.googleapis.com' {{- if ne $.TargetVersionName "ga" }} api_version: 'v1beta' diff --git a/mmv1/third_party/terraform/services/recaptchaenterprise/client.go.tmpl b/mmv1/third_party/terraform/services/recaptchaenterprise/client.go similarity index 100% rename from mmv1/third_party/terraform/services/recaptchaenterprise/client.go.tmpl rename to mmv1/third_party/terraform/services/recaptchaenterprise/client.go diff --git a/mmv1/third_party/terraform/services/recaptchaenterprise/key_internal.go.tmpl b/mmv1/third_party/terraform/services/recaptchaenterprise/key_internal.go similarity index 99% rename from mmv1/third_party/terraform/services/recaptchaenterprise/key_internal.go.tmpl rename to mmv1/third_party/terraform/services/recaptchaenterprise/key_internal.go index 398a758724e0..b4c24f16b6e1 100644 --- a/mmv1/third_party/terraform/services/recaptchaenterprise/key_internal.go.tmpl +++ b/mmv1/third_party/terraform/services/recaptchaenterprise/key_internal.go @@ -84,7 +84,7 @@ func (r *Key) getURL(userBasePath string) (string, error) { "project": 
dcl.ValueOrEmptyString(nr.Project), "name": dcl.ValueOrEmptyString(nr.Name), } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/keys/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil + return dcl.URL("projects/{{project}}/keys/{{name}}", nr.basePath(), userBasePath, params), nil } func (r *Key) listURL(userBasePath string) (string, error) { @@ -92,7 +92,7 @@ func (r *Key) listURL(userBasePath string) (string, error) { params := map[string]interface{}{ "project": dcl.ValueOrEmptyString(nr.Project), } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/keys", nr.basePath(), userBasePath, params), nil + return dcl.URL("projects/{{project}}/keys", nr.basePath(), userBasePath, params), nil } @@ -101,7 +101,7 @@ func (r *Key) createURL(userBasePath string) (string, error) { params := map[string]interface{}{ "project": dcl.ValueOrEmptyString(nr.Project), } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/keys", nr.basePath(), userBasePath, params), nil + return dcl.URL("projects/{{project}}/keys", nr.basePath(), userBasePath, params), nil } @@ -111,7 +111,7 @@ func (r *Key) deleteURL(userBasePath string) (string, error) { "project": dcl.ValueOrEmptyString(nr.Project), "name": dcl.ValueOrEmptyString(nr.Name), } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/keys/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil + return dcl.URL("projects/{{project}}/keys/{{name}}", nr.basePath(), userBasePath, params), nil } // keyApiOperation represents a mutable operation in the underlying REST @@ -1546,7 +1546,7 @@ func (r *Key) updateURL(userBasePath, updateName string) (string, error) { "project": dcl.ValueOrEmptyString(nr.Project), "name": dcl.ValueOrEmptyString(nr.Name), } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/keys/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, fields), nil + return dcl.URL("projects/{{project}}/keys/{{name}}", nr.basePath(), userBasePath, fields), nil } diff --git 
a/mmv1/third_party/terraform/services/recaptchaenterprise/key_schema.go.tmpl b/mmv1/third_party/terraform/services/recaptchaenterprise/key_schema.go similarity index 99% rename from mmv1/third_party/terraform/services/recaptchaenterprise/key_schema.go.tmpl rename to mmv1/third_party/terraform/services/recaptchaenterprise/key_schema.go index 8c1c0cbd34c8..2357be900e69 100644 --- a/mmv1/third_party/terraform/services/recaptchaenterprise/key_schema.go.tmpl +++ b/mmv1/third_party/terraform/services/recaptchaenterprise/key_schema.go @@ -71,7 +71,7 @@ func DCLKeySchema() *dcl.Schema { Schemas: map[string]*dcl.Component{ "Key": &dcl.Component{ Title: "Key", - ID: "projects/{{ "{{" }}project{{ "}}" }}/keys/{{ "{{" }}name{{ "}}" }}", + ID: "projects/{{project}}/keys/{{name}}", ParentContainer: "project", LabelsField: "labels", HasCreate: true, diff --git a/mmv1/third_party/terraform/services/recaptchaenterprise/resource_recaptcha_enterprise_key_generated_test.go.tmpl b/mmv1/third_party/terraform/services/recaptchaenterprise/resource_recaptcha_enterprise_key_generated_test.go similarity index 100% rename from mmv1/third_party/terraform/services/recaptchaenterprise/resource_recaptcha_enterprise_key_generated_test.go.tmpl rename to mmv1/third_party/terraform/services/recaptchaenterprise/resource_recaptcha_enterprise_key_generated_test.go diff --git a/mmv1/third_party/terraform/services/recaptchaenterprise/resource_recaptcha_enterprise_key_meta.yaml b/mmv1/third_party/terraform/services/recaptchaenterprise/resource_recaptcha_enterprise_key_meta.yaml index ceb7cf403e09..b5b18e91c1cd 100644 --- a/mmv1/third_party/terraform/services/recaptchaenterprise/resource_recaptcha_enterprise_key_meta.yaml +++ b/mmv1/third_party/terraform/services/recaptchaenterprise/resource_recaptcha_enterprise_key_meta.yaml @@ -1,5 +1,5 @@ resource: 'google_recaptcha_enterprise_key' -generation_type: 'dcl' +generation_type: 'handwritten' api_service_name: 'recaptchaenterprise.googleapis.com' api_version: 
'v1' api_resource_type_kind: 'Key' From b6dbc03c9f0e7a82ecda45570ea510cc1d5dedce Mon Sep 17 00:00:00 2001 From: Chris Hawk Date: Wed, 18 Mar 2026 15:53:58 -0700 Subject: [PATCH 06/13] Remove tpgtools references from the makefile --- GNUmakefile | 57 ++--------------------------------------------------- 1 file changed, 2 insertions(+), 55 deletions(-) diff --git a/GNUmakefile b/GNUmakefile index 1439a2bcd41d..f2384d6fbdbc 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -4,41 +4,16 @@ default: build # mm setup -ifeq ($(ENGINE),tpgtools) - # we specify the product to one that doesn't - # exist so exclusively build base tpgtools implementation - mmv1_compile=-p does-not-exist -else ifneq ($(PRODUCT),) +ifneq ($(PRODUCT),) mmv1_compile=--product $(PRODUCT) endif -# tpgtools setup -ifeq ($(ENGINE),mmv1) - # we specify the product to one that doesn't - # exist so exclusively build base mmv1 implementation - tpgtools_compile = --service does-not-exist -else ifneq ($(PRODUCT),) - tpgtools_compile = --service $(PRODUCT) -else - tpgtools_compile = -endif - ifneq ($(RESOURCE),) mmv1_compile += --resource $(RESOURCE) - tpgtools_compile += --resource $(RESOURCE) endif ifneq ($(OVERRIDES),) mmv1_compile += --overrides $(OVERRIDES) - tpgtools_compile += --overrides $(OVERRIDES)/tpgtools/overrides --path $(OVERRIDES)/tpgtools/api - serialize_compile = --overrides $(OVERRIDES)/tpgtools/overrides --path $(OVERRIDES)/tpgtools/api -else - tpgtools_compile += --path "api" --overrides "overrides" - serialize_compile = --path "api" --overrides "overrides" -endif - -ifneq ($(VERBOSE),) - tpgtools_compile += --logtostderr=1 --stderrthreshold=2 endif UNAME := $(shell uname) @@ -50,10 +25,6 @@ else SED_I := -i '' -E endif -ifeq ($(FORCE_DCL),) - FORCE_DCL=latest -endif - SHOULD_SKIP_CLEAN := false # Default: do not skip ifneq ($(SKIP_CLEAN),) ifneq ($(SKIP_CLEAN),false) @@ -75,12 +46,6 @@ mmv1: go run . 
--output $(OUTPUT_PATH) --version $(VERSION) $(mmv1_compile); \ fi -tpgtools: serialize - @echo "Executing tpgtools build for $(OUTPUT_PATH)"; - @cd tpgtools;\ - go run . --output $(OUTPUT_PATH) --version $(VERSION) $(tpgtools_compile); \ - rm serialization.go - clean-provider: check_safe_build @if [ -n "$(PRODUCT)" ]; then \ printf "\n\e[1;33mWARNING:\e[0m Skipping clean-provider step because PRODUCT ('$(PRODUCT)') is set.\n"; \ @@ -136,24 +101,6 @@ test: cd mmv1; \ go test ./... -serialize: - cd tpgtools;\ - cp -f serialization.go.base serialization.go &&\ - go run . $(serialize_compile) --mode "serialization" > temp.serial &&\ - mv -f temp.serial serialization.go - -upgrade-dcl: - make serialize - cd tpgtools && \ - go mod edit -dropreplace=github.com/GoogleCloudPlatform/declarative-resource-client-library &&\ - go mod edit -require=github.com/GoogleCloudPlatform/declarative-resource-client-library@$(FORCE_DCL) &&\ - go mod tidy;\ - MOD_LINE=$$(grep declarative-resource-client-library go.mod);\ - SUM_LINE=$$(grep declarative-resource-client-library go.sum);\ - cd ../mmv1/third_party/terraform && \ - sed ${SED_I} "s!.*declarative-resource-client-library.*!$$MOD_LINE!" 
go.mod; echo "$$SUM_LINE" >> go.sum - - validate_environment: check_parameters check_safe_build check_parameters: @@ -175,4 +122,4 @@ check_safe_build: doctor: ./scripts/doctor -.PHONY: mmv1 tpgtools test clean-provider validate_environment serialize doctor +.PHONY: mmv1 test clean-provider validate_environment doctor From 284c286c9876ffab4675c9f49f912263a1ce8c79 Mon Sep 17 00:00:00 2001 From: Chris Hawk Date: Wed, 18 Mar 2026 16:39:05 -0700 Subject: [PATCH 07/13] Remove unused DCL files --- .../tpgdclresource/operations/compute.go | 117 ----------------- .../tpgdclresource/operations/crm.go | 123 ------------------ .../tpgdclresource/operations/datastore.go | 68 ---------- .../tpgdclresource/operations/dns.go | 59 --------- .../tpgdclresource/operations/knative.go | 84 ------------ .../tpgdclresource/operations/monitoring.go | 34 ----- .../tpgdclresource/operations/osconfig.go | 32 ----- .../tpgdclresource/operations/sql.go | 78 ----------- .../tpgdclresource/orgpolicy_utils.go | 35 ----- 9 files changed, 630 deletions(-) delete mode 100755 mmv1/third_party/terraform/tpgdclresource/operations/compute.go delete mode 100755 mmv1/third_party/terraform/tpgdclresource/operations/crm.go delete mode 100755 mmv1/third_party/terraform/tpgdclresource/operations/datastore.go delete mode 100755 mmv1/third_party/terraform/tpgdclresource/operations/dns.go delete mode 100755 mmv1/third_party/terraform/tpgdclresource/operations/knative.go delete mode 100755 mmv1/third_party/terraform/tpgdclresource/operations/monitoring.go delete mode 100755 mmv1/third_party/terraform/tpgdclresource/operations/osconfig.go delete mode 100755 mmv1/third_party/terraform/tpgdclresource/operations/sql.go delete mode 100644 mmv1/third_party/terraform/tpgdclresource/orgpolicy_utils.go diff --git a/mmv1/third_party/terraform/tpgdclresource/operations/compute.go b/mmv1/third_party/terraform/tpgdclresource/operations/compute.go deleted file mode 100755 index 868ef74f2bfb..000000000000 --- 
a/mmv1/third_party/terraform/tpgdclresource/operations/compute.go +++ /dev/null @@ -1,117 +0,0 @@ -package operations - -import ( - "bytes" - "context" - "fmt" - "strings" - "time" - - dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" -) - -// ComputeOperation can be parsed from the returned API operation and waited on. -// Based on https://cloud.google.com/compute/docs/reference/rest/v1/regionOperations -type ComputeOperation struct { - ID string `json:"id"` - Error *ComputeOperationError `json:"error"` - SelfLink string `json:"selfLink"` - Status string `json:"status"` - TargetLink string `json:"targetLink"` - TargetID string `json:"targetId"` - // other irrelevant fields omitted - - config *dcl.Config -} - -// ComputeOperationError is the GCE operation's Error body. -type ComputeOperationError struct { - Code int `json:"code"` - Message string `json:"message"` - Errors []*ComputeOperationErrorError `json:"errors"` -} - -// String formats the OperationError as an error string. -func (e *ComputeOperationError) String() string { - if e == nil { - return "nil" - } - var b strings.Builder - for _, err := range e.Errors { - fmt.Fprintf(&b, "error code %q, message: %s\n", err.Code, err.Message) - } - if e.Code != 0 || e.Message != "" { - fmt.Fprintf(&b, "error code %d, message: %s\n", e.Code, e.Message) - } - - return b.String() -} - -// ComputeOperationErrorError is a singular error in a GCE operation. -type ComputeOperationErrorError struct { - Code string `json:"code"` - Message string `json:"message"` -} - -// Wait waits for an ComputeOperation to complete by fetching the operation until it completes. 
-func (op *ComputeOperation) Wait(ctx context.Context, c *dcl.Config, _, _ string) error { - c.Logger.Infof("Waiting on operation: %v", op) - op.config = c - - err := dcl.Do(ctx, op.operate, c.RetryProvider) - c.Logger.Infof("Completed operation: %v", op) - return err -} - -func (op *ComputeOperation) handleResponse(resp *dcl.RetryDetails, err error) (*dcl.RetryDetails, error) { - if err != nil { - if dcl.IsRetryableRequestError(op.config, err, false, time.Now()) { - return nil, dcl.OperationNotDone{} - } - return nil, err - } - - if err := dcl.ParseResponse(resp.Response, op); err != nil { - return nil, err - } - - if op.Status != "DONE" { - return nil, dcl.OperationNotDone{} - } - - if op.Error != nil { - return nil, fmt.Errorf("operation received error: %v", op.Error) - } - - return resp, nil -} - -// FirstResponse returns the first response that this operation receives with the resource. -// This response may contain special information. -func (op *ComputeOperation) FirstResponse() (map[string]any, bool) { - return make(map[string]any), false -} - -func (op *ComputeOperation) operate(ctx context.Context) (*dcl.RetryDetails, error) { - return op.handleResponse(dcl.SendRequest(ctx, op.config, "GET", op.SelfLink, &bytes.Buffer{}, nil)) -} - -// ComputeGlobalOrganizationOperation can be parsed from the returned API operation and waited on. 
-// Based on https://cloud.google.com/compute/docs/reference/rest/v1/globalOrganizationOperations -type ComputeGlobalOrganizationOperation struct { - BaseOperation ComputeOperation - Parent string -} - -func (op *ComputeGlobalOrganizationOperation) Wait(ctx context.Context, c *dcl.Config, parent *string) error { - c.Logger.Infof("Waiting on: %v", op) - op.BaseOperation.config = c - - op.Parent = *parent - - return dcl.Do(ctx, op.operate, c.RetryProvider) -} - -func (op *ComputeGlobalOrganizationOperation) operate(ctx context.Context) (*dcl.RetryDetails, error) { - return op.BaseOperation.handleResponse(dcl.SendRequest(ctx, op.BaseOperation.config, "GET", op.BaseOperation.SelfLink+"?parentId="+op.Parent, &bytes.Buffer{}, nil)) -} diff --git a/mmv1/third_party/terraform/tpgdclresource/operations/crm.go b/mmv1/third_party/terraform/tpgdclresource/operations/crm.go deleted file mode 100755 index 3c86bde2326c..000000000000 --- a/mmv1/third_party/terraform/tpgdclresource/operations/crm.go +++ /dev/null @@ -1,123 +0,0 @@ -package operations - -import ( - "bytes" - "context" - "fmt" - "strings" - "time" - - dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" -) - -// CRMOperation can be parsed from the returned API operation and waited on. -// This is the typical GCP operation. -type CRMOperation struct { - Name string `json:"name"` - Error *CRMOperationError `json:"error"` - Done bool `json:"done"` - Response map[string]any `json:"response"` - Metadata map[string]any `json:"metadata"` - // other irrelevant fields omitted - - config *dcl.Config - basePath string - verb string - version string - - response map[string]any -} - -// CRMOperationError is the GCP operation's Error body. -type CRMOperationError struct { - Code int `json:"code"` - Message string `json:"message"` - Errors []*CRMOperationErrorError `json:"errors"` -} - -// String formats the CRMOperationError as an error string. 
-func (e *CRMOperationError) String() string { - if e == nil { - return "nil" - } - var b strings.Builder - for _, err := range e.Errors { - fmt.Fprintf(&b, "error code %q, message: %s\n", err.Code, err.Message) - } - if e.Code != 0 || e.Message != "" { - fmt.Fprintf(&b, "error code %d, message: %s\n", e.Code, e.Message) - } - - return b.String() -} - -// CRMOperationErrorError is a singular error in a GCP operation. -type CRMOperationErrorError struct { - Code string `json:"code"` - Message string `json:"message"` -} - -// Wait waits for an CRMOperation to complete by fetching the operation until it completes. -func (op *CRMOperation) Wait(ctx context.Context, c *dcl.Config, basePath, verb string) error { - c.Logger.Infof("Waiting on operation: %v", op) - op.config = c - op.basePath = basePath - op.verb = verb - - if len(op.Response) > 0 { - op.response = op.Response - } - - // base CRM resources use the v1 endpoint - op.version = "v1" - - // Tags resources require the v3 endpoint, and DCL merges the two into one Operation handler. Identify - // the operation kind by the "type" returned. 
- if t, ok := op.Metadata["@type"].(string); ok && strings.HasPrefix(t, "type.googleapis.com/google.cloud.resourcemanager.v3") { - op.version = "v3" - } - - if op.Done { - c.Logger.Infof("Completed operation: %v", op) - return nil - } - - err := dcl.Do(ctx, op.operate, c.RetryProvider) - c.Logger.Infof("Completed operation: %v", op) - return err -} - -func (op *CRMOperation) operate(ctx context.Context) (*dcl.RetryDetails, error) { - u := dcl.URL(op.version+"/"+op.Name, op.basePath, op.config.BasePath, nil) - resp, err := dcl.SendRequest(ctx, op.config, op.verb, u, &bytes.Buffer{}, nil) - if err != nil { - if dcl.IsRetryableRequestError(op.config, err, false, time.Now()) { - return nil, dcl.OperationNotDone{} - } - return nil, err - } - - if err := dcl.ParseResponse(resp.Response, op); err != nil { - return nil, err - } - - if !op.Done { - return nil, dcl.OperationNotDone{} - } - - if op.Error != nil { - return nil, fmt.Errorf("operation received error: %+v", op.Error) - } - - if len(op.response) == 0 && len(op.Response) > 0 { - op.response = op.Response - } - - return resp, nil -} - -// FirstResponse returns the first response that this operation receives with the resource. -// This response may contain special information. -func (op *CRMOperation) FirstResponse() (map[string]any, bool) { - return op.response, len(op.response) > 0 -} diff --git a/mmv1/third_party/terraform/tpgdclresource/operations/datastore.go b/mmv1/third_party/terraform/tpgdclresource/operations/datastore.go deleted file mode 100755 index 4250fd8f0a63..000000000000 --- a/mmv1/third_party/terraform/tpgdclresource/operations/datastore.go +++ /dev/null @@ -1,68 +0,0 @@ -package operations - -import ( - "bytes" - "context" - "fmt" - "time" - - dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" -) - -// DatastoreOperation can be parsed from the returned API operation and waited on. 
-type DatastoreOperation struct { - Name string `json:"name"` - Done bool `json:"done"` - Metadata *DatastoreOperationMetadata `json:"metadata"` - Error *DatastoreOperationError `json:"error"` - config *dcl.Config -} - -// DatastoreOperationMetadata is an error in a datastore operation. -type DatastoreOperationMetadata struct { - IndexID string `json:"indexId"` -} - -// DatastoreOperationError is an error in a datastore operation. -type DatastoreOperationError struct { - Code int64 `json:"code"` - Message string `json:"message"` -} - -// Wait waits for an DatastoreOperation to complete by fetching the operation until it completes. -func (op *DatastoreOperation) Wait(ctx context.Context, c *dcl.Config, _, _ string) error { - c.Logger.Infof("Waiting on operation: %v", op) - op.config = c - err := dcl.Do(ctx, op.operate, c.RetryProvider) - c.Logger.Infof("Completed operation: %v", op) - return err -} - -func (op *DatastoreOperation) operate(ctx context.Context) (*dcl.RetryDetails, error) { - u := dcl.URL(op.Name, "https://datastore.googleapis.com/v1/", op.config.BasePath, nil) - resp, err := dcl.SendRequest(ctx, op.config, "GET", u, &bytes.Buffer{}, nil) - if err != nil { - if dcl.IsRetryableRequestError(op.config, err, true, time.Now()) { - return nil, dcl.OperationNotDone{} - } - return nil, err - } - if err := dcl.ParseResponse(resp.Response, op); err != nil { - return nil, err - } - if !op.Done { - return nil, dcl.OperationNotDone{} - } - if op.Error != nil { - return nil, fmt.Errorf("operation received error: %+v", op.Error) - } - return resp, nil -} - -// FirstResponse returns the first response that this operation receives with the resource. -// This response may contain special information. 
-func (op *DatastoreOperation) FirstResponse() (map[string]any, bool) { - return map[string]any{ - "indexId": op.Metadata.IndexID, - }, false -} diff --git a/mmv1/third_party/terraform/tpgdclresource/operations/dns.go b/mmv1/third_party/terraform/tpgdclresource/operations/dns.go deleted file mode 100755 index 4d82b0be04eb..000000000000 --- a/mmv1/third_party/terraform/tpgdclresource/operations/dns.go +++ /dev/null @@ -1,59 +0,0 @@ -package operations - -import ( - "bytes" - "context" - "fmt" - "time" - - dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" -) - -// DNSOperation can be parsed from the returned API operation and waited on. -// This is used for Changes only. -// Project and ManagedZone must be set ahead of time. -type DNSOperation struct { - Status string `json:"status"` - ID string `json:"id"` - Project string - ManagedZone string - // other irrelevant fields omitted - - config *dcl.Config -} - -// Wait waits for an DNSOperation to complete by fetching the operation until it completes. 
-func (op *DNSOperation) Wait(ctx context.Context, c *dcl.Config, project, managedZone string) error { - c.Logger.Infof("Waiting on operation: %v", op) - op.config = c - op.ManagedZone = managedZone - op.Project = project - - err := dcl.Do(ctx, op.operate, c.RetryProvider) - c.Logger.Infof("Completed operation: %v", op) - return err -} - -func (op *DNSOperation) operate(ctx context.Context) (*dcl.RetryDetails, error) { - u := fmt.Sprintf("https://dns.googleapis.com/dns/v1/projects/%s/managedZones/%s/changes/%s", op.Project, op.ManagedZone, op.ID) - resp, err := dcl.SendRequest(ctx, op.config, "GET", u, &bytes.Buffer{}, nil) - if err != nil { - if dcl.IsRetryableRequestError(op.config, err, false, time.Now()) { - return nil, dcl.OperationNotDone{} - } - return nil, err - } - if err := dcl.ParseResponse(resp.Response, op); err != nil { - return nil, err - } - if op.Status != "done" { - return nil, dcl.OperationNotDone{} - } - return resp, nil -} - -// FirstResponse returns the first response that this operation receives with the resource. -// This response may contain special information. -func (op *DNSOperation) FirstResponse() (map[string]any, bool) { - return make(map[string]any), false -} diff --git a/mmv1/third_party/terraform/tpgdclresource/operations/knative.go b/mmv1/third_party/terraform/tpgdclresource/operations/knative.go deleted file mode 100755 index b0bfe061d4ac..000000000000 --- a/mmv1/third_party/terraform/tpgdclresource/operations/knative.go +++ /dev/null @@ -1,84 +0,0 @@ -package operations - -import ( - "bytes" - "context" - "fmt" - "time" - - dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" -) - -// KNativeOperation can be parsed from the returned Service. 
-type KNativeOperation struct { - Status KNativeOperationStatus `json:"status"` - Metadata KNativeOperationMetadata `json:"metadata"` - // other irrelevant fields omitted - - config *dcl.Config - basePath string - verb string - location string -} - -// KNativeOperationMetadata contains the Labels block. -type KNativeOperationMetadata struct { - SelfLink string `json:"selfLink"` - Labels map[string]string `json:"labels"` -} - -// KNativeOperationStatus contains the Conditions block. -type KNativeOperationStatus struct { - Conditions []KNativeOperationCondition `json:"conditions"` -} - -// KNativeOperationCondition contains the -type KNativeOperationCondition struct { - Type string `json:"type"` - Status string `json:"status"` -} - -// Wait waits for an DNSOperation to complete by fetching the operation until it completes. -func (op *KNativeOperation) Wait(ctx context.Context, c *dcl.Config, basePath, verb string) error { - c.Logger.Infof("Waiting on operation: %v", op) - op.config = c - op.basePath = basePath - op.verb = verb - - location, ok := op.Metadata.Labels["cloud.googleapis.com/location"] - if !ok { - return fmt.Errorf("no location found") - } - op.location = location - - err := dcl.Do(ctx, op.operate, c.RetryProvider) - c.Logger.Infof("Completed operation: %v", op) - return err -} - -func (op *KNativeOperation) operate(ctx context.Context) (*dcl.RetryDetails, error) { - u := fmt.Sprintf("https://%s-run.googleapis.com/%s", op.location, op.Metadata.SelfLink) - resp, err := dcl.SendRequest(ctx, op.config, "GET", u, &bytes.Buffer{}, nil) - if err != nil { - if dcl.IsRetryableRequestError(op.config, err, false, time.Now()) { - return nil, dcl.OperationNotDone{} - } - return nil, err - } - if err := dcl.ParseResponse(resp.Response, op); err != nil { - return nil, err - } - - for _, condition := range op.Status.Conditions { - if condition.Type == "Ready" && condition.Status == "True" { - return resp, nil - } - } - return nil, dcl.OperationNotDone{} -} - -// 
FirstResponse returns the first response that this operation receives with the resource. -// This response may contain special information. -func (op *KNativeOperation) FirstResponse() (map[string]any, bool) { - return make(map[string]any), false -} diff --git a/mmv1/third_party/terraform/tpgdclresource/operations/monitoring.go b/mmv1/third_party/terraform/tpgdclresource/operations/monitoring.go deleted file mode 100755 index e823252ef354..000000000000 --- a/mmv1/third_party/terraform/tpgdclresource/operations/monitoring.go +++ /dev/null @@ -1,34 +0,0 @@ -package operations - -import ( - "context" - "fmt" - "strings" - - dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" -) - -// MonitoringOperation can be parsed from the returned API operation and waited on. -type MonitoringOperation struct { - Name string `json:"name"` -} - -// Wait waits for an MonitoringOperation to complete by fetching the operation until it completes. -func (op *MonitoringOperation) Wait(ctx context.Context, c *dcl.Config, _, _ string) error { - if op.Name != "" { - // Names come in the form "accessPolicies/{{name}}" - parts := strings.Split(op.Name, "/") - op.Name = parts[len(parts)-1] - } - return nil -} - -// FetchName will fetch the operation and return the name of the resource created. -// Monitoring creates resources with machine generated names. -// It must be called after the resource has been created. 
-func (op *MonitoringOperation) FetchName() (*string, error) { - if op.Name == "" { - return nil, fmt.Errorf("this operation (%s) has no name and probably hasn't been run before", op.Name) - } - return &op.Name, nil -} diff --git a/mmv1/third_party/terraform/tpgdclresource/operations/osconfig.go b/mmv1/third_party/terraform/tpgdclresource/operations/osconfig.go deleted file mode 100755 index 7a61b4f1c371..000000000000 --- a/mmv1/third_party/terraform/tpgdclresource/operations/osconfig.go +++ /dev/null @@ -1,32 +0,0 @@ -package operations - -import ( - "bytes" - "context" - - dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" -) - -// OSPolicyAssignmentDeleteOperation can be parsed from the returned API operation and waited on. -type OSPolicyAssignmentDeleteOperation struct { - Name string `json:"name"` - - config *dcl.Config -} - -// Wait waits for an OSPolicyAssignmentDeleteOperation to complete by waiting until the operation returns a 404. -func (op *OSPolicyAssignmentDeleteOperation) Wait(ctx context.Context, c *dcl.Config, _, _ string) error { - c.Logger.Infof("Waiting on: %q", op.Name) - op.config = c - - return dcl.Do(ctx, op.operate, c.RetryProvider) -} - -func (op *OSPolicyAssignmentDeleteOperation) operate(ctx context.Context) (*dcl.RetryDetails, error) { - u := dcl.URL(op.Name, "https://osconfig.googleapis.com/v1alpha", op.config.BasePath, nil) - resp, err := dcl.SendRequest(ctx, op.config, "GET", u, &bytes.Buffer{}, nil) - if dcl.IsNotFound(err) { - return nil, nil - } - return resp, dcl.OperationNotDone{} -} diff --git a/mmv1/third_party/terraform/tpgdclresource/operations/sql.go b/mmv1/third_party/terraform/tpgdclresource/operations/sql.go deleted file mode 100755 index 7cb22f7474e6..000000000000 --- a/mmv1/third_party/terraform/tpgdclresource/operations/sql.go +++ /dev/null @@ -1,78 +0,0 @@ -package operations - -import ( - "bytes" - "context" - "time" - - glog "github.com/golang/glog" - dcl 
"github.com/hashicorp/terraform-provider-google/google/tpgdclresource" -) - -// SQLOperation can be parsed from the returned API operation and waited on. -type SQLOperation struct { - ID string `json:"id"` - SelfLink string `json:"selfLink"` - Status string `json:"status"` - TargetLink string `json:"targetLink"` - // other irrelevant fields omitted - - config *dcl.Config -} - -// Wait waits for an Operation to complete by fetching the operation until it completes. -func (op *SQLOperation) Wait(ctx context.Context, c *dcl.Config, _, _ string) error { - glog.Infof("Waiting on operation: %v", op) - op.config = c - - err := dcl.Do(ctx, op.operate, c.RetryProvider) - c.Logger.Infof("Completed operation: %v", op) - return err -} - -func (op *SQLOperation) operate(ctx context.Context) (*dcl.RetryDetails, error) { - resp, err := dcl.SendRequest(ctx, op.config, "GET", op.SelfLink, &bytes.Buffer{}, nil) - if err != nil { - if dcl.IsRetryableRequestError(op.config, err, true, time.Now()) { - return nil, dcl.OperationNotDone{} - } - return nil, err - } - if err := dcl.ParseResponse(resp.Response, op); err != nil { - return nil, err - } - if op.Status != "DONE" { - return nil, dcl.OperationNotDone{} - } - return resp, nil -} - -// FirstResponse returns the first response that this operation receives with the resource. -// This response may contain special information. -func (op *SQLOperation) FirstResponse() (map[string]any, bool) { - return make(map[string]any), false -} - -// SQLCreateCertOperation is the operation used for creating SSL certs. -// They have a different format from other resources and other methods. -type SQLCreateCertOperation struct { - Operation SQLOperation `json:"operation"` - ClientCert struct { - CertInfo map[string]any `json:"certInfo"` - } `json:"clientCert"` - response map[string]any -} - -// Wait waits for an SQLOperation to complete by fetching the operation until it completes. 
-func (op *SQLCreateCertOperation) Wait(ctx context.Context, c *dcl.Config, _, _ string) error { - return op.Operation.Wait(ctx, c, "", "") -} - -// FirstResponse returns the first response that this operation receives with the resource. -// This response may contain special information. -func (op *SQLCreateCertOperation) FirstResponse() (map[string]any, bool) { - if len(op.ClientCert.CertInfo) > 0 { - return op.ClientCert.CertInfo, true - } - return make(map[string]any), false -} diff --git a/mmv1/third_party/terraform/tpgdclresource/orgpolicy_utils.go b/mmv1/third_party/terraform/tpgdclresource/orgpolicy_utils.go deleted file mode 100644 index 2cf61cf67ad1..000000000000 --- a/mmv1/third_party/terraform/tpgdclresource/orgpolicy_utils.go +++ /dev/null @@ -1,35 +0,0 @@ -package tpgdclresource - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-provider-google/google/tpgresource" - transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" -) - -// OrgPolicyPolicy has a custom import method because the parent field needs to allow an additional forward slash -// to represent the type of parent (e.g. projects/{project_id}). 
-func ResourceOrgPolicyPolicyCustomImport(d *schema.ResourceData, meta interface{}) error { - config := meta.(*transport_tpg.Config) - if err := tpgresource.ParseImportId([]string{ - "^(?P[^/]+/?[^/]*)/policies/(?P[^/]+)", - "^(?P[^/]+/?[^/]*)/(?P[^/]+)", - }, d, config); err != nil { - return err - } - - // Replace import id for the resource id - id, err := tpgresource.ReplaceVarsRecursive(d, config, "{{parent}}/policies/{{name}}", false, 0) - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - - // reset name to match the one from resourceOrgPolicyPolicyRead - if err := d.Set("name", id); err != nil { - return fmt.Errorf("Error setting name: %s", err) - } - d.SetId(id) - - return nil -} From 554d6586b144b7e8d9214c3ac66cc44fd17cfcdc Mon Sep 17 00:00:00 2001 From: Chris Hawk Date: Thu, 19 Mar 2026 10:17:13 -0700 Subject: [PATCH 08/13] More files de-templatized --- .../{provider_dcl_resources.go.tmpl => provider_dcl_resources.go} | 0 .../terraform/services/cloudbuild/{client.go.tmpl => client.go} | 0 .../cloudbuild/{cloudbuild_utils.go.tmpl => cloudbuild_utils.go} | 0 .../terraform/services/dataproc/{client.go.tmpl => client.go} | 0 .../terraform/services/gkehub/{client.go.tmpl => client.go} | 0 .../services/gkehub/{poco_utils.go.tmpl => poco_utils.go} | 0 6 files changed, 0 insertions(+), 0 deletions(-) rename mmv1/third_party/terraform/provider/{provider_dcl_resources.go.tmpl => provider_dcl_resources.go} (100%) rename mmv1/third_party/terraform/services/cloudbuild/{client.go.tmpl => client.go} (100%) rename mmv1/third_party/terraform/services/cloudbuild/{cloudbuild_utils.go.tmpl => cloudbuild_utils.go} (100%) rename mmv1/third_party/terraform/services/dataproc/{client.go.tmpl => client.go} (100%) rename mmv1/third_party/terraform/services/gkehub/{client.go.tmpl => client.go} (100%) rename mmv1/third_party/terraform/services/gkehub/{poco_utils.go.tmpl => poco_utils.go} (100%) diff --git 
a/mmv1/third_party/terraform/provider/provider_dcl_resources.go.tmpl b/mmv1/third_party/terraform/provider/provider_dcl_resources.go similarity index 100% rename from mmv1/third_party/terraform/provider/provider_dcl_resources.go.tmpl rename to mmv1/third_party/terraform/provider/provider_dcl_resources.go diff --git a/mmv1/third_party/terraform/services/cloudbuild/client.go.tmpl b/mmv1/third_party/terraform/services/cloudbuild/client.go similarity index 100% rename from mmv1/third_party/terraform/services/cloudbuild/client.go.tmpl rename to mmv1/third_party/terraform/services/cloudbuild/client.go diff --git a/mmv1/third_party/terraform/services/cloudbuild/cloudbuild_utils.go.tmpl b/mmv1/third_party/terraform/services/cloudbuild/cloudbuild_utils.go similarity index 100% rename from mmv1/third_party/terraform/services/cloudbuild/cloudbuild_utils.go.tmpl rename to mmv1/third_party/terraform/services/cloudbuild/cloudbuild_utils.go diff --git a/mmv1/third_party/terraform/services/dataproc/client.go.tmpl b/mmv1/third_party/terraform/services/dataproc/client.go similarity index 100% rename from mmv1/third_party/terraform/services/dataproc/client.go.tmpl rename to mmv1/third_party/terraform/services/dataproc/client.go diff --git a/mmv1/third_party/terraform/services/gkehub/client.go.tmpl b/mmv1/third_party/terraform/services/gkehub/client.go similarity index 100% rename from mmv1/third_party/terraform/services/gkehub/client.go.tmpl rename to mmv1/third_party/terraform/services/gkehub/client.go diff --git a/mmv1/third_party/terraform/services/gkehub/poco_utils.go.tmpl b/mmv1/third_party/terraform/services/gkehub/poco_utils.go similarity index 100% rename from mmv1/third_party/terraform/services/gkehub/poco_utils.go.tmpl rename to mmv1/third_party/terraform/services/gkehub/poco_utils.go From cf3129b23ee098721b8262e70e28c8d618441a9a Mon Sep 17 00:00:00 2001 From: Chris Hawk Date: Thu, 19 Mar 2026 10:21:35 -0700 Subject: [PATCH 09/13] Run gofmt --- 
.../services/apikeys/resource_apikeys_key_generated_test.go | 2 +- .../resource_container_azure_client_generated_test.go | 2 +- .../dataplex/resource_dataplex_asset_generated_test.go | 2 +- .../services/dataplex/resource_dataplex_lake_generated_test.go | 2 +- .../services/dataplex/resource_dataplex_zone_generated_test.go | 2 +- .../resource_firebaserules_ruleset_generated_test.go | 2 +- .../resource_recaptcha_enterprise_key_generated_test.go | 2 +- mmv1/third_party/terraform/tpgdclresource/dcl.go | 3 --- 8 files changed, 7 insertions(+), 10 deletions(-) diff --git a/mmv1/third_party/terraform/services/apikeys/resource_apikeys_key_generated_test.go b/mmv1/third_party/terraform/services/apikeys/resource_apikeys_key_generated_test.go index 1159bbf09a3a..e92d63798310 100644 --- a/mmv1/third_party/terraform/services/apikeys/resource_apikeys_key_generated_test.go +++ b/mmv1/third_party/terraform/services/apikeys/resource_apikeys_key_generated_test.go @@ -6,12 +6,12 @@ import ( "strings" "testing" - dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-google/google/acctest" "github.com/hashicorp/terraform-provider-google/google/envvar" "github.com/hashicorp/terraform-provider-google/google/services/apikeys" + dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" ) func TestAccApikeysKey_AndroidKey(t *testing.T) { diff --git a/mmv1/third_party/terraform/services/containerazure/resource_container_azure_client_generated_test.go b/mmv1/third_party/terraform/services/containerazure/resource_container_azure_client_generated_test.go index aca3cf0b360a..b18b2bfd91b1 100644 --- a/mmv1/third_party/terraform/services/containerazure/resource_container_azure_client_generated_test.go +++ 
b/mmv1/third_party/terraform/services/containerazure/resource_container_azure_client_generated_test.go @@ -5,7 +5,7 @@ import ( "fmt" "strings" "testing" - + "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-google/google/acctest" diff --git a/mmv1/third_party/terraform/services/dataplex/resource_dataplex_asset_generated_test.go b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_asset_generated_test.go index ed9cfab421b7..fd11ca55f0ee 100644 --- a/mmv1/third_party/terraform/services/dataplex/resource_dataplex_asset_generated_test.go +++ b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_asset_generated_test.go @@ -5,7 +5,7 @@ import ( "fmt" "strings" "testing" - + "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-google/google/acctest" diff --git a/mmv1/third_party/terraform/services/dataplex/resource_dataplex_lake_generated_test.go b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_lake_generated_test.go index f26d5255171d..7e4043359826 100644 --- a/mmv1/third_party/terraform/services/dataplex/resource_dataplex_lake_generated_test.go +++ b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_lake_generated_test.go @@ -5,7 +5,7 @@ import ( "fmt" "strings" "testing" - + "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-google/google/acctest" diff --git a/mmv1/third_party/terraform/services/dataplex/resource_dataplex_zone_generated_test.go b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_zone_generated_test.go index 5c71215790ea..caaf1f4a280f 100644 --- a/mmv1/third_party/terraform/services/dataplex/resource_dataplex_zone_generated_test.go +++ 
b/mmv1/third_party/terraform/services/dataplex/resource_dataplex_zone_generated_test.go @@ -5,7 +5,7 @@ import ( "fmt" "strings" "testing" - + "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-google/google/acctest" diff --git a/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_ruleset_generated_test.go b/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_ruleset_generated_test.go index 79cc2ef28871..76eee6c75919 100644 --- a/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_ruleset_generated_test.go +++ b/mmv1/third_party/terraform/services/firebaserules/resource_firebaserules_ruleset_generated_test.go @@ -5,7 +5,7 @@ import ( "fmt" "strings" "testing" - + "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-google/google/acctest" diff --git a/mmv1/third_party/terraform/services/recaptchaenterprise/resource_recaptcha_enterprise_key_generated_test.go b/mmv1/third_party/terraform/services/recaptchaenterprise/resource_recaptcha_enterprise_key_generated_test.go index 3e410201ed2e..9df2c9b9c5ca 100644 --- a/mmv1/third_party/terraform/services/recaptchaenterprise/resource_recaptcha_enterprise_key_generated_test.go +++ b/mmv1/third_party/terraform/services/recaptchaenterprise/resource_recaptcha_enterprise_key_generated_test.go @@ -5,7 +5,7 @@ import ( "fmt" "strings" "testing" - + "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-google/google/acctest" diff --git a/mmv1/third_party/terraform/tpgdclresource/dcl.go b/mmv1/third_party/terraform/tpgdclresource/dcl.go index d960e170b6dd..e99e66c1af2c 100644 --- a/mmv1/third_party/terraform/tpgdclresource/dcl.go +++ 
b/mmv1/third_party/terraform/tpgdclresource/dcl.go @@ -1,8 +1,5 @@ package tpgdclresource -import ( -) - var ( // CreateDirective restricts Apply to creating resources for Create CreateDirective = []ApplyOption{ From 2474383916ff739aa784e5489c39857bc9b3d72c Mon Sep 17 00:00:00 2001 From: Chris Hawk Date: Thu, 19 Mar 2026 11:13:19 -0700 Subject: [PATCH 10/13] gkehub, containeraws, and containerazure test fixes --- .../resource_container_aws_node_pool.go.tmpl | 11 +++++++---- .../resource_container_azure_node_pool.go.tmpl | 11 +++++++---- .../resource_gke_hub_feature_membership_test.go.tmpl | 2 +- 3 files changed, 15 insertions(+), 9 deletions(-) diff --git a/mmv1/third_party/terraform/services/containeraws/resource_container_aws_node_pool.go.tmpl b/mmv1/third_party/terraform/services/containeraws/resource_container_aws_node_pool.go.tmpl index d34cb8486735..7704fe0b4950 100644 --- a/mmv1/third_party/terraform/services/containeraws/resource_container_aws_node_pool.go.tmpl +++ b/mmv1/third_party/terraform/services/containeraws/resource_container_aws_node_pool.go.tmpl @@ -1295,15 +1295,18 @@ func expandContainerAwsNodePoolManagement(o interface{}) *NodePoolManagement { } func flattenContainerAwsNodePoolManagement(obj *NodePoolManagement) interface{} { - if obj == nil || obj.Empty() { + if obj == nil { return nil } - transformed := map[string]interface{}{ - "auto_repair": obj.AutoRepair, + transformed := make(map[string]interface{}) + + if obj.AutoRepair == nil || obj.Empty() { + transformed["auto_repair"] = false + } else { + transformed["auto_repair"] = obj.AutoRepair } return []interface{}{transformed} - } func expandContainerAwsNodePoolUpdateSettings(o interface{}) *NodePoolUpdateSettings { diff --git a/mmv1/third_party/terraform/services/containerazure/resource_container_azure_node_pool.go.tmpl b/mmv1/third_party/terraform/services/containerazure/resource_container_azure_node_pool.go.tmpl index 185e109c6163..1bb03cce5412 100644 --- 
a/mmv1/third_party/terraform/services/containerazure/resource_container_azure_node_pool.go.tmpl +++ b/mmv1/third_party/terraform/services/containerazure/resource_container_azure_node_pool.go.tmpl @@ -806,15 +806,18 @@ func expandContainerAzureNodePoolManagement(o interface{}) *NodePoolManagement { } func flattenContainerAzureNodePoolManagement(obj *NodePoolManagement) interface{} { - if obj == nil || obj.Empty() { + if obj == nil { return nil } - transformed := map[string]interface{}{ - "auto_repair": obj.AutoRepair, + transformed := make(map[string]interface{}) + + if obj.AutoRepair == nil || obj.Empty() { + transformed["auto_repair"] = false + } else { + transformed["auto_repair"] = obj.AutoRepair } return []interface{}{transformed} - } func flattenContainerAzureNodePoolAnnotations(v map[string]string, d *schema.ResourceData) interface{} { diff --git a/mmv1/third_party/terraform/services/gkehub/resource_gke_hub_feature_membership_test.go.tmpl b/mmv1/third_party/terraform/services/gkehub/resource_gke_hub_feature_membership_test.go.tmpl index f99844c4c398..3bbc24a27af3 100644 --- a/mmv1/third_party/terraform/services/gkehub/resource_gke_hub_feature_membership_test.go.tmpl +++ b/mmv1/third_party/terraform/services/gkehub/resource_gke_hub_feature_membership_test.go.tmpl @@ -1338,7 +1338,7 @@ resource "google_project_service" "container" { resource "google_project_service" "gkehub" { project = google_project.project.project_id - service = "googleapis.com" + service = "gkehub.googleapis.com" depends_on = [google_project_service.container] } From 0e7a4b56676599f93bb45abe250bd51cb07f274c Mon Sep 17 00:00:00 2001 From: Chris Hawk Date: Thu, 19 Mar 2026 11:31:28 -0700 Subject: [PATCH 11/13] Remove extraneous _schema.go files --- .../terraform/services/apikeys/key_schema.go | 310 --- .../assuredworkloads/workload_schema.go | 552 ---- .../cloudbuild/worker_pool_schema.go.tmpl | 296 --- .../clouddeploy/delivery_pipeline_schema.go | 753 ------ 
.../services/clouddeploy/target_schema.go | 488 ---- .../containeraws/cluster_schema.go.tmpl | 754 ------ .../containeraws/node_pool_schema.go.tmpl | 661 ----- .../containerazure/azure_client_schema.go | 179 -- .../containerazure/cluster_schema.go.tmpl | 658 ----- .../containerazure/node_pool_schema.go.tmpl | 417 --- .../services/dataplex/asset_schema.go.tmpl | 504 ---- .../services/dataplex/lake_schema.go.tmpl | 280 --- .../services/dataplex/zone_schema.go.tmpl | 376 --- .../dataproc/workflow_template_schema.go.tmpl | 2230 ----------------- .../services/firebaserules/release_schema.go | 158 -- .../services/firebaserules/ruleset_schema.go | 211 -- .../gkehub/feature_membership_schema.go.tmpl | 807 ------ .../services/gkehub/feature_schema.go.tmpl | 331 --- .../services/gkehub/membership_schema.go.tmpl | 410 --- .../recaptchaenterprise/key_schema.go | 317 --- 20 files changed, 10692 deletions(-) delete mode 100644 mmv1/third_party/terraform/services/apikeys/key_schema.go delete mode 100644 mmv1/third_party/terraform/services/assuredworkloads/workload_schema.go delete mode 100644 mmv1/third_party/terraform/services/cloudbuild/worker_pool_schema.go.tmpl delete mode 100644 mmv1/third_party/terraform/services/clouddeploy/delivery_pipeline_schema.go delete mode 100644 mmv1/third_party/terraform/services/clouddeploy/target_schema.go delete mode 100644 mmv1/third_party/terraform/services/containeraws/cluster_schema.go.tmpl delete mode 100644 mmv1/third_party/terraform/services/containeraws/node_pool_schema.go.tmpl delete mode 100644 mmv1/third_party/terraform/services/containerazure/azure_client_schema.go delete mode 100644 mmv1/third_party/terraform/services/containerazure/cluster_schema.go.tmpl delete mode 100644 mmv1/third_party/terraform/services/containerazure/node_pool_schema.go.tmpl delete mode 100644 mmv1/third_party/terraform/services/dataplex/asset_schema.go.tmpl delete mode 100644 mmv1/third_party/terraform/services/dataplex/lake_schema.go.tmpl delete mode 
100644 mmv1/third_party/terraform/services/dataplex/zone_schema.go.tmpl delete mode 100644 mmv1/third_party/terraform/services/dataproc/workflow_template_schema.go.tmpl delete mode 100644 mmv1/third_party/terraform/services/firebaserules/release_schema.go delete mode 100644 mmv1/third_party/terraform/services/firebaserules/ruleset_schema.go delete mode 100644 mmv1/third_party/terraform/services/gkehub/feature_membership_schema.go.tmpl delete mode 100644 mmv1/third_party/terraform/services/gkehub/feature_schema.go.tmpl delete mode 100644 mmv1/third_party/terraform/services/gkehub/membership_schema.go.tmpl delete mode 100644 mmv1/third_party/terraform/services/recaptchaenterprise/key_schema.go diff --git a/mmv1/third_party/terraform/services/apikeys/key_schema.go b/mmv1/third_party/terraform/services/apikeys/key_schema.go deleted file mode 100644 index ab554b48e87b..000000000000 --- a/mmv1/third_party/terraform/services/apikeys/key_schema.go +++ /dev/null @@ -1,310 +0,0 @@ -package apikeys - -import ( - dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" -) - -func DCLKeySchema() *dcl.Schema { - return &dcl.Schema{ - Info: &dcl.Info{ - Title: "Apikeys/Key", - Description: "The Apikeys Key resource", - StructName: "Key", - }, - Paths: &dcl.Paths{ - Get: &dcl.Path{ - Description: "The function used to get information about a Key", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "key", - Required: true, - Description: "A full instance of a Key", - }, - }, - }, - Apply: &dcl.Path{ - Description: "The function used to apply information about a Key", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "key", - Required: true, - Description: "A full instance of a Key", - }, - }, - }, - Delete: &dcl.Path{ - Description: "The function used to delete a Key", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "key", - Required: true, - Description: "A full instance of a Key", - }, - }, - }, - DeleteAll: 
&dcl.Path{ - Description: "The function used to delete all Key", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - List: &dcl.Path{ - Description: "The function used to list information about many Key", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - }, - Components: &dcl.Components{ - Schemas: map[string]*dcl.Component{ - "Key": &dcl.Component{ - Title: "Key", - ID: "projects/{{project}}/locations/global/keys/{{name}}", - Locations: []string{ - "global", - }, - ParentContainer: "project", - HasCreate: true, - SchemaProperty: dcl.Property{ - Type: "object", - Required: []string{ - "name", - "project", - }, - Properties: map[string]*dcl.Property{ - "displayName": &dcl.Property{ - Type: "string", - GoName: "DisplayName", - Description: "Human-readable display name of this API key. Modifiable by user.", - }, - "keyString": &dcl.Property{ - Type: "string", - GoName: "KeyString", - ReadOnly: true, - Description: "Output only. An encrypted and signed value held by this key. This field can be accessed only through the `GetKeyString` method.", - Immutable: true, - Sensitive: true, - }, - "name": &dcl.Property{ - Type: "string", - GoName: "Name", - Description: "The resource name of the key. The name must be unique within the project, must conform with RFC-1034, is restricted to lower-cased letters, and has a maximum length of 63 characters. 
In another word, the name must match the regular expression: `[a-z]([a-z0-9-]{0,61}[a-z0-9])?`.", - Immutable: true, - Parameter: true, - }, - "project": &dcl.Property{ - Type: "string", - GoName: "Project", - Description: "The project for the resource", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Cloudresourcemanager/Project", - Field: "name", - Parent: true, - }, - }, - Parameter: true, - }, - "restrictions": &dcl.Property{ - Type: "object", - GoName: "Restrictions", - GoType: "KeyRestrictions", - Description: "Key restrictions.", - Properties: map[string]*dcl.Property{ - "androidKeyRestrictions": &dcl.Property{ - Type: "object", - GoName: "AndroidKeyRestrictions", - GoType: "KeyRestrictionsAndroidKeyRestrictions", - Description: "The Android apps that are allowed to use the key.", - Conflicts: []string{ - "browserKeyRestrictions", - "serverKeyRestrictions", - "iosKeyRestrictions", - }, - Required: []string{ - "allowedApplications", - }, - Properties: map[string]*dcl.Property{ - "allowedApplications": &dcl.Property{ - Type: "array", - GoName: "AllowedApplications", - Description: "A list of Android applications that are allowed to make API calls with this key.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "KeyRestrictionsAndroidKeyRestrictionsAllowedApplications", - Required: []string{ - "sha1Fingerprint", - "packageName", - }, - Properties: map[string]*dcl.Property{ - "packageName": &dcl.Property{ - Type: "string", - GoName: "PackageName", - Description: "The package name of the application.", - }, - "sha1Fingerprint": &dcl.Property{ - Type: "string", - GoName: "Sha1Fingerprint", - Description: "The SHA1 fingerprint of the application. For example, both sha1 formats are acceptable : DA:39:A3:EE:5E:6B:4B:0D:32:55:BF:EF:95:60:18:90:AF:D8:07:09 or DA39A3EE5E6B4B0D3255BFEF95601890AFD80709. 
Output format is the latter.", - }, - }, - }, - }, - }, - }, - "apiTargets": &dcl.Property{ - Type: "array", - GoName: "ApiTargets", - Description: "A restriction for a specific service and optionally one or more specific methods. Requests are allowed if they match any of these restrictions. If no restrictions are specified, all targets are allowed.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "KeyRestrictionsApiTargets", - Required: []string{ - "service", - }, - Properties: map[string]*dcl.Property{ - "methods": &dcl.Property{ - Type: "array", - GoName: "Methods", - Description: "Optional. List of one or more methods that can be called. If empty, all methods for the service are allowed. A wildcard (*) can be used as the last symbol. Valid examples: `google.cloud.translate.v2.TranslateService.GetSupportedLanguage` `TranslateText` `Get*` `translate.googleapis.com.Get*`", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "service": &dcl.Property{ - Type: "string", - GoName: "Service", - Description: "The service for this restriction. It should be the canonical service name, for example: `translate.googleapis.com`. 
You can use `gcloud services list` to get a list of services that are enabled in the project.", - }, - }, - }, - }, - "browserKeyRestrictions": &dcl.Property{ - Type: "object", - GoName: "BrowserKeyRestrictions", - GoType: "KeyRestrictionsBrowserKeyRestrictions", - Description: "The HTTP referrers (websites) that are allowed to use the key.", - Conflicts: []string{ - "serverKeyRestrictions", - "androidKeyRestrictions", - "iosKeyRestrictions", - }, - Required: []string{ - "allowedReferrers", - }, - Properties: map[string]*dcl.Property{ - "allowedReferrers": &dcl.Property{ - Type: "array", - GoName: "AllowedReferrers", - Description: "A list of regular expressions for the referrer URLs that are allowed to make API calls with this key.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - }, - }, - "iosKeyRestrictions": &dcl.Property{ - Type: "object", - GoName: "IosKeyRestrictions", - GoType: "KeyRestrictionsIosKeyRestrictions", - Description: "The iOS apps that are allowed to use the key.", - Conflicts: []string{ - "browserKeyRestrictions", - "serverKeyRestrictions", - "androidKeyRestrictions", - }, - Required: []string{ - "allowedBundleIds", - }, - Properties: map[string]*dcl.Property{ - "allowedBundleIds": &dcl.Property{ - Type: "array", - GoName: "AllowedBundleIds", - Description: "A list of bundle IDs that are allowed when making API calls with this key.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - }, - }, - "serverKeyRestrictions": &dcl.Property{ - Type: "object", - GoName: "ServerKeyRestrictions", - GoType: "KeyRestrictionsServerKeyRestrictions", - Description: "The IP addresses of callers that are allowed to use the key.", - Conflicts: []string{ - "browserKeyRestrictions", - "androidKeyRestrictions", - "iosKeyRestrictions", - }, - Required: []string{ - "allowedIps", - }, - Properties: map[string]*dcl.Property{ - "allowedIps": 
&dcl.Property{ - Type: "array", - GoName: "AllowedIps", - Description: "A list of the caller IP addresses that are allowed to make API calls with this key.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - }, - }, - }, - }, - "serviceAccountEmail": &dcl.Property{ - Type: "string", - GoName: "ServiceAccountEmail", - Description: "The email of the service account the key is bound to. If this field is specified, the key is a service account bound key and auth enabled. See [Documentation](https://cloud.google.com/docs/authentication/api-keys?#api-keys-bound-sa) for more details.", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Iam/ServiceAccount", - Field: "email", - }, - }, - }, - "uid": &dcl.Property{ - Type: "string", - GoName: "Uid", - ReadOnly: true, - Description: "Output only. Unique id in UUID4 format.", - Immutable: true, - }, - }, - }, - }, - }, - }, - } -} diff --git a/mmv1/third_party/terraform/services/assuredworkloads/workload_schema.go b/mmv1/third_party/terraform/services/assuredworkloads/workload_schema.go deleted file mode 100644 index 682cc15e4d18..000000000000 --- a/mmv1/third_party/terraform/services/assuredworkloads/workload_schema.go +++ /dev/null @@ -1,552 +0,0 @@ -package assuredworkloads - -import ( - dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" -) - -func DCLWorkloadSchema() *dcl.Schema { - return &dcl.Schema{ - Info: &dcl.Info{ - Title: "AssuredWorkloads/Workload", - Description: "The AssuredWorkloads Workload resource", - StructName: "Workload", - }, - Paths: &dcl.Paths{ - Get: &dcl.Path{ - Description: "The function used to get information about a Workload", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "workload", - Required: true, - Description: "A full instance of a Workload", - }, - }, - }, - Apply: &dcl.Path{ - Description: "The function used to 
apply information about a Workload", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "workload", - Required: true, - Description: "A full instance of a Workload", - }, - }, - }, - Delete: &dcl.Path{ - Description: "The function used to delete a Workload", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "workload", - Required: true, - Description: "A full instance of a Workload", - }, - }, - }, - DeleteAll: &dcl.Path{ - Description: "The function used to delete all Workload", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "organization", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "location", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - List: &dcl.Path{ - Description: "The function used to list information about many Workload", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "organization", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "location", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - }, - Components: &dcl.Components{ - Schemas: map[string]*dcl.Component{ - "Workload": &dcl.Component{ - Title: "Workload", - ID: "organizations/{{organization}}/locations/{{location}}/workloads/{{name}}", - UsesStateHint: true, - ParentContainer: "organization", - LabelsField: "labels", - HasCreate: true, - SchemaProperty: dcl.Property{ - Type: "object", - Required: []string{ - "displayName", - "complianceRegime", - "organization", - "location", - }, - Properties: map[string]*dcl.Property{ - "billingAccount": &dcl.Property{ - Type: "string", - GoName: "BillingAccount", - Description: "Optional. Input only. The billing account used for the resources which are direct children of workload. 
This billing account is initially associated with the resources created as part of Workload creation. After the initial creation of these resources, the customer can change the assigned billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF`.", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Cloudresourcemanager/BillingAccount", - Field: "name", - }, - }, - Unreadable: true, - }, - "complianceRegime": &dcl.Property{ - Type: "string", - GoName: "ComplianceRegime", - GoType: "WorkloadComplianceRegimeEnum", - Description: "Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT, KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS, HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS, HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS_US_SUPPORT, IRS_1075", - Immutable: true, - Enum: []string{ - "COMPLIANCE_REGIME_UNSPECIFIED", - "IL4", - "CJIS", - "FEDRAMP_HIGH", - "FEDRAMP_MODERATE", - "US_REGIONAL_ACCESS", - "HIPAA", - "HITRUST", - "EU_REGIONS_AND_SUPPORT", - "CA_REGIONS_AND_SUPPORT", - "ITAR", - "AU_REGIONS_AND_US_SUPPORT", - "ASSURED_WORKLOADS_FOR_PARTNERS", - "ISR_REGIONS", - "ISR_REGIONS_AND_SUPPORT", - "CA_PROTECTED_B", - "IL5", - "IL2", - "JP_REGIONS_AND_SUPPORT", - "KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS", - "REGIONAL_CONTROLS", - "HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS", - "HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS_US_SUPPORT", - "IRS_1075", - }, - }, - "complianceStatus": &dcl.Property{ - Type: "object", - GoName: "ComplianceStatus", - GoType: "WorkloadComplianceStatus", - 
ReadOnly: true, - Description: "Output only. Count of active Violations in the Workload.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "acknowledgedViolationCount": &dcl.Property{ - Type: "array", - GoName: "AcknowledgedViolationCount", - Description: "Number of current orgPolicy violations which are acknowledged.", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "integer", - Format: "int64", - GoType: "int64", - }, - }, - "activeViolationCount": &dcl.Property{ - Type: "array", - GoName: "ActiveViolationCount", - Description: "Number of current orgPolicy violations which are not acknowledged.", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "integer", - Format: "int64", - GoType: "int64", - }, - }, - }, - }, - "compliantButDisallowedServices": &dcl.Property{ - Type: "array", - GoName: "CompliantButDisallowedServices", - ReadOnly: true, - Description: "Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke workloads.restrictAllowedResources endpoint to allow your project developers to use these services in their environment.", - Immutable: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "createTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "CreateTime", - ReadOnly: true, - Description: "Output only. Immutable. The Workload creation timestamp.", - Immutable: true, - }, - "displayName": &dcl.Property{ - Type: "string", - GoName: "DisplayName", - Description: "Required. The user-assigned display name of the Workload. When present it must be between 4 to 30 characters. Allowed characters are: lowercase and uppercase letters, numbers, hyphen, and spaces. 
Example: My Workload", - }, - "ekmProvisioningResponse": &dcl.Property{ - Type: "object", - GoName: "EkmProvisioningResponse", - GoType: "WorkloadEkmProvisioningResponse", - ReadOnly: true, - Description: "Optional. Represents the Ekm Provisioning State of the given workload.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "ekmProvisioningErrorDomain": &dcl.Property{ - Type: "string", - GoName: "EkmProvisioningErrorDomain", - GoType: "WorkloadEkmProvisioningResponseEkmProvisioningErrorDomainEnum", - Description: "Indicates Ekm provisioning error if any. Possible values: EKM_PROVISIONING_ERROR_DOMAIN_UNSPECIFIED, UNSPECIFIED_ERROR, GOOGLE_SERVER_ERROR, EXTERNAL_USER_ERROR, EXTERNAL_PARTNER_ERROR, TIMEOUT_ERROR", - Immutable: true, - Enum: []string{ - "EKM_PROVISIONING_ERROR_DOMAIN_UNSPECIFIED", - "UNSPECIFIED_ERROR", - "GOOGLE_SERVER_ERROR", - "EXTERNAL_USER_ERROR", - "EXTERNAL_PARTNER_ERROR", - "TIMEOUT_ERROR", - }, - }, - "ekmProvisioningErrorMapping": &dcl.Property{ - Type: "string", - GoName: "EkmProvisioningErrorMapping", - GoType: "WorkloadEkmProvisioningResponseEkmProvisioningErrorMappingEnum", - Description: "Detailed error message if Ekm provisioning fails Possible values: EKM_PROVISIONING_ERROR_MAPPING_UNSPECIFIED, INVALID_SERVICE_ACCOUNT, MISSING_METRICS_SCOPE_ADMIN_PERMISSION, MISSING_EKM_CONNECTION_ADMIN_PERMISSION", - Immutable: true, - Enum: []string{ - "EKM_PROVISIONING_ERROR_MAPPING_UNSPECIFIED", - "INVALID_SERVICE_ACCOUNT", - "MISSING_METRICS_SCOPE_ADMIN_PERMISSION", - "MISSING_EKM_CONNECTION_ADMIN_PERMISSION", - }, - }, - "ekmProvisioningState": &dcl.Property{ - Type: "string", - GoName: "EkmProvisioningState", - GoType: "WorkloadEkmProvisioningResponseEkmProvisioningStateEnum", - Description: "Indicates Ekm enrollment Provisioning of a given workload. 
Possible values: EKM_PROVISIONING_STATE_UNSPECIFIED, EKM_PROVISIONING_STATE_PENDING, EKM_PROVISIONING_STATE_FAILED, EKM_PROVISIONING_STATE_COMPLETED", - Immutable: true, - Enum: []string{ - "EKM_PROVISIONING_STATE_UNSPECIFIED", - "EKM_PROVISIONING_STATE_PENDING", - "EKM_PROVISIONING_STATE_FAILED", - "EKM_PROVISIONING_STATE_COMPLETED", - }, - }, - }, - }, - "enableSovereignControls": &dcl.Property{ - Type: "boolean", - GoName: "EnableSovereignControls", - Description: "Optional. Indicates the sovereignty status of the given workload. Currently meant to be used by Europe/Canada customers.", - Immutable: true, - }, - "kajEnrollmentState": &dcl.Property{ - Type: "string", - GoName: "KajEnrollmentState", - GoType: "WorkloadKajEnrollmentStateEnum", - ReadOnly: true, - Description: "Output only. Represents the KAJ enrollment state of the given workload. Possible values: KAJ_ENROLLMENT_STATE_UNSPECIFIED, KAJ_ENROLLMENT_STATE_PENDING, KAJ_ENROLLMENT_STATE_COMPLETE", - Immutable: true, - Enum: []string{ - "KAJ_ENROLLMENT_STATE_UNSPECIFIED", - "KAJ_ENROLLMENT_STATE_PENDING", - "KAJ_ENROLLMENT_STATE_COMPLETE", - }, - }, - "kmsSettings": &dcl.Property{ - Type: "object", - GoName: "KmsSettings", - GoType: "WorkloadKmsSettings", - Description: "**DEPRECATED** Input only. Settings used to create a CMEK crypto key. When set, a project with a KMS CMEK key is provisioned. This field is deprecated as of Feb 28, 2022. In order to create a Keyring, callers should specify, ENCRYPTION_KEYS_PROJECT or KEYRING in ResourceSettings.resource_type field.", - Immutable: true, - Unreadable: true, - Required: []string{ - "nextRotationTime", - "rotationPeriod", - }, - Properties: map[string]*dcl.Property{ - "nextRotationTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "NextRotationTime", - Description: "Required. Input only. Immutable. 
The time at which the Key Management Service will automatically create a new version of the crypto key and mark it as the primary.", - Immutable: true, - }, - "rotationPeriod": &dcl.Property{ - Type: "string", - GoName: "RotationPeriod", - Description: "Required. Input only. Immutable. will be advanced by this period when the Key Management Service automatically rotates a key. Must be at least 24 hours and at most 876,000 hours.", - Immutable: true, - }, - }, - }, - "labels": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "Labels", - Description: "Optional. Labels applied to the workload.", - }, - "location": &dcl.Property{ - Type: "string", - GoName: "Location", - Description: "The location for the resource", - Immutable: true, - Parameter: true, - }, - "name": &dcl.Property{ - Type: "string", - GoName: "Name", - Description: "Output only. The resource name of the workload.", - Immutable: true, - ServerGeneratedParameter: true, - HasLongForm: true, - }, - "organization": &dcl.Property{ - Type: "string", - GoName: "Organization", - Description: "The organization for the resource", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Cloudresourcemanager/Organization", - Field: "name", - Parent: true, - }, - }, - Parameter: true, - }, - "partner": &dcl.Property{ - Type: "string", - GoName: "Partner", - GoType: "WorkloadPartnerEnum", - Description: "Optional. Partner regime associated with this workload. 
Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN, SOVEREIGN_CONTROLS_BY_CNTXT, SOVEREIGN_CONTROLS_BY_CNTXT_NO_EKM", - Immutable: true, - Enum: []string{ - "PARTNER_UNSPECIFIED", - "LOCAL_CONTROLS_BY_S3NS", - "SOVEREIGN_CONTROLS_BY_T_SYSTEMS", - "SOVEREIGN_CONTROLS_BY_SIA_MINSAIT", - "SOVEREIGN_CONTROLS_BY_PSN", - "SOVEREIGN_CONTROLS_BY_CNTXT", - "SOVEREIGN_CONTROLS_BY_CNTXT_NO_EKM", - }, - }, - "partnerPermissions": &dcl.Property{ - Type: "object", - GoName: "PartnerPermissions", - GoType: "WorkloadPartnerPermissions", - Description: "Optional. Permissions granted to the AW Partner SA account for the customer workload", - Immutable: true, - Properties: map[string]*dcl.Property{ - "assuredWorkloadsMonitoring": &dcl.Property{ - Type: "boolean", - GoName: "AssuredWorkloadsMonitoring", - Description: "Optional. Allow partner to view violation alerts.", - Immutable: true, - }, - "dataLogsViewer": &dcl.Property{ - Type: "boolean", - GoName: "DataLogsViewer", - Description: "Allow the partner to view inspectability logs and monitoring violations.", - Immutable: true, - }, - "serviceAccessApprover": &dcl.Property{ - Type: "boolean", - GoName: "ServiceAccessApprover", - Description: "Optional. Allow partner to view access approval logs.", - Immutable: true, - }, - }, - }, - "partnerServicesBillingAccount": &dcl.Property{ - Type: "string", - GoName: "PartnerServicesBillingAccount", - Description: "Optional. Input only. Billing account necessary for purchasing services from Sovereign Partners. This field is required for creating SIA/PSN/CNTXT partner workloads. The caller should have 'billing.resourceAssociations.create' IAM permission on this billing-account. 
The format of this string is billingAccounts/AAAAAA-BBBBBB-CCCCCC.", - Immutable: true, - Unreadable: true, - }, - "provisionedResourcesParent": &dcl.Property{ - Type: "string", - GoName: "ProvisionedResourcesParent", - Description: "Input only. The parent resource for the resources managed by this Assured Workload. May be either empty or a folder resource which is a child of the Workload parent. If not specified all resources are created under the parent organization. Format: folders/{folder_id}", - Immutable: true, - Unreadable: true, - }, - "resourceSettings": &dcl.Property{ - Type: "array", - GoName: "ResourceSettings", - Description: "Input only. Resource properties that are used to customize workload resources. These properties (such as custom project id) will be used to create workload resources if possible. This field is optional.", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "WorkloadResourceSettings", - Properties: map[string]*dcl.Property{ - "displayName": &dcl.Property{ - Type: "string", - GoName: "DisplayName", - Description: "User-assigned resource display name. If not empty it will be used to create a resource with the specified name.", - Immutable: true, - }, - "resourceId": &dcl.Property{ - Type: "string", - GoName: "ResourceId", - Description: "Resource identifier. For a project this represents projectId. If the project is already taken, the workload creation will fail. For KeyRing, this represents the keyring_id. For a folder, don't set this value as folder_id is assigned by Google.", - Immutable: true, - }, - "resourceType": &dcl.Property{ - Type: "string", - GoName: "ResourceType", - GoType: "WorkloadResourceSettingsResourceTypeEnum", - Description: "Indicates the type of resource. 
This field should be specified to correspond the id to the right project type (CONSUMER_PROJECT or ENCRYPTION_KEYS_PROJECT) Possible values: RESOURCE_TYPE_UNSPECIFIED, CONSUMER_PROJECT, ENCRYPTION_KEYS_PROJECT, KEYRING, CONSUMER_FOLDER", - Immutable: true, - Enum: []string{ - "RESOURCE_TYPE_UNSPECIFIED", - "CONSUMER_PROJECT", - "ENCRYPTION_KEYS_PROJECT", - "KEYRING", - "CONSUMER_FOLDER", - }, - }, - }, - }, - Unreadable: true, - }, - "resources": &dcl.Property{ - Type: "array", - GoName: "Resources", - ReadOnly: true, - Description: "Output only. The resources associated with this workload. These resources will be created when creating the workload. If any of the projects already exist, the workload creation will fail. Always read only.", - Immutable: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "WorkloadResources", - Properties: map[string]*dcl.Property{ - "resourceId": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "ResourceId", - Description: "Resource identifier. For a project this represents project_number.", - Immutable: true, - }, - "resourceType": &dcl.Property{ - Type: "string", - GoName: "ResourceType", - GoType: "WorkloadResourcesResourceTypeEnum", - Description: "Indicates the type of resource. Possible values: RESOURCE_TYPE_UNSPECIFIED, CONSUMER_PROJECT, ENCRYPTION_KEYS_PROJECT, KEYRING, CONSUMER_FOLDER", - Immutable: true, - Enum: []string{ - "RESOURCE_TYPE_UNSPECIFIED", - "CONSUMER_PROJECT", - "ENCRYPTION_KEYS_PROJECT", - "KEYRING", - "CONSUMER_FOLDER", - }, - }, - }, - }, - }, - "saaEnrollmentResponse": &dcl.Property{ - Type: "object", - GoName: "SaaEnrollmentResponse", - GoType: "WorkloadSaaEnrollmentResponse", - ReadOnly: true, - Description: "Output only. Represents the SAA enrollment response of the given workload. SAA enrollment response is queried during workloads.get call. 
In failure cases, user friendly error message is shown in SAA details page.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "setupErrors": &dcl.Property{ - Type: "array", - GoName: "SetupErrors", - Description: "Indicates SAA enrollment setup error if any.", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "WorkloadSaaEnrollmentResponseSetupErrorsEnum", - Enum: []string{ - "SETUP_ERROR_UNSPECIFIED", - "ERROR_INVALID_BASE_SETUP", - "ERROR_MISSING_EXTERNAL_SIGNING_KEY", - "ERROR_NOT_ALL_SERVICES_ENROLLED", - "ERROR_SETUP_CHECK_FAILED", - }, - }, - }, - "setupStatus": &dcl.Property{ - Type: "string", - GoName: "SetupStatus", - GoType: "WorkloadSaaEnrollmentResponseSetupStatusEnum", - Description: "Indicates SAA enrollment status of a given workload. Possible values: SETUP_STATE_UNSPECIFIED, STATUS_PENDING, STATUS_COMPLETE", - Immutable: true, - Enum: []string{ - "SETUP_STATE_UNSPECIFIED", - "STATUS_PENDING", - "STATUS_COMPLETE", - }, - }, - }, - }, - "violationNotificationsEnabled": &dcl.Property{ - Type: "boolean", - GoName: "ViolationNotificationsEnabled", - Description: "Optional. Indicates whether the e-mail notification for a violation is enabled for a workload. This value will be by default True, and if not present will be considered as true. This should only be updated via updateWorkload call. Any Changes to this field during the createWorkload call will not be honored. This will always be true while creating the workload.", - Immutable: true, - }, - "workloadOptions": &dcl.Property{ - Type: "object", - GoName: "WorkloadOptions", - GoType: "WorkloadWorkloadOptions", - Description: "Optional. 
Used to specify certain options for a workload during workload creation - currently only supporting KAT Optionality for Regional Controls workloads.", - Immutable: true, - Unreadable: true, - Properties: map[string]*dcl.Property{ - "kajEnrollmentType": &dcl.Property{ - Type: "string", - GoName: "KajEnrollmentType", - GoType: "WorkloadWorkloadOptionsKajEnrollmentTypeEnum", - Description: "Indicates type of KAJ enrollment for the workload. Currently, only specifiying KEY_ACCESS_TRANSPARENCY_OFF is implemented to not enroll in KAT-level KAJ enrollment for Regional Controls workloads. Possible values: KAJ_ENROLLMENT_TYPE_UNSPECIFIED, FULL_KAJ, EKM_ONLY, KEY_ACCESS_TRANSPARENCY_OFF", - Immutable: true, - Enum: []string{ - "KAJ_ENROLLMENT_TYPE_UNSPECIFIED", - "FULL_KAJ", - "EKM_ONLY", - "KEY_ACCESS_TRANSPARENCY_OFF", - }, - }, - }, - }, - }, - }, - }, - }, - }, - } -} diff --git a/mmv1/third_party/terraform/services/cloudbuild/worker_pool_schema.go.tmpl b/mmv1/third_party/terraform/services/cloudbuild/worker_pool_schema.go.tmpl deleted file mode 100644 index 2d536198254e..000000000000 --- a/mmv1/third_party/terraform/services/cloudbuild/worker_pool_schema.go.tmpl +++ /dev/null @@ -1,296 +0,0 @@ -package cloudbuild - -import ( - dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" -) - -func DCLWorkerPoolSchema() *dcl.Schema { - return &dcl.Schema{ - Info: &dcl.Info{ - Title: "CloudBuild/WorkerPool", - Description: "The CloudBuild WorkerPool resource", - StructName: "WorkerPool", - }, - Paths: &dcl.Paths{ - Get: &dcl.Path{ - Description: "The function used to get information about a WorkerPool", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "workerPool", - Required: true, - Description: "A full instance of a WorkerPool", - }, - }, - }, - Apply: &dcl.Path{ - Description: "The function used to apply information about a WorkerPool", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "workerPool", - Required: true, - 
Description: "A full instance of a WorkerPool", - }, - }, - }, - Delete: &dcl.Path{ - Description: "The function used to delete a WorkerPool", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "workerPool", - Required: true, - Description: "A full instance of a WorkerPool", - }, - }, - }, - DeleteAll: &dcl.Path{ - Description: "The function used to delete all WorkerPool", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "location", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - List: &dcl.Path{ - Description: "The function used to list information about many WorkerPool", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "location", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - }, - Components: &dcl.Components{ - Schemas: map[string]*dcl.Component{ - "WorkerPool": &dcl.Component{ - Title: "WorkerPool", - ID: "projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/workerPools/{{ "{{" }}name{{ "}}" }}", - ParentContainer: "project", - HasCreate: true, - SchemaProperty: dcl.Property{ - Type: "object", - Required: []string{ - "name", - "project", - "location", - }, - Properties: map[string]*dcl.Property{ - "annotations": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "Annotations", - Description: "User specified annotations. See https://google.aip.dev/128#annotations for more details such as format and size limitations.", - }, - "createTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "CreateTime", - ReadOnly: true, - Description: "Output only. 
Time at which the request to create the `WorkerPool` was received.", - Immutable: true, - }, - "deleteTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "DeleteTime", - ReadOnly: true, - Description: "Output only. Time at which the request to delete the `WorkerPool` was received.", - Immutable: true, - }, - "displayName": &dcl.Property{ - Type: "string", - GoName: "DisplayName", - Description: "A user-specified, human-readable name for the `WorkerPool`. If provided, this value must be 1-63 characters.", - }, - "etag": &dcl.Property{ - Type: "string", - GoName: "Etag", - ReadOnly: true, - Description: "Output only. Checksum computed by the server. May be sent on update and delete requests to ensure that the client has an up-to-date value before proceeding.", - Immutable: true, - }, - "location": &dcl.Property{ - Type: "string", - GoName: "Location", - Description: "The location for the resource", - Immutable: true, - Parameter: true, - }, - "name": &dcl.Property{ - Type: "string", - GoName: "Name", - Description: "User-defined name of the `WorkerPool`.", - Immutable: true, - HasLongForm: true, - }, - "networkConfig": &dcl.Property{ - Type: "object", - GoName: "NetworkConfig", - GoType: "WorkerPoolNetworkConfig", - Description: "Network configuration for the `WorkerPool`.", - Immutable: true, - Conflicts: []string{ - "privateServiceConnect", - }, - Required: []string{ - "peeredNetwork", - }, - Properties: map[string]*dcl.Property{ - "peeredNetwork": &dcl.Property{ - Type: "string", - GoName: "PeeredNetwork", - Description: "Required. Immutable. The network definition that the workers are peered to. If this section is left empty, the workers will be peered to `WorkerPool.project_id` on the service producer network. Must be in the format `projects/{project}/global/networks/{network}`, where `{project}` is a project number, such as `12345`, and `{network}` is the name of a VPC network in the project. 
See [Understanding network configuration options](https://cloud.google.com/cloud-build/docs/custom-workers/set-up-custom-worker-pool-environment#understanding_the_network_configuration_options)", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Compute/Network", - Field: "selfLink", - }, - }, - }, - "peeredNetworkIPRange": &dcl.Property{ - Type: "string", - GoName: "PeeredNetworkIPRange", - Description: "Optional. Immutable. Subnet IP range within the peered network. This is specified in CIDR notation with a slash and the subnet prefix size. You can optionally specify an IP address before the subnet prefix value. e.g. `192.168.0.0/29` would specify an IP range starting at 192.168.0.0 with a prefix size of 29 bits. `/16` would specify a prefix size of 16 bits, with an automatically determined IP within the peered VPC. If unspecified, a value of `/24` will be used.", - Immutable: true, - }, - }, - }, - "privateServiceConnect": &dcl.Property{ - Type: "object", - GoName: "PrivateServiceConnect", - GoType: "WorkerPoolPrivateServiceConnect", - Description: "Private Service Connect configuration for the pool.", - Immutable: true, - Conflicts: []string{ - "networkConfig", - }, - Required: []string{ - "networkAttachment", - }, - Properties: map[string]*dcl.Property{ - "networkAttachment": &dcl.Property{ - Type: "string", - GoName: "NetworkAttachment", - Description: "Required. Immutable. The network attachment that the worker network interface is connected to. Must be in the format `projects/{project}/regions/{region}/networkAttachments/{networkAttachment}`. The region of network attachment must be the same as the worker pool. 
See [Network Attachments](https://cloud.google.com/vpc/docs/about-network-attachments)", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Compute/NetworkAttachment", - Field: "selfLink", - }, - }, - }, - "routeAllTraffic": &dcl.Property{ - Type: "boolean", - GoName: "RouteAllTraffic", - Description: "Immutable. Route all traffic through PSC interface. Enable this if you want full control of traffic in the private pool. Configure Cloud NAT for the subnet of network attachment if you need to access public Internet. If false, Only route private IPs, e.g. 10.0.0.0/8, 172.16.0.0/12, and 192.168.0.0/16 through PSC interface.", - Immutable: true, - }, - }, - }, - "project": &dcl.Property{ - Type: "string", - GoName: "Project", - Description: "The project for the resource", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Cloudresourcemanager/Project", - Field: "name", - Parent: true, - }, - }, - Parameter: true, - }, - "state": &dcl.Property{ - Type: "string", - GoName: "State", - GoType: "WorkerPoolStateEnum", - ReadOnly: true, - Description: "Output only. `WorkerPool` state. Possible values: STATE_UNSPECIFIED, PENDING, APPROVED, REJECTED, CANCELLED", - Immutable: true, - Enum: []string{ - "STATE_UNSPECIFIED", - "PENDING", - "APPROVED", - "REJECTED", - "CANCELLED", - }, - }, - "uid": &dcl.Property{ - Type: "string", - GoName: "Uid", - ReadOnly: true, - Description: "Output only. A unique identifier for the `WorkerPool`.", - Immutable: true, - }, - "updateTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "UpdateTime", - ReadOnly: true, - Description: "Output only. 
Time at which the request to update the `WorkerPool` was received.", - Immutable: true, - }, - "workerConfig": &dcl.Property{ - Type: "object", - GoName: "WorkerConfig", - GoType: "WorkerPoolWorkerConfig", - Description: "Configuration to be used for a creating workers in the `WorkerPool`.", - ServerDefault: true, - Properties: map[string]*dcl.Property{ - "diskSizeGb": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "DiskSizeGb", - Description: "Size of the disk attached to the worker, in GB. See [Worker pool config file](https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). Specify a value of up to 1000. If `0` is specified, Cloud Build will use a standard disk size.", - }, - "enableNestedVirtualization": &dcl.Property{ - Type: "boolean", - GoName: "EnableNestedVirtualization", - Description: "Enable nested virtualization on the worker, if supported by the machine type. See [Worker pool config file](https://cloud.google.com/build/docs/private-pools/worker-pool-config-file-schema). If left blank, Cloud Build will set this to false.", - }, - "machineType": &dcl.Property{ - Type: "string", - GoName: "MachineType", - Description: "Machine type of a worker, such as `n1-standard-1`. See [Worker pool config file](https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). 
If left blank, Cloud Build will use `n1-standard-1`.", - }, - "noExternalIP": &dcl.Property{ - Type: "boolean", - GoName: "NoExternalIP", - Description: "If true, workers are created without any public address, which prevents network egress to public IPs.", - ServerDefault: true, - }, - }, - }, - }, - }, - }, - }, - }, - } -} diff --git a/mmv1/third_party/terraform/services/clouddeploy/delivery_pipeline_schema.go b/mmv1/third_party/terraform/services/clouddeploy/delivery_pipeline_schema.go deleted file mode 100644 index e471067e3ae6..000000000000 --- a/mmv1/third_party/terraform/services/clouddeploy/delivery_pipeline_schema.go +++ /dev/null @@ -1,753 +0,0 @@ -package clouddeploy - -import ( - dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" -) - -func DCLDeliveryPipelineSchema() *dcl.Schema { - return &dcl.Schema{ - Info: &dcl.Info{ - Title: "Clouddeploy/DeliveryPipeline", - Description: "The Cloud Deploy `DeliveryPipeline` resource", - StructName: "DeliveryPipeline", - Reference: &dcl.Link{ - Text: "REST API", - URL: "https://cloud.google.com/deploy/docs/api/reference/rest/v1/projects.locations.deliveryPipelines", - }, - }, - Paths: &dcl.Paths{ - Get: &dcl.Path{ - Description: "The function used to get information about a DeliveryPipeline", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "deliveryPipeline", - Required: true, - Description: "A full instance of a DeliveryPipeline", - }, - }, - }, - Apply: &dcl.Path{ - Description: "The function used to apply information about a DeliveryPipeline", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "deliveryPipeline", - Required: true, - Description: "A full instance of a DeliveryPipeline", - }, - }, - }, - Delete: &dcl.Path{ - Description: "The function used to delete a DeliveryPipeline", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "deliveryPipeline", - Required: true, - Description: "A full instance of a DeliveryPipeline", - }, - }, - 
}, - DeleteAll: &dcl.Path{ - Description: "The function used to delete all DeliveryPipeline", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "location", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - List: &dcl.Path{ - Description: "The function used to list information about many DeliveryPipeline", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "location", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - }, - Components: &dcl.Components{ - Schemas: map[string]*dcl.Component{ - "DeliveryPipeline": &dcl.Component{ - Title: "DeliveryPipeline", - ID: "projects/{{project}}/locations/{{location}}/deliveryPipelines/{{name}}", - ParentContainer: "project", - HasCreate: true, - SchemaProperty: dcl.Property{ - Type: "object", - Required: []string{ - "name", - "project", - "location", - }, - Properties: map[string]*dcl.Property{ - "annotations": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "Annotations", - Description: "User annotations. These attributes can only be set and used by the user, and not by Google Cloud Deploy. See https://google.aip.dev/128#annotations for more details such as format and size limitations.", - }, - "condition": &dcl.Property{ - Type: "object", - GoName: "Condition", - GoType: "DeliveryPipelineCondition", - ReadOnly: true, - Description: "Output only. 
Information around the state of the Delivery Pipeline.", - Properties: map[string]*dcl.Property{ - "pipelineReadyCondition": &dcl.Property{ - Type: "object", - GoName: "PipelineReadyCondition", - GoType: "DeliveryPipelineConditionPipelineReadyCondition", - Description: "Details around the Pipeline's overall status.", - Properties: map[string]*dcl.Property{ - "status": &dcl.Property{ - Type: "boolean", - GoName: "Status", - Description: "True if the Pipeline is in a valid state. Otherwise at least one condition in `PipelineCondition` is in an invalid state. Iterate over those conditions and see which condition(s) has status = false to find out what is wrong with the Pipeline.", - }, - "updateTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "UpdateTime", - Description: "Last time the condition was updated.", - }, - }, - }, - "targetsPresentCondition": &dcl.Property{ - Type: "object", - GoName: "TargetsPresentCondition", - GoType: "DeliveryPipelineConditionTargetsPresentCondition", - Description: "Details around targets enumerated in the pipeline.", - Properties: map[string]*dcl.Property{ - "missingTargets": &dcl.Property{ - Type: "array", - GoName: "MissingTargets", - Description: "The list of Target names that are missing. 
For example, projects/{project_id}/locations/{location_name}/targets/{target_name}.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Clouddeploy/Target", - Field: "selfLink", - }, - }, - }, - }, - "status": &dcl.Property{ - Type: "boolean", - GoName: "Status", - Description: "True if there aren't any missing Targets.", - }, - "updateTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "UpdateTime", - Description: "Last time the condition was updated.", - }, - }, - }, - "targetsTypeCondition": &dcl.Property{ - Type: "object", - GoName: "TargetsTypeCondition", - GoType: "DeliveryPipelineConditionTargetsTypeCondition", - Description: "Details on the whether the targets enumerated in the pipeline are of the same type.", - Properties: map[string]*dcl.Property{ - "errorDetails": &dcl.Property{ - Type: "string", - GoName: "ErrorDetails", - Description: "Human readable error message.", - }, - "status": &dcl.Property{ - Type: "boolean", - GoName: "Status", - Description: "True if the targets are all a comparable type. For example this is true if all targets are GKE clusters. This is false if some targets are Cloud Run targets and others are GKE clusters.", - }, - }, - }, - }, - }, - "createTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "CreateTime", - ReadOnly: true, - Description: "Output only. Time at which the pipeline was created.", - Immutable: true, - }, - "description": &dcl.Property{ - Type: "string", - GoName: "Description", - Description: "Description of the `DeliveryPipeline`. 
Max length is 255 characters.", - }, - "etag": &dcl.Property{ - Type: "string", - GoName: "Etag", - ReadOnly: true, - Description: "This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.", - Immutable: true, - }, - "labels": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "Labels", - Description: "Labels are attributes that can be set and used by both the user and by Google Cloud Deploy. Labels must meet the following constraints: * Keys and values can contain only lowercase letters, numeric characters, underscores, and dashes. * All characters must use UTF-8 encoding, and international characters are allowed. * Keys must start with a lowercase letter or international character. * Each resource is limited to a maximum of 64 labels. Both keys and values are additionally constrained to be <= 128 bytes.", - }, - "location": &dcl.Property{ - Type: "string", - GoName: "Location", - Description: "The location for the resource", - Immutable: true, - Parameter: true, - }, - "name": &dcl.Property{ - Type: "string", - GoName: "Name", - Description: "Name of the `DeliveryPipeline`. 
Format is `[a-z]([a-z0-9-]{0,61}[a-z0-9])?`.", - Immutable: true, - Parameter: true, - HasLongForm: true, - }, - "project": &dcl.Property{ - Type: "string", - GoName: "Project", - Description: "The project for the resource", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Cloudresourcemanager/Project", - Field: "name", - Parent: true, - }, - }, - Parameter: true, - }, - "serialPipeline": &dcl.Property{ - Type: "object", - GoName: "SerialPipeline", - GoType: "DeliveryPipelineSerialPipeline", - Description: "SerialPipeline defines a sequential set of stages for a `DeliveryPipeline`.", - Properties: map[string]*dcl.Property{ - "stages": &dcl.Property{ - Type: "array", - GoName: "Stages", - Description: "Each stage specifies configuration for a `Target`. The ordering of this list defines the promotion flow.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "DeliveryPipelineSerialPipelineStages", - Properties: map[string]*dcl.Property{ - "deployParameters": &dcl.Property{ - Type: "array", - GoName: "DeployParameters", - Description: "Optional. The deploy parameters to use for the target in this stage.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "DeliveryPipelineSerialPipelineStagesDeployParameters", - Required: []string{ - "values", - }, - Properties: map[string]*dcl.Property{ - "matchTargetLabels": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "MatchTargetLabels", - Description: "Optional. Deploy parameters are applied to targets with match labels. If unspecified, deploy parameters are applied to all targets (including child targets of a multi-target).", - }, - "values": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "Values", - Description: "Required. 
Values are deploy parameters in key-value pairs.", - }, - }, - }, - }, - "profiles": &dcl.Property{ - Type: "array", - GoName: "Profiles", - Description: "Skaffold profiles to use when rendering the manifest for this stage's `Target`.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "strategy": &dcl.Property{ - Type: "object", - GoName: "Strategy", - GoType: "DeliveryPipelineSerialPipelineStagesStrategy", - Description: "Optional. The strategy to use for a `Rollout` to this stage.", - Properties: map[string]*dcl.Property{ - "canary": &dcl.Property{ - Type: "object", - GoName: "Canary", - GoType: "DeliveryPipelineSerialPipelineStagesStrategyCanary", - Description: "Canary deployment strategy provides progressive percentage based deployments to a Target.", - Properties: map[string]*dcl.Property{ - "canaryDeployment": &dcl.Property{ - Type: "object", - GoName: "CanaryDeployment", - GoType: "DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment", - Description: "Configures the progressive based deployment for a Target.", - Conflicts: []string{ - "customCanaryDeployment", - }, - Required: []string{ - "percentages", - }, - Properties: map[string]*dcl.Property{ - "percentages": &dcl.Property{ - Type: "array", - GoName: "Percentages", - Description: "Required. The percentage based deployments that will occur as a part of a `Rollout`. List is expected in ascending order and each integer n is 0 <= n < 100.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "integer", - Format: "int64", - GoType: "int64", - }, - }, - "postdeploy": &dcl.Property{ - Type: "object", - GoName: "Postdeploy", - GoType: "DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPostdeploy", - Description: "Optional. Configuration for the postdeploy job of the last phase. 
If this is not configured, postdeploy job will not be present.", - Properties: map[string]*dcl.Property{ - "actions": &dcl.Property{ - Type: "array", - GoName: "Actions", - Description: "Optional. A sequence of skaffold custom actions to invoke during execution of the postdeploy job.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - }, - }, - "predeploy": &dcl.Property{ - Type: "object", - GoName: "Predeploy", - GoType: "DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentPredeploy", - Description: "Optional. Configuration for the predeploy job of the first phase. If this is not configured, predeploy job will not be present.", - Properties: map[string]*dcl.Property{ - "actions": &dcl.Property{ - Type: "array", - GoName: "Actions", - Description: "Optional. A sequence of skaffold custom actions to invoke during execution of the predeploy job.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - }, - }, - "verify": &dcl.Property{ - Type: "boolean", - GoName: "Verify", - Description: "Whether to run verify tests after each percentage deployment.", - }, - }, - }, - "customCanaryDeployment": &dcl.Property{ - Type: "object", - GoName: "CustomCanaryDeployment", - GoType: "DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment", - Description: "Configures the progressive based deployment for a Target, but allows customizing at the phase level where a phase represents each of the percentage deployments.", - Conflicts: []string{ - "canaryDeployment", - }, - Required: []string{ - "phaseConfigs", - }, - Properties: map[string]*dcl.Property{ - "phaseConfigs": &dcl.Property{ - Type: "array", - GoName: "PhaseConfigs", - Description: "Required. 
Configuration for each phase in the canary deployment in the order executed.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs", - Required: []string{ - "phaseId", - "percentage", - }, - Properties: map[string]*dcl.Property{ - "percentage": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "Percentage", - Description: "Required. Percentage deployment for the phase.", - }, - "phaseId": &dcl.Property{ - Type: "string", - GoName: "PhaseId", - Description: "Required. The ID to assign to the `Rollout` phase. This value must consist of lower-case letters, numbers, and hyphens, start with a letter and end with a letter or a number, and have a max length of 63 characters. In other words, it must match the following regex: `^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$`.", - }, - "postdeploy": &dcl.Property{ - Type: "object", - GoName: "Postdeploy", - GoType: "DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPostdeploy", - Description: "Optional. Configuration for the postdeploy job of this phase. If this is not configured, postdeploy job will not be present for this phase.", - Properties: map[string]*dcl.Property{ - "actions": &dcl.Property{ - Type: "array", - GoName: "Actions", - Description: "Optional. A sequence of skaffold custom actions to invoke during execution of the postdeploy job.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - }, - }, - "predeploy": &dcl.Property{ - Type: "object", - GoName: "Predeploy", - GoType: "DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsPredeploy", - Description: "Optional. Configuration for the predeploy job of this phase. 
If this is not configured, predeploy job will not be present for this phase.", - Properties: map[string]*dcl.Property{ - "actions": &dcl.Property{ - Type: "array", - GoName: "Actions", - Description: "Optional. A sequence of skaffold custom actions to invoke during execution of the predeploy job.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - }, - }, - "profiles": &dcl.Property{ - Type: "array", - GoName: "Profiles", - Description: "Skaffold profiles to use when rendering the manifest for this phase. These are in addition to the profiles list specified in the `DeliveryPipeline` stage.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "verify": &dcl.Property{ - Type: "boolean", - GoName: "Verify", - Description: "Whether to run verify tests after the deployment.", - }, - }, - }, - }, - }, - }, - "runtimeConfig": &dcl.Property{ - Type: "object", - GoName: "RuntimeConfig", - GoType: "DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig", - Description: "Optional. Runtime specific configurations for the deployment strategy. The runtime configuration is used to determine how Cloud Deploy will split traffic to enable a progressive deployment.", - Properties: map[string]*dcl.Property{ - "cloudRun": &dcl.Property{ - Type: "object", - GoName: "CloudRun", - GoType: "DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun", - Description: "Cloud Run runtime configuration.", - Conflicts: []string{ - "kubernetes", - }, - Properties: map[string]*dcl.Property{ - "automaticTrafficControl": &dcl.Property{ - Type: "boolean", - GoName: "AutomaticTrafficControl", - Description: "Whether Cloud Deploy should update the traffic stanza in a Cloud Run Service on the user's behalf to facilitate traffic splitting. 
This is required to be true for CanaryDeployments, but optional for CustomCanaryDeployments.", - }, - "canaryRevisionTags": &dcl.Property{ - Type: "array", - GoName: "CanaryRevisionTags", - Description: "Optional. A list of tags that are added to the canary revision while the canary phase is in progress.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "priorRevisionTags": &dcl.Property{ - Type: "array", - GoName: "PriorRevisionTags", - Description: "Optional. A list of tags that are added to the prior revision while the canary phase is in progress.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "stableRevisionTags": &dcl.Property{ - Type: "array", - GoName: "StableRevisionTags", - Description: "Optional. A list of tags that are added to the final stable revision when the stable phase is applied.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - }, - }, - "kubernetes": &dcl.Property{ - Type: "object", - GoName: "Kubernetes", - GoType: "DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes", - Description: "Kubernetes runtime configuration.", - Conflicts: []string{ - "cloudRun", - }, - Properties: map[string]*dcl.Property{ - "gatewayServiceMesh": &dcl.Property{ - Type: "object", - GoName: "GatewayServiceMesh", - GoType: "DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh", - Description: "Kubernetes Gateway API service mesh configuration.", - Conflicts: []string{ - "serviceNetworking", - }, - Required: []string{ - "httpRoute", - "service", - "deployment", - }, - Properties: map[string]*dcl.Property{ - "deployment": &dcl.Property{ - Type: "string", - GoName: "Deployment", - Description: "Required. 
Name of the Kubernetes Deployment whose traffic is managed by the specified HTTPRoute and Service.", - }, - "httpRoute": &dcl.Property{ - Type: "string", - GoName: "HttpRoute", - Description: "Required. Name of the Gateway API HTTPRoute.", - }, - "podSelectorLabel": &dcl.Property{ - Type: "string", - GoName: "PodSelectorLabel", - Description: "Optional. The label to use when selecting Pods for the Deployment and Service resources. This label must already be present in both resources.", - }, - "routeDestinations": &dcl.Property{ - Type: "object", - GoName: "RouteDestinations", - GoType: "DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshRouteDestinations", - Description: "Optional. Route destinations allow configuring the Gateway API HTTPRoute to be deployed to additional clusters. This option is available for multi-cluster service mesh set ups that require the route to exist in the clusters that call the service. If unspecified, the HTTPRoute will only be deployed to the Target cluster.", - Required: []string{ - "destinationIds", - }, - Properties: map[string]*dcl.Property{ - "destinationIds": &dcl.Property{ - Type: "array", - GoName: "DestinationIds", - Description: "Required. The clusters where the Gateway API HTTPRoute resource will be deployed to. Valid entries include the associated entities IDs configured in the Target resource and \"@self\" to include the Target cluster.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "propagateService": &dcl.Property{ - Type: "boolean", - GoName: "PropagateService", - Description: "Optional. Whether to propagate the Kubernetes Service to the route destination clusters. The Service will always be deployed to the Target cluster even if the HTTPRoute is not. This option may be used to facilitiate successful DNS lookup in the route destination clusters. 
Can only be set to true if destinations are specified.", - }, - }, - }, - "routeUpdateWaitTime": &dcl.Property{ - Type: "string", - GoName: "RouteUpdateWaitTime", - Description: "Optional. The time to wait for route updates to propagate. The maximum configurable time is 3 hours, in seconds format. If unspecified, there is no wait time.", - }, - "service": &dcl.Property{ - Type: "string", - GoName: "Service", - Description: "Required. Name of the Kubernetes Service.", - }, - "stableCutbackDuration": &dcl.Property{ - Type: "string", - GoName: "StableCutbackDuration", - Description: "Optional. The amount of time to migrate traffic back from the canary Service to the original Service during the stable phase deployment. If specified, must be between 15s and 3600s. If unspecified, there is no cutback time.", - }, - }, - }, - "serviceNetworking": &dcl.Property{ - Type: "object", - GoName: "ServiceNetworking", - GoType: "DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking", - Description: "Kubernetes Service networking configuration.", - Conflicts: []string{ - "gatewayServiceMesh", - }, - Required: []string{ - "service", - "deployment", - }, - Properties: map[string]*dcl.Property{ - "deployment": &dcl.Property{ - Type: "string", - GoName: "Deployment", - Description: "Required. Name of the Kubernetes Deployment whose traffic is managed by the specified Service.", - }, - "disablePodOverprovisioning": &dcl.Property{ - Type: "boolean", - GoName: "DisablePodOverprovisioning", - Description: "Optional. Whether to disable Pod overprovisioning. If Pod overprovisioning is disabled then Cloud Deploy will limit the number of total Pods used for the deployment strategy to the number of Pods the Deployment has on the cluster.", - }, - "podSelectorLabel": &dcl.Property{ - Type: "string", - GoName: "PodSelectorLabel", - Description: "Optional. The label to use when selecting Pods for the Deployment resource. 
This label must already be present in the Deployment.", - }, - "service": &dcl.Property{ - Type: "string", - GoName: "Service", - Description: "Required. Name of the Kubernetes Service.", - }, - }, - }, - }, - }, - }, - }, - }, - }, - "standard": &dcl.Property{ - Type: "object", - GoName: "Standard", - GoType: "DeliveryPipelineSerialPipelineStagesStrategyStandard", - Description: "Standard deployment strategy executes a single deploy and allows verifying the deployment.", - Properties: map[string]*dcl.Property{ - "postdeploy": &dcl.Property{ - Type: "object", - GoName: "Postdeploy", - GoType: "DeliveryPipelineSerialPipelineStagesStrategyStandardPostdeploy", - Description: "Optional. Configuration for the postdeploy job. If this is not configured, postdeploy job will not be present.", - Properties: map[string]*dcl.Property{ - "actions": &dcl.Property{ - Type: "array", - GoName: "Actions", - Description: "Optional. A sequence of skaffold custom actions to invoke during execution of the postdeploy job.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - }, - }, - "predeploy": &dcl.Property{ - Type: "object", - GoName: "Predeploy", - GoType: "DeliveryPipelineSerialPipelineStagesStrategyStandardPredeploy", - Description: "Optional. Configuration for the predeploy job. If this is not configured, predeploy job will not be present.", - Properties: map[string]*dcl.Property{ - "actions": &dcl.Property{ - Type: "array", - GoName: "Actions", - Description: "Optional. 
A sequence of skaffold custom actions to invoke during execution of the predeploy job.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - }, - }, - "verify": &dcl.Property{ - Type: "boolean", - GoName: "Verify", - Description: "Whether to verify a deployment.", - }, - }, - }, - }, - }, - "targetId": &dcl.Property{ - Type: "string", - GoName: "TargetId", - Description: "The target_id to which this stage points. This field refers exclusively to the last segment of a target name. For example, this field would just be `my-target` (rather than `projects/project/locations/location/targets/my-target`). The location of the `Target` is inferred to be the same as the location of the `DeliveryPipeline` that contains this `Stage`.", - }, - }, - }, - }, - }, - }, - "suspended": &dcl.Property{ - Type: "boolean", - GoName: "Suspended", - Description: "When suspended, no new releases or rollouts can be created, but in-progress ones will complete.", - }, - "uid": &dcl.Property{ - Type: "string", - GoName: "Uid", - ReadOnly: true, - Description: "Output only. Unique identifier of the `DeliveryPipeline`.", - Immutable: true, - }, - "updateTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "UpdateTime", - ReadOnly: true, - Description: "Output only. 
Most recent time at which the pipeline was updated.", - Immutable: true, - }, - }, - }, - }, - }, - }, - } -} diff --git a/mmv1/third_party/terraform/services/clouddeploy/target_schema.go b/mmv1/third_party/terraform/services/clouddeploy/target_schema.go deleted file mode 100644 index e5c627cd187c..000000000000 --- a/mmv1/third_party/terraform/services/clouddeploy/target_schema.go +++ /dev/null @@ -1,488 +0,0 @@ -package clouddeploy - -import ( - dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" -) - -func DCLTargetSchema() *dcl.Schema { - return &dcl.Schema{ - Info: &dcl.Info{ - Title: "Clouddeploy/Target", - Description: "The Cloud Deploy `Target` resource", - StructName: "Target", - Reference: &dcl.Link{ - Text: "REST API", - URL: "https://cloud.google.com/deploy/docs/api/reference/rest/v1/projects.locations.targets", - }, - }, - Paths: &dcl.Paths{ - Get: &dcl.Path{ - Description: "The function used to get information about a Target", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "target", - Required: true, - Description: "A full instance of a Target", - }, - }, - }, - Apply: &dcl.Path{ - Description: "The function used to apply information about a Target", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "target", - Required: true, - Description: "A full instance of a Target", - }, - }, - }, - Delete: &dcl.Path{ - Description: "The function used to delete a Target", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "target", - Required: true, - Description: "A full instance of a Target", - }, - }, - }, - DeleteAll: &dcl.Path{ - Description: "The function used to delete all Target", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "location", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - List: 
&dcl.Path{ - Description: "The function used to list information about many Target", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "location", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - }, - Components: &dcl.Components{ - Schemas: map[string]*dcl.Component{ - "Target": &dcl.Component{ - Title: "Target", - ID: "projects/{{project}}/locations/{{location}}/targets/{{name}}", - ParentContainer: "project", - HasCreate: true, - SchemaProperty: dcl.Property{ - Type: "object", - Required: []string{ - "name", - "project", - "location", - }, - Properties: map[string]*dcl.Property{ - "annotations": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "Annotations", - Description: "Optional. User annotations. These attributes can only be set and used by the user, and not by Google Cloud Deploy. See https://google.aip.dev/128#annotations for more details such as format and size limitations.", - }, - "anthosCluster": &dcl.Property{ - Type: "object", - GoName: "AnthosCluster", - GoType: "TargetAnthosCluster", - Description: "Information specifying an Anthos Cluster.", - Conflicts: []string{ - "gke", - "run", - "multiTarget", - "customTarget", - }, - Properties: map[string]*dcl.Property{ - "membership": &dcl.Property{ - Type: "string", - GoName: "Membership", - Description: "Membership of the GKE Hub-registered cluster to which to apply the Skaffold configuration. 
Format is `projects/{project}/locations/{location}/memberships/{membership_name}`.", - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Gkehub/Membership", - Field: "selfLink", - }, - }, - }, - }, - }, - "associatedEntities": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "object", - GoType: "TargetAssociatedEntities", - Properties: map[string]*dcl.Property{ - "anthosClusters": &dcl.Property{ - Type: "array", - GoName: "AnthosClusters", - Description: "Optional. Information specifying Anthos clusters as associated entities.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "TargetAssociatedEntitiesAnthosClusters", - Properties: map[string]*dcl.Property{ - "membership": &dcl.Property{ - Type: "string", - GoName: "Membership", - Description: "Optional. Membership of the GKE Hub-registered cluster to which to apply the Skaffold configuration. Format is `projects/{project}/locations/{location}/memberships/{membership_name}`.", - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Gkehub/Membership", - Field: "selfLink", - }, - }, - }, - }, - }, - }, - "gkeClusters": &dcl.Property{ - Type: "array", - GoName: "GkeClusters", - Description: "Optional. Information specifying GKE clusters as associated entities.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "TargetAssociatedEntitiesGkeClusters", - Properties: map[string]*dcl.Property{ - "cluster": &dcl.Property{ - Type: "string", - GoName: "Cluster", - Description: "Optional. Information specifying a GKE Cluster. 
Format is `projects/{project_id}/locations/{location_id}/clusters/{cluster_id}`.", - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Container/Cluster", - Field: "selfLink", - }, - }, - }, - "internalIP": &dcl.Property{ - Type: "boolean", - GoName: "InternalIP", - Description: "Optional. If true, `cluster` is accessed using the private IP address of the control plane endpoint. Otherwise, the default IP address of the control plane endpoint is used. The default IP address is the private IP address for clusters with private control-plane endpoints and the public IP address otherwise. Only specify this option when `cluster` is a [private GKE cluster](https://cloud.google.com/kubernetes-engine/docs/concepts/private-cluster-concept).", - }, - "proxyUrl": &dcl.Property{ - Type: "string", - GoName: "ProxyUrl", - Description: "Optional. If set, used to configure a [proxy](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/#proxy) to the Kubernetes server.", - }, - }, - }, - }, - }, - }, - GoName: "AssociatedEntities", - Description: "Optional. Map of entity IDs to their associated entities. Associated entities allows specifying places other than the deployment target for specific features. For example, the Gateway API canary can be configured to deploy the HTTPRoute to a different cluster(s) than the deployment cluster using associated entities. An entity ID must consist of lower-case letters, numbers, and hyphens, start with a letter and end with a letter or a number, and have a max length of 63 characters. In other words, it must match the following regex: `^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$`.", - }, - "createTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "CreateTime", - ReadOnly: true, - Description: "Output only. 
Time at which the `Target` was created.", - Immutable: true, - }, - "customTarget": &dcl.Property{ - Type: "object", - GoName: "CustomTarget", - GoType: "TargetCustomTarget", - Description: "Optional. Information specifying a Custom Target.", - Conflicts: []string{ - "gke", - "anthosCluster", - "run", - "multiTarget", - }, - Required: []string{ - "customTargetType", - }, - Properties: map[string]*dcl.Property{ - "customTargetType": &dcl.Property{ - Type: "string", - GoName: "CustomTargetType", - Description: "Required. The name of the CustomTargetType. Format must be `projects/{project}/locations/{location}/customTargetTypes/{custom_target_type}`.", - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Clouddeploy/CustomTargetType", - Field: "selfLink", - }, - }, - }, - }, - }, - "deployParameters": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "DeployParameters", - Description: "Optional. The deploy parameters to use for this target.", - }, - "description": &dcl.Property{ - Type: "string", - GoName: "Description", - Description: "Optional. Description of the `Target`. Max length is 255 characters.", - }, - "etag": &dcl.Property{ - Type: "string", - GoName: "Etag", - ReadOnly: true, - Description: "Optional. This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.", - Immutable: true, - }, - "executionConfigs": &dcl.Property{ - Type: "array", - GoName: "ExecutionConfigs", - Description: "Configurations for all execution that relates to this `Target`. Each `ExecutionEnvironmentUsage` value may only be used in a single configuration; using the same value multiple times is an error. When one or more configurations are specified, they must include the `RENDER` and `DEPLOY` `ExecutionEnvironmentUsage` values. 
When no configurations are specified, execution will use the default specified in `DefaultPool`.", - ServerDefault: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "TargetExecutionConfigs", - Required: []string{ - "usages", - }, - Properties: map[string]*dcl.Property{ - "artifactStorage": &dcl.Property{ - Type: "string", - GoName: "ArtifactStorage", - Description: "Optional. Cloud Storage location in which to store execution outputs. This can either be a bucket (\"gs://my-bucket\") or a path within a bucket (\"gs://my-bucket/my-dir\"). If unspecified, a default bucket located in the same region will be used.", - ServerDefault: true, - }, - "executionTimeout": &dcl.Property{ - Type: "string", - GoName: "ExecutionTimeout", - Description: "Optional. Execution timeout for a Cloud Build Execution. This must be between 10m and 24h in seconds format. If unspecified, a default timeout of 1h is used.", - ServerDefault: true, - }, - "serviceAccount": &dcl.Property{ - Type: "string", - GoName: "ServiceAccount", - Description: "Optional. Google service account to use for execution. If unspecified, the project execution service account (-compute@developer.gserviceaccount.com) is used.", - ServerDefault: true, - }, - "usages": &dcl.Property{ - Type: "array", - GoName: "Usages", - Description: "Required. Usages when this configuration should be applied.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "TargetExecutionConfigsUsagesEnum", - Enum: []string{ - "EXECUTION_ENVIRONMENT_USAGE_UNSPECIFIED", - "RENDER", - "DEPLOY", - }, - }, - }, - "verbose": &dcl.Property{ - Type: "boolean", - GoName: "Verbose", - Description: "Optional. If true, additional logging will be enabled when running builds in this execution environment.", - }, - "workerPool": &dcl.Property{ - Type: "string", - GoName: "WorkerPool", - Description: "Optional. 
The resource name of the `WorkerPool`, with the format `projects/{project}/locations/{location}/workerPools/{worker_pool}`. If this optional field is unspecified, the default Cloud Build pool will be used.", - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Cloudbuild/WorkerPool", - Field: "selfLink", - }, - }, - }, - }, - }, - }, - "gke": &dcl.Property{ - Type: "object", - GoName: "Gke", - GoType: "TargetGke", - Description: "Information specifying a GKE Cluster.", - Conflicts: []string{ - "anthosCluster", - "run", - "multiTarget", - "customTarget", - }, - Properties: map[string]*dcl.Property{ - "cluster": &dcl.Property{ - Type: "string", - GoName: "Cluster", - Description: "Information specifying a GKE Cluster. Format is `projects/{project_id}/locations/{location_id}/clusters/{cluster_id}.", - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Container/Cluster", - Field: "selfLink", - }, - }, - }, - "dnsEndpoint": &dcl.Property{ - Type: "boolean", - GoName: "DnsEndpoint", - Description: "Optional. If set, the cluster will be accessed using the DNS endpoint. Note that both `dns_endpoint` and `internal_ip` cannot be set to true.", - }, - "internalIP": &dcl.Property{ - Type: "boolean", - GoName: "InternalIP", - Description: "Optional. If true, `cluster` is accessed using the private IP address of the control plane endpoint. Otherwise, the default IP address of the control plane endpoint is used. The default IP address is the private IP address for clusters with private control-plane endpoints and the public IP address otherwise. Only specify this option when `cluster` is a [private GKE cluster](https://cloud.google.com/kubernetes-engine/docs/concepts/private-cluster-concept).", - }, - "proxyUrl": &dcl.Property{ - Type: "string", - GoName: "ProxyUrl", - Description: "Optional. 
If set, used to configure a [proxy](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/#proxy) to the Kubernetes server.", - }, - }, - }, - "labels": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "Labels", - Description: "Optional. Labels are attributes that can be set and used by both the user and by Google Cloud Deploy. Labels must meet the following constraints: * Keys and values can contain only lowercase letters, numeric characters, underscores, and dashes. * All characters must use UTF-8 encoding, and international characters are allowed. * Keys must start with a lowercase letter or international character. * Each resource is limited to a maximum of 64 labels. Both keys and values are additionally constrained to be <= 128 bytes.", - }, - "location": &dcl.Property{ - Type: "string", - GoName: "Location", - Description: "The location for the resource", - Immutable: true, - Parameter: true, - }, - "multiTarget": &dcl.Property{ - Type: "object", - GoName: "MultiTarget", - GoType: "TargetMultiTarget", - Description: "Information specifying a multiTarget.", - Conflicts: []string{ - "gke", - "anthosCluster", - "run", - "customTarget", - }, - Required: []string{ - "targetIds", - }, - Properties: map[string]*dcl.Property{ - "targetIds": &dcl.Property{ - Type: "array", - GoName: "TargetIds", - Description: "Required. The target_ids of this multiTarget.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - }, - }, - "name": &dcl.Property{ - Type: "string", - GoName: "Name", - Description: "Name of the `Target`. 
Format is `[a-z]([a-z0-9-]{0,61}[a-z0-9])?`.", - Immutable: true, - Parameter: true, - HasLongForm: true, - }, - "project": &dcl.Property{ - Type: "string", - GoName: "Project", - Description: "The project for the resource", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Cloudresourcemanager/Project", - Field: "name", - Parent: true, - }, - }, - Parameter: true, - }, - "requireApproval": &dcl.Property{ - Type: "boolean", - GoName: "RequireApproval", - Description: "Optional. Whether or not the `Target` requires approval.", - }, - "run": &dcl.Property{ - Type: "object", - GoName: "Run", - GoType: "TargetRun", - Description: "Information specifying a Cloud Run deployment target.", - Conflicts: []string{ - "gke", - "anthosCluster", - "multiTarget", - "customTarget", - }, - Required: []string{ - "location", - }, - Properties: map[string]*dcl.Property{ - "location": &dcl.Property{ - Type: "string", - GoName: "Location", - Description: "Required. The location where the Cloud Run Service should be located. Format is `projects/{project}/locations/{location}`.", - }, - }, - }, - "targetId": &dcl.Property{ - Type: "string", - GoName: "TargetId", - ReadOnly: true, - Description: "Output only. Resource id of the `Target`.", - Immutable: true, - }, - "uid": &dcl.Property{ - Type: "string", - GoName: "Uid", - ReadOnly: true, - Description: "Output only. Unique identifier of the `Target`.", - Immutable: true, - }, - "updateTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "UpdateTime", - ReadOnly: true, - Description: "Output only. 
Most recent time at which the `Target` was updated.", - Immutable: true, - }, - }, - }, - }, - }, - }, - } -} diff --git a/mmv1/third_party/terraform/services/containeraws/cluster_schema.go.tmpl b/mmv1/third_party/terraform/services/containeraws/cluster_schema.go.tmpl deleted file mode 100644 index ce4ea97c67fd..000000000000 --- a/mmv1/third_party/terraform/services/containeraws/cluster_schema.go.tmpl +++ /dev/null @@ -1,754 +0,0 @@ -package containeraws - -import ( - dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" -) - -func DCLClusterSchema() *dcl.Schema { - return &dcl.Schema{ - Info: &dcl.Info{ - Title: "ContainerAws/Cluster", - Description: "An Anthos cluster running on AWS.", - StructName: "Cluster", - Reference: &dcl.Link{ - Text: "API reference", - URL: "https://cloud.google.com/kubernetes-engine/multi-cloud/docs/reference/rest/v1/projects.locations.awsClusters", - }, - Guides: []*dcl.Link{ - &dcl.Link{ - Text: "Multicloud overview", - URL: "https://cloud.google.com/kubernetes-engine/multi-cloud/docs", - }, - }, - }, - Paths: &dcl.Paths{ - Get: &dcl.Path{ - Description: "The function used to get information about a Cluster", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "cluster", - Required: true, - Description: "A full instance of a Cluster", - }, - }, - }, - Apply: &dcl.Path{ - Description: "The function used to apply information about a Cluster", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "cluster", - Required: true, - Description: "A full instance of a Cluster", - }, - }, - }, - Delete: &dcl.Path{ - Description: "The function used to delete a Cluster", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "cluster", - Required: true, - Description: "A full instance of a Cluster", - }, - }, - }, - DeleteAll: &dcl.Path{ - Description: "The function used to delete all Cluster", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - 
Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "location", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - List: &dcl.Path{ - Description: "The function used to list information about many Cluster", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "location", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - }, - Components: &dcl.Components{ - Schemas: map[string]*dcl.Component{ - "Cluster": &dcl.Component{ - Title: "Cluster", - ID: "projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/awsClusters/{{ "{{" }}name{{ "}}" }}", - ParentContainer: "project", - HasCreate: true, - SchemaProperty: dcl.Property{ - Type: "object", - Required: []string{ - "name", - "networking", - "awsRegion", - "controlPlane", - "authorization", - "project", - "location", - "fleet", - }, - Properties: map[string]*dcl.Property{ - "annotations": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "Annotations", - Description: "Optional. Annotations on the cluster. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. Key can have 2 segments: prefix (optional) and name (required), separated by a slash (/). Prefix must be a DNS subdomain. 
Name must be 63 characters or less, begin and end with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics between.", - }, - "authorization": &dcl.Property{ - Type: "object", - GoName: "Authorization", - GoType: "ClusterAuthorization", - Description: "Configuration related to the cluster RBAC settings.", - Required: []string{ - "adminUsers", - }, - Properties: map[string]*dcl.Property{ - "adminGroups": &dcl.Property{ - Type: "array", - GoName: "AdminGroups", - Description: "Groups of users that can perform operations as a cluster admin. A managed ClusterRoleBinding will be created to grant the `cluster-admin` ClusterRole to the groups. Up to ten admin groups can be provided. For more info on RBAC, see https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "ClusterAuthorizationAdminGroups", - Required: []string{ - "group", - }, - Properties: map[string]*dcl.Property{ - "group": &dcl.Property{ - Type: "string", - GoName: "Group", - Description: "The name of the group, e.g. `my-group@domain.com`.", - }, - }, - }, - }, - "adminUsers": &dcl.Property{ - Type: "array", - GoName: "AdminUsers", - Description: "Users to perform operations as a cluster admin. A managed ClusterRoleBinding will be created to grant the `cluster-admin` ClusterRole to the users. Up to ten admin users can be provided. For more info on RBAC, see https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "ClusterAuthorizationAdminUsers", - Required: []string{ - "username", - }, - Properties: map[string]*dcl.Property{ - "username": &dcl.Property{ - Type: "string", - GoName: "Username", - Description: "The name of the user, e.g. 
`my-gcp-id@gmail.com`.", - }, - }, - }, - }, - }, - }, - "awsRegion": &dcl.Property{ - Type: "string", - GoName: "AwsRegion", - Description: "The AWS region where the cluster runs. Each Google Cloud region supports a subset of nearby AWS regions. You can call to list all supported AWS regions within a given Google Cloud region.", - Immutable: true, - }, - "binaryAuthorization": &dcl.Property{ - Type: "object", - GoName: "BinaryAuthorization", - GoType: "ClusterBinaryAuthorization", - Description: "Configuration options for the Binary Authorization feature.", - ServerDefault: true, - Properties: map[string]*dcl.Property{ - "evaluationMode": &dcl.Property{ - Type: "string", - GoName: "EvaluationMode", - GoType: "ClusterBinaryAuthorizationEvaluationModeEnum", - Description: "Mode of operation for Binary Authorization policy evaluation. Possible values: DISABLED, PROJECT_SINGLETON_POLICY_ENFORCE", - ServerDefault: true, - Enum: []string{ - "DISABLED", - "PROJECT_SINGLETON_POLICY_ENFORCE", - }, - }, - }, - }, - "controlPlane": &dcl.Property{ - Type: "object", - GoName: "ControlPlane", - GoType: "ClusterControlPlane", - Description: "Configuration related to the cluster control plane.", - Required: []string{ - "version", - "subnetIds", - "configEncryption", - "iamInstanceProfile", - "databaseEncryption", - "awsServicesAuthentication", - }, - Properties: map[string]*dcl.Property{ - "awsServicesAuthentication": &dcl.Property{ - Type: "object", - GoName: "AwsServicesAuthentication", - GoType: "ClusterControlPlaneAwsServicesAuthentication", - Description: "Authentication configuration for management of AWS resources.", - Required: []string{ - "roleArn", - }, - Properties: map[string]*dcl.Property{ - "roleArn": &dcl.Property{ - Type: "string", - GoName: "RoleArn", - Description: "The Amazon Resource Name (ARN) of the role that the Anthos Multi-Cloud API will assume when managing AWS resources on your account.", - }, - "roleSessionName": &dcl.Property{ - Type: "string", - 
GoName: "RoleSessionName", - Description: "Optional. An identifier for the assumed role session. When unspecified, it defaults to `multicloud-service-agent`.", - ServerDefault: true, - }, - }, - }, - "configEncryption": &dcl.Property{ - Type: "object", - GoName: "ConfigEncryption", - GoType: "ClusterControlPlaneConfigEncryption", - Description: "The ARN of the AWS KMS key used to encrypt cluster configuration.", - Required: []string{ - "kmsKeyArn", - }, - Properties: map[string]*dcl.Property{ - "kmsKeyArn": &dcl.Property{ - Type: "string", - GoName: "KmsKeyArn", - Description: "The ARN of the AWS KMS key used to encrypt cluster configuration.", - }, - }, - }, - "databaseEncryption": &dcl.Property{ - Type: "object", - GoName: "DatabaseEncryption", - GoType: "ClusterControlPlaneDatabaseEncryption", - Description: "The ARN of the AWS KMS key used to encrypt cluster secrets.", - Immutable: true, - Required: []string{ - "kmsKeyArn", - }, - Properties: map[string]*dcl.Property{ - "kmsKeyArn": &dcl.Property{ - Type: "string", - GoName: "KmsKeyArn", - Description: "The ARN of the AWS KMS key used to encrypt cluster secrets.", - Immutable: true, - }, - }, - }, - "iamInstanceProfile": &dcl.Property{ - Type: "string", - GoName: "IamInstanceProfile", - Description: "The name of the AWS IAM instance pofile to assign to each control plane replica.", - }, -{{- if ne $.TargetVersionName "ga" }} - "instancePlacement": &dcl.Property{ - Type: "object", - GoName: "InstancePlacement", - GoType: "ClusterControlPlaneInstancePlacement", - Description: "Details of placement information for an instance.", - Immutable: true, - ServerDefault: true, - Properties: map[string]*dcl.Property{ - "tenancy": &dcl.Property{ - Type: "string", - GoName: "Tenancy", - GoType: "ClusterControlPlaneInstancePlacementTenancyEnum", - Description: "The tenancy for the instance. 
Possible values: TENANCY_UNSPECIFIED, DEFAULT, DEDICATED, HOST", - Immutable: true, - ServerDefault: true, - Enum: []string{ - "TENANCY_UNSPECIFIED", - "DEFAULT", - "DEDICATED", - "HOST", - }, - }, - }, - }, -{{- end }} - "instanceType": &dcl.Property{ - Type: "string", - GoName: "InstanceType", - Description: "Optional. The AWS instance type. When unspecified, it defaults to `m5.large`.", - ServerDefault: true, - }, - "mainVolume": &dcl.Property{ - Type: "object", - GoName: "MainVolume", - GoType: "ClusterControlPlaneMainVolume", - Description: "Optional. Configuration related to the main volume provisioned for each control plane replica. The main volume is in charge of storing all of the cluster's etcd state. Volumes will be provisioned in the availability zone associated with the corresponding subnet. When unspecified, it defaults to 8 GiB with the GP2 volume type.", - Immutable: true, - ServerDefault: true, - Properties: map[string]*dcl.Property{ - "iops": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "Iops", - Description: "Optional. The number of I/O operations per second (IOPS) to provision for GP3 volume.", - Immutable: true, - ServerDefault: true, - }, - "kmsKeyArn": &dcl.Property{ - Type: "string", - GoName: "KmsKeyArn", - Description: "Optional. The Amazon Resource Name (ARN) of the Customer Managed Key (CMK) used to encrypt AWS EBS volumes. If not specified, the default Amazon managed key associated to the AWS region where this cluster runs will be used.", - Immutable: true, - }, - "sizeGib": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "SizeGib", - Description: "Optional. The size of the volume, in GiBs. When unspecified, a default value is provided. See the specific reference in the parent resource.", - Immutable: true, - ServerDefault: true, - }, - "throughput": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "Throughput", - Description: "Optional. The throughput to provision for the volume, in MiB/s. 
Only valid if the volume type is GP3. If volume type is gp3 and throughput is not specified, the throughput will defaults to 125.", - Immutable: true, - ServerDefault: true, - }, - "volumeType": &dcl.Property{ - Type: "string", - GoName: "VolumeType", - GoType: "ClusterControlPlaneMainVolumeVolumeTypeEnum", - Description: "Optional. Type of the EBS volume. When unspecified, it defaults to GP2 volume. Possible values: VOLUME_TYPE_UNSPECIFIED, GP2, GP3", - Immutable: true, - ServerDefault: true, - Enum: []string{ - "VOLUME_TYPE_UNSPECIFIED", - "GP2", - "GP3", - }, - }, - }, - }, - "proxyConfig": &dcl.Property{ - Type: "object", - GoName: "ProxyConfig", - GoType: "ClusterControlPlaneProxyConfig", - Description: "Proxy configuration for outbound HTTP(S) traffic.", - Required: []string{ - "secretArn", - "secretVersion", - }, - Properties: map[string]*dcl.Property{ - "secretArn": &dcl.Property{ - Type: "string", - GoName: "SecretArn", - Description: "The ARN of the AWS Secret Manager secret that contains the HTTP(S) proxy configuration.", - }, - "secretVersion": &dcl.Property{ - Type: "string", - GoName: "SecretVersion", - Description: "The version string of the AWS Secret Manager secret that contains the HTTP(S) proxy configuration.", - }, - }, - }, - "rootVolume": &dcl.Property{ - Type: "object", - GoName: "RootVolume", - GoType: "ClusterControlPlaneRootVolume", - Description: "Optional. Configuration related to the root volume provisioned for each control plane replica. Volumes will be provisioned in the availability zone associated with the corresponding subnet. When unspecified, it defaults to 32 GiB with the GP2 volume type.", - ServerDefault: true, - Properties: map[string]*dcl.Property{ - "iops": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "Iops", - Description: "Optional. 
The number of I/O operations per second (IOPS) to provision for GP3 volume.", - ServerDefault: true, - }, - "kmsKeyArn": &dcl.Property{ - Type: "string", - GoName: "KmsKeyArn", - Description: "Optional. The Amazon Resource Name (ARN) of the Customer Managed Key (CMK) used to encrypt AWS EBS volumes. If not specified, the default Amazon managed key associated to the AWS region where this cluster runs will be used.", - }, - "sizeGib": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "SizeGib", - Description: "Optional. The size of the volume, in GiBs. When unspecified, a default value is provided. See the specific reference in the parent resource.", - ServerDefault: true, - }, - "throughput": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "Throughput", - Description: "Optional. The throughput to provision for the volume, in MiB/s. Only valid if the volume type is GP3. If volume type is gp3 and throughput is not specified, the throughput will defaults to 125.", - ServerDefault: true, - }, - "volumeType": &dcl.Property{ - Type: "string", - GoName: "VolumeType", - GoType: "ClusterControlPlaneRootVolumeVolumeTypeEnum", - Description: "Optional. Type of the EBS volume. When unspecified, it defaults to GP2 volume. Possible values: VOLUME_TYPE_UNSPECIFIED, GP2, GP3", - ServerDefault: true, - Enum: []string{ - "VOLUME_TYPE_UNSPECIFIED", - "GP2", - "GP3", - }, - }, - }, - }, - "securityGroupIds": &dcl.Property{ - Type: "array", - GoName: "SecurityGroupIds", - Description: "Optional. The IDs of additional security groups to add to control plane replicas. The Anthos Multi-Cloud API will automatically create and manage security groups with the minimum rules needed for a functioning cluster.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "sshConfig": &dcl.Property{ - Type: "object", - GoName: "SshConfig", - GoType: "ClusterControlPlaneSshConfig", - Description: "Optional. 
SSH configuration for how to access the underlying control plane machines.", - Required: []string{ - "ec2KeyPair", - }, - Properties: map[string]*dcl.Property{ - "ec2KeyPair": &dcl.Property{ - Type: "string", - GoName: "Ec2KeyPair", - Description: "The name of the EC2 key pair used to login into cluster machines.", - }, - }, - }, - "subnetIds": &dcl.Property{ - Type: "array", - GoName: "SubnetIds", - Description: "The list of subnets where control plane replicas will run. A replica will be provisioned on each subnet and up to three values can be provided. Each subnet must be in a different AWS Availability Zone (AZ).", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "tags": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "Tags", - Description: "Optional. A set of AWS resource tags to propagate to all underlying managed AWS resources. Specify at most 50 pairs containing alphanumerics, spaces, and symbols (.+-=_:@/). Keys can be up to 127 Unicode characters. Values can be up to 255 Unicode characters.", - }, - "version": &dcl.Property{ - Type: "string", - GoName: "Version", - Description: "The Kubernetes version to run on control plane replicas (e.g. `1.19.10-gke.1000`). You can list all supported versions on a given Google Cloud region by calling .", - }, - }, - }, - "createTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "CreateTime", - ReadOnly: true, - Description: "Output only. The time at which this cluster was created.", - Immutable: true, - }, - "description": &dcl.Property{ - Type: "string", - GoName: "Description", - Description: "Optional. A human readable description of this cluster. Cannot be longer than 255 UTF-8 encoded bytes.", - }, - "endpoint": &dcl.Property{ - Type: "string", - GoName: "Endpoint", - ReadOnly: true, - Description: "Output only. 
The endpoint of the cluster's API server.", - Immutable: true, - }, - "etag": &dcl.Property{ - Type: "string", - GoName: "Etag", - ReadOnly: true, - Description: "Allows clients to perform consistent read-modify-writes through optimistic concurrency control. May be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.", - Immutable: true, - }, - "fleet": &dcl.Property{ - Type: "object", - GoName: "Fleet", - GoType: "ClusterFleet", - Description: "Fleet configuration.", - Immutable: true, - Required: []string{ - "project", - }, - Properties: map[string]*dcl.Property{ - "membership": &dcl.Property{ - Type: "string", - GoName: "Membership", - ReadOnly: true, - Description: "The name of the managed Hub Membership resource associated to this cluster. Membership names are formatted as projects//locations/global/membership/.", - Immutable: true, - }, - "project": &dcl.Property{ - Type: "string", - GoName: "Project", - Description: "The number of the Fleet host project where this cluster will be registered.", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Cloudresourcemanager/Project", - Field: "name", - Parent: true, - }, - }, - HasLongForm: true, - }, - }, - }, - "location": &dcl.Property{ - Type: "string", - GoName: "Location", - Description: "The location for the resource", - Immutable: true, - Parameter: true, - }, -{{- if ne $.TargetVersionName "ga" }} - "loggingConfig": &dcl.Property{ - Type: "object", - GoName: "LoggingConfig", - GoType: "ClusterLoggingConfig", - Description: "Logging configuration.", - ServerDefault: true, - Properties: map[string]*dcl.Property{ - "componentConfig": &dcl.Property{ - Type: "object", - GoName: "ComponentConfig", - GoType: "ClusterLoggingConfigComponentConfig", - Description: "Configuration of the logging components.", - ServerDefault: true, - Properties: map[string]*dcl.Property{ - "enableComponents": 
&dcl.Property{ - Type: "array", - GoName: "EnableComponents", - Description: "Components of the logging configuration to be enabled.", - ServerDefault: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "ClusterLoggingConfigComponentConfigEnableComponentsEnum", - Enum: []string{ - "COMPONENT_UNSPECIFIED", - "SYSTEM_COMPONENTS", - "WORKLOADS", - }, - }, - }, - }, - }, - }, - }, - "monitoringConfig": &dcl.Property{ - Type: "object", - GoName: "MonitoringConfig", - GoType: "ClusterMonitoringConfig", - Description: "Monitoring configuration.", - ServerDefault: true, - Properties: map[string]*dcl.Property{ - "managedPrometheusConfig": &dcl.Property{ - Type: "object", - GoName: "ManagedPrometheusConfig", - GoType: "ClusterMonitoringConfigManagedPrometheusConfig", - Description: "Configuration of the Google Cloud Managed Service for Prometheus.", - ServerDefault: true, - Properties: map[string]*dcl.Property{ - "enabled": &dcl.Property{ - Type: "boolean", - GoName: "Enabled", - Description: "Configuration of the enable Managed Collection.", - ServerDefault: true, - }, - }, - }, - }, - }, -{{- end }} - "name": &dcl.Property{ - Type: "string", - GoName: "Name", - Description: "The name of this resource.", - Immutable: true, - HasLongForm: true, - }, - "networking": &dcl.Property{ - Type: "object", - GoName: "Networking", - GoType: "ClusterNetworking", - Description: "Cluster-wide networking configuration.", - Required: []string{ - "vpcId", - "podAddressCidrBlocks", - "serviceAddressCidrBlocks", - }, - Properties: map[string]*dcl.Property{ - "perNodePoolSgRulesDisabled": &dcl.Property{ - Type: "boolean", - GoName: "PerNodePoolSgRulesDisabled", - Description: "Disable the per node pool subnet security group rules on the control plane security group. When set to true, you must also provide one or more security groups that ensure node pools are able to send requests to the control plane on TCP/443 and TCP/8132. 
Failure to do so may result in unavailable node pools.", - }, - "podAddressCidrBlocks": &dcl.Property{ - Type: "array", - GoName: "PodAddressCidrBlocks", - Description: "All pods in the cluster are assigned an RFC1918 IPv4 address from these ranges. Only a single range is supported. This field cannot be changed after creation.", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "serviceAddressCidrBlocks": &dcl.Property{ - Type: "array", - GoName: "ServiceAddressCidrBlocks", - Description: "All services in the cluster are assigned an RFC1918 IPv4 address from these ranges. Only a single range is supported. This field cannot be changed after creation.", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "vpcId": &dcl.Property{ - Type: "string", - GoName: "VPCId", - Description: "The VPC associated with the cluster. All component clusters (i.e. control plane and node pools) run on a single VPC. This field cannot be changed after creation.", - Immutable: true, - }, - }, - }, - "project": &dcl.Property{ - Type: "string", - GoName: "Project", - Description: "The project for the resource", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Cloudresourcemanager/Project", - Field: "name", - Parent: true, - }, - }, - Parameter: true, - }, - "reconciling": &dcl.Property{ - Type: "boolean", - GoName: "Reconciling", - ReadOnly: true, - Description: "Output only. If set, there are currently changes in flight to the cluster.", - Immutable: true, - }, - "state": &dcl.Property{ - Type: "string", - GoName: "State", - GoType: "ClusterStateEnum", - ReadOnly: true, - Description: "Output only. The current state of the cluster. 
Possible values: STATE_UNSPECIFIED, PROVISIONING, RUNNING, RECONCILING, STOPPING, ERROR, DEGRADED", - Immutable: true, - Enum: []string{ - "STATE_UNSPECIFIED", - "PROVISIONING", - "RUNNING", - "RECONCILING", - "STOPPING", - "ERROR", - "DEGRADED", - }, - }, - "uid": &dcl.Property{ - Type: "string", - GoName: "Uid", - ReadOnly: true, - Description: "Output only. A globally unique identifier for the cluster.", - Immutable: true, - }, - "updateTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "UpdateTime", - ReadOnly: true, - Description: "Output only. The time at which this cluster was last updated.", - Immutable: true, - }, - "workloadIdentityConfig": &dcl.Property{ - Type: "object", - GoName: "WorkloadIdentityConfig", - GoType: "ClusterWorkloadIdentityConfig", - ReadOnly: true, - Description: "Output only. Workload Identity settings.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "identityProvider": &dcl.Property{ - Type: "string", - GoName: "IdentityProvider", - Description: "The ID of the OIDC Identity Provider (IdP) associated to the Workload Identity Pool.", - Immutable: true, - }, - "issuerUri": &dcl.Property{ - Type: "string", - GoName: "IssuerUri", - Description: "The OIDC issuer URL for this cluster.", - Immutable: true, - }, - "workloadPool": &dcl.Property{ - Type: "string", - GoName: "WorkloadPool", - Description: "The Workload Identity Pool associated to the cluster.", - Immutable: true, - }, - }, - }, - }, - }, - }, - }, - }, - } -} diff --git a/mmv1/third_party/terraform/services/containeraws/node_pool_schema.go.tmpl b/mmv1/third_party/terraform/services/containeraws/node_pool_schema.go.tmpl deleted file mode 100644 index 8e8a935379cc..000000000000 --- a/mmv1/third_party/terraform/services/containeraws/node_pool_schema.go.tmpl +++ /dev/null @@ -1,661 +0,0 @@ -package containeraws - -import ( - dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" -) - -func DCLNodePoolSchema() *dcl.Schema { - 
return &dcl.Schema{ - Info: &dcl.Info{ - Title: "ContainerAws/NodePool", - Description: "An Anthos node pool running on AWS.", - StructName: "NodePool", - Reference: &dcl.Link{ - Text: "API reference", - URL: "https://cloud.google.com/kubernetes-engine/multi-cloud/docs/reference/rest/v1/projects.locations.awsClusters.awsNodePools", - }, - Guides: []*dcl.Link{ - &dcl.Link{ - Text: "Multicloud overview", - URL: "https://cloud.google.com/kubernetes-engine/multi-cloud/docs", - }, - }, - }, - Paths: &dcl.Paths{ - Get: &dcl.Path{ - Description: "The function used to get information about a NodePool", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "nodePool", - Required: true, - Description: "A full instance of a NodePool", - }, - }, - }, - Apply: &dcl.Path{ - Description: "The function used to apply information about a NodePool", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "nodePool", - Required: true, - Description: "A full instance of a NodePool", - }, - }, - }, - Delete: &dcl.Path{ - Description: "The function used to delete a NodePool", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "nodePool", - Required: true, - Description: "A full instance of a NodePool", - }, - }, - }, - DeleteAll: &dcl.Path{ - Description: "The function used to delete all NodePool", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "location", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "cluster", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - List: &dcl.Path{ - Description: "The function used to list information about many NodePool", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - 
}, - }, - dcl.PathParameters{ - Name: "location", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "cluster", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - }, - Components: &dcl.Components{ - Schemas: map[string]*dcl.Component{ - "NodePool": &dcl.Component{ - Title: "NodePool", - ID: "projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/awsClusters/{{ "{{" }}cluster{{ "}}" }}/awsNodePools/{{ "{{" }}name{{ "}}" }}", - ParentContainer: "project", - HasCreate: true, - SchemaProperty: dcl.Property{ - Type: "object", - Required: []string{ - "name", - "version", - "config", - "autoscaling", - "subnetId", - "maxPodsConstraint", - "project", - "location", - "cluster", - }, - Properties: map[string]*dcl.Property{ - "annotations": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "Annotations", - Description: "Optional. Annotations on the node pool. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. Key can have 2 segments: prefix (optional) and name (required), separated by a slash (/). Prefix must be a DNS subdomain. Name must be 63 characters or less, begin and end with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics between.", - }, - "autoscaling": &dcl.Property{ - Type: "object", - GoName: "Autoscaling", - GoType: "NodePoolAutoscaling", - Description: "Autoscaler configuration for this node pool.", - Required: []string{ - "minNodeCount", - "maxNodeCount", - }, - Properties: map[string]*dcl.Property{ - "maxNodeCount": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "MaxNodeCount", - Description: "Maximum number of nodes in the NodePool. 
Must be >= min_node_count.", - }, - "minNodeCount": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "MinNodeCount", - Description: "Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count.", - }, - }, - }, - "cluster": &dcl.Property{ - Type: "string", - GoName: "Cluster", - Description: "The awsCluster for the resource", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Gkemulticloud/Cluster", - Field: "name", - Parent: true, - }, - }, - Parameter: true, - }, - "config": &dcl.Property{ - Type: "object", - GoName: "Config", - GoType: "NodePoolConfig", - Description: "The configuration of the node pool.", - Required: []string{ - "iamInstanceProfile", - "configEncryption", - }, - Properties: map[string]*dcl.Property{ - "autoscalingMetricsCollection": &dcl.Property{ - Type: "object", - GoName: "AutoscalingMetricsCollection", - GoType: "NodePoolConfigAutoscalingMetricsCollection", - Description: "Optional. Configuration related to CloudWatch metrics collection on the Auto Scaling group of the node pool. When unspecified, metrics collection is disabled.", - Required: []string{ - "granularity", - }, - Properties: map[string]*dcl.Property{ - "granularity": &dcl.Property{ - Type: "string", - GoName: "Granularity", - Description: "The frequency at which EC2 Auto Scaling sends aggregated data to AWS CloudWatch. The only valid value is \"1Minute\".", - }, - "metrics": &dcl.Property{ - Type: "array", - GoName: "Metrics", - Description: "The metrics to enable. For a list of valid metrics, see https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_EnableMetricsCollection.html. 
If you specify granularity and don't specify any metrics, all metrics are enabled.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - }, - }, - "configEncryption": &dcl.Property{ - Type: "object", - GoName: "ConfigEncryption", - GoType: "NodePoolConfigConfigEncryption", - Description: "The ARN of the AWS KMS key used to encrypt node pool configuration.", - Required: []string{ - "kmsKeyArn", - }, - Properties: map[string]*dcl.Property{ - "kmsKeyArn": &dcl.Property{ - Type: "string", - GoName: "KmsKeyArn", - Description: "The ARN of the AWS KMS key used to encrypt node pool configuration.", - }, - }, - }, - "iamInstanceProfile": &dcl.Property{ - Type: "string", - GoName: "IamInstanceProfile", - Description: "The name of the AWS IAM role assigned to nodes in the pool.", - }, -{{- if ne $.TargetVersionName "ga" }} - "imageType": &dcl.Property{ - Type: "string", - GoName: "ImageType", - Description: "The OS image type to use on node pool instances.", - Immutable: true, - ServerDefault: true, - }, - "instancePlacement": &dcl.Property{ - Type: "object", - GoName: "InstancePlacement", - GoType: "NodePoolConfigInstancePlacement", - Description: "Details of placement information for an instance.", - Immutable: true, - ServerDefault: true, - Properties: map[string]*dcl.Property{ - "tenancy": &dcl.Property{ - Type: "string", - GoName: "Tenancy", - GoType: "NodePoolConfigInstancePlacementTenancyEnum", - Description: "The tenancy for the instance. Possible values: TENANCY_UNSPECIFIED, DEFAULT, DEDICATED, HOST", - Immutable: true, - ServerDefault: true, - Enum: []string{ - "TENANCY_UNSPECIFIED", - "DEFAULT", - "DEDICATED", - "HOST", - }, - }, - }, - }, -{{- end }} - "instanceType": &dcl.Property{ - Type: "string", - GoName: "InstanceType", - Description: "Optional. The AWS instance type. 
When unspecified, it defaults to `m5.large`.", - ServerDefault: true, - }, - "labels": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "Labels", - Description: "Optional. The initial labels assigned to nodes of this node pool. An object containing a list of \"key\": value pairs. Example: { \"name\": \"wrench\", \"mass\": \"1.3kg\", \"count\": \"3\" }.", - }, - "proxyConfig": &dcl.Property{ - Type: "object", - GoName: "ProxyConfig", - GoType: "NodePoolConfigProxyConfig", - Description: "Proxy configuration for outbound HTTP(S) traffic.", - Required: []string{ - "secretArn", - "secretVersion", - }, - Properties: map[string]*dcl.Property{ - "secretArn": &dcl.Property{ - Type: "string", - GoName: "SecretArn", - Description: "The ARN of the AWS Secret Manager secret that contains the HTTP(S) proxy configuration.", - }, - "secretVersion": &dcl.Property{ - Type: "string", - GoName: "SecretVersion", - Description: "The version string of the AWS Secret Manager secret that contains the HTTP(S) proxy configuration.", - }, - }, - }, - "rootVolume": &dcl.Property{ - Type: "object", - GoName: "RootVolume", - GoType: "NodePoolConfigRootVolume", - Description: "Optional. Template for the root volume provisioned for node pool nodes. Volumes will be provisioned in the availability zone assigned to the node pool subnet. When unspecified, it defaults to 32 GiB with the GP2 volume type.", - ServerDefault: true, - Properties: map[string]*dcl.Property{ - "iops": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "Iops", - Description: "Optional. The number of I/O operations per second (IOPS) to provision for GP3 volume.", - ServerDefault: true, - }, - "kmsKeyArn": &dcl.Property{ - Type: "string", - GoName: "KmsKeyArn", - Description: "Optional. The Amazon Resource Name (ARN) of the Customer Managed Key (CMK) used to encrypt AWS EBS volumes. 
If not specified, the default Amazon managed key associated to the AWS region where this cluster runs will be used.", - }, - "sizeGib": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "SizeGib", - Description: "Optional. The size of the volume, in GiBs. When unspecified, a default value is provided. See the specific reference in the parent resource.", - ServerDefault: true, - }, - "throughput": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "Throughput", - Description: "Optional. The throughput to provision for the volume, in MiB/s. Only valid if the volume type is GP3. If volume type is gp3 and throughput is not specified, the throughput will defaults to 125.", - ServerDefault: true, - }, - "volumeType": &dcl.Property{ - Type: "string", - GoName: "VolumeType", - GoType: "NodePoolConfigRootVolumeVolumeTypeEnum", - Description: "Optional. Type of the EBS volume. When unspecified, it defaults to GP2 volume. Possible values: VOLUME_TYPE_UNSPECIFIED, GP2, GP3", - ServerDefault: true, - Enum: []string{ - "VOLUME_TYPE_UNSPECIFIED", - "GP2", - "GP3", - }, - }, - }, - }, - "securityGroupIds": &dcl.Property{ - Type: "array", - GoName: "SecurityGroupIds", - Description: "Optional. The IDs of additional security groups to add to nodes in this pool. The manager will automatically create security groups with minimum rules needed for a functioning cluster.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", -{{- if ne $.TargetVersionName "ga" }} - }, - }, - "spotConfig": &dcl.Property{ - Type: "object", - GoName: "SpotConfig", - GoType: "NodePoolConfigSpotConfig", - Description: "Optional. When specified, the node pool will provision Spot instances from the set of spot_config.instance_types. 
This field is mutually exclusive with `instance_type`", - Immutable: true, - Required: []string{ - "instanceTypes", - }, - Properties: map[string]*dcl.Property{ - "instanceTypes": &dcl.Property{ - Type: "array", - GoName: "InstanceTypes", - Description: "List of AWS EC2 instance types for creating a spot node pool's nodes. The specified instance types must have the same number of CPUs and memory. You can use the Amazon EC2 Instance Selector tool (https://github.com/aws/amazon-ec2-instance-selector) to choose instance types with matching CPU and memory", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, -{{- end }} - }, - }, - "sshConfig": &dcl.Property{ - Type: "object", - GoName: "SshConfig", - GoType: "NodePoolConfigSshConfig", - Description: "Optional. The SSH configuration.", - Required: []string{ - "ec2KeyPair", - }, - Properties: map[string]*dcl.Property{ - "ec2KeyPair": &dcl.Property{ - Type: "string", - GoName: "Ec2KeyPair", - Description: "The name of the EC2 key pair used to login into cluster machines.", - }, - }, - }, - "tags": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "Tags", - Description: "Optional. Key/value metadata to assign to each underlying AWS resource. Specify at most 50 pairs containing alphanumerics, spaces, and symbols (.+-=_:@/). Keys can be up to 127 Unicode characters. Values can be up to 255 Unicode characters.", - }, - "taints": &dcl.Property{ - Type: "array", - GoName: "Taints", - Description: "Optional. 
The initial taints assigned to nodes of this node pool.", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "NodePoolConfigTaints", - Required: []string{ - "key", - "value", - "effect", - }, - Properties: map[string]*dcl.Property{ - "effect": &dcl.Property{ - Type: "string", - GoName: "Effect", - GoType: "NodePoolConfigTaintsEffectEnum", - Description: "The taint effect. Possible values: EFFECT_UNSPECIFIED, NO_SCHEDULE, PREFER_NO_SCHEDULE, NO_EXECUTE", - Immutable: true, - Enum: []string{ - "EFFECT_UNSPECIFIED", - "NO_SCHEDULE", - "PREFER_NO_SCHEDULE", - "NO_EXECUTE", - }, - }, - "key": &dcl.Property{ - Type: "string", - GoName: "Key", - Description: "Key for the taint.", - Immutable: true, - }, - "value": &dcl.Property{ - Type: "string", - GoName: "Value", - Description: "Value for the taint.", - Immutable: true, - }, - }, - }, - }, - }, - }, - "createTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "CreateTime", - ReadOnly: true, - Description: "Output only. The time at which this node pool was created.", - Immutable: true, - }, - "etag": &dcl.Property{ - Type: "string", - GoName: "Etag", - ReadOnly: true, - Description: "Allows clients to perform consistent read-modify-writes through optimistic concurrency control. May be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.", - Immutable: true, - }, - "kubeletConfig": &dcl.Property{ - Type: "object", - GoName: "KubeletConfig", - GoType: "NodePoolKubeletConfig", - Description: "The kubelet configuration for the node pool.", - Immutable: true, - ServerDefault: true, - Properties: map[string]*dcl.Property{ - "cpuCfsQuota": &dcl.Property{ - Type: "boolean", - GoName: "CpuCfsQuota", - Description: "Whether or not to enable CPU CFS quota. 
Defaults to true.", - Immutable: true, - ServerDefault: true, - }, - "cpuCfsQuotaPeriod": &dcl.Property{ - Type: "string", - GoName: "CpuCfsQuotaPeriod", - Description: "Optional. The CPU CFS quota period to use for the node. Defaults to \"100ms\".", - Immutable: true, - }, - "cpuManagerPolicy": &dcl.Property{ - Type: "string", - GoName: "CpuManagerPolicy", - GoType: "NodePoolKubeletConfigCpuManagerPolicyEnum", - Description: "The CpuManagerPolicy to use for the node. Defaults to \"none\".", - Immutable: true, - ServerDefault: true, - Enum: []string{ - "none", - "static", - }, - }, - "podPidsLimit": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "PodPidsLimit", - Description: "Optional. The maximum number of PIDs in each pod running on the node. The limit scales automatically based on underlying machine size if left unset.", - Immutable: true, - }, - }, - }, - "location": &dcl.Property{ - Type: "string", - GoName: "Location", - Description: "The location for the resource", - Immutable: true, - Parameter: true, - }, - "management": &dcl.Property{ - Type: "object", - GoName: "Management", - GoType: "NodePoolManagement", - Description: "The Management configuration for this node pool.", - Properties: map[string]*dcl.Property{ - "autoRepair": &dcl.Property{ - Type: "boolean", - GoName: "AutoRepair", - Description: "Optional. 
Whether or not the nodes will be automatically repaired.", - }, - }, - }, - "maxPodsConstraint": &dcl.Property{ - Type: "object", - GoName: "MaxPodsConstraint", - GoType: "NodePoolMaxPodsConstraint", - Description: "The constraint on the maximum number of pods that can be run simultaneously on a node in the node pool.", - Immutable: true, - Required: []string{ - "maxPodsPerNode", - }, - Properties: map[string]*dcl.Property{ - "maxPodsPerNode": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "MaxPodsPerNode", - Description: "The maximum number of pods to schedule on a single node.", - Immutable: true, - }, - }, - }, - "name": &dcl.Property{ - Type: "string", - GoName: "Name", - Description: "The name of this resource.", - Immutable: true, - HasLongForm: true, - }, - "project": &dcl.Property{ - Type: "string", - GoName: "Project", - Description: "The project for the resource", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Cloudresourcemanager/Project", - Field: "name", - Parent: true, - }, - }, - Parameter: true, - }, - "reconciling": &dcl.Property{ - Type: "boolean", - GoName: "Reconciling", - ReadOnly: true, - Description: "Output only. If set, there are currently changes in flight to the node pool.", - Immutable: true, - }, - "state": &dcl.Property{ - Type: "string", - GoName: "State", - GoType: "NodePoolStateEnum", - ReadOnly: true, - Description: "Output only. The lifecycle state of the node pool. 
Possible values: STATE_UNSPECIFIED, PROVISIONING, RUNNING, RECONCILING, STOPPING, ERROR, DEGRADED", - Immutable: true, - Enum: []string{ - "STATE_UNSPECIFIED", - "PROVISIONING", - "RUNNING", - "RECONCILING", - "STOPPING", - "ERROR", - "DEGRADED", - }, - }, - "subnetId": &dcl.Property{ - Type: "string", - GoName: "SubnetId", - Description: "The subnet where the node pool node run.", - Immutable: true, - }, - "uid": &dcl.Property{ - Type: "string", - GoName: "Uid", - ReadOnly: true, - Description: "Output only. A globally unique identifier for the node pool.", - Immutable: true, - }, - "updateSettings": &dcl.Property{ - Type: "object", - GoName: "UpdateSettings", - GoType: "NodePoolUpdateSettings", - Description: "Optional. Update settings control the speed and disruption of the node pool update.", - ServerDefault: true, - Properties: map[string]*dcl.Property{ - "surgeSettings": &dcl.Property{ - Type: "object", - GoName: "SurgeSettings", - GoType: "NodePoolUpdateSettingsSurgeSettings", - Description: "Optional. Settings for surge update.", - ServerDefault: true, - Properties: map[string]*dcl.Property{ - "maxSurge": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "MaxSurge", - Description: "Optional. The maximum number of nodes that can be created beyond the current size of the node pool during the update process.", - ServerDefault: true, - }, - "maxUnavailable": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "MaxUnavailable", - Description: "Optional. The maximum number of nodes that can be simultaneously unavailable during the update process. A node is considered unavailable if its status is not Ready.", - ServerDefault: true, - }, - }, - }, - }, - }, - "updateTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "UpdateTime", - ReadOnly: true, - Description: "Output only. 
The time at which this node pool was last updated.", - Immutable: true, - }, - "version": &dcl.Property{ - Type: "string", - GoName: "Version", - Description: "The Kubernetes version to run on this node pool (e.g. `1.19.10-gke.1000`). You can list all supported versions on a given Google Cloud region by calling GetAwsServerConfig.", - }, - }, - }, - }, - }, - }, - } -} diff --git a/mmv1/third_party/terraform/services/containerazure/azure_client_schema.go b/mmv1/third_party/terraform/services/containerazure/azure_client_schema.go deleted file mode 100644 index cde16847588f..000000000000 --- a/mmv1/third_party/terraform/services/containerazure/azure_client_schema.go +++ /dev/null @@ -1,179 +0,0 @@ -package containerazure - -import ( - dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" -) - -func DCLAzureClientSchema() *dcl.Schema { - return &dcl.Schema{ - Info: &dcl.Info{ - Title: "ContainerAzure/Client", - Description: "AzureClient resources hold client authentication information needed by the Anthos Multi-Cloud API to manage Azure resources on your Azure subscription.When an AzureCluster is created, an AzureClient resource needs to be provided and all operations on Azure resources associated to that cluster will authenticate to Azure services using the given client.AzureClient resources are immutable and cannot be modified upon creation.Each AzureClient resource is bound to a single Azure Active Directory Application and tenant.", - StructName: "AzureClient", - Reference: &dcl.Link{ - Text: "API reference", - URL: "https://cloud.google.com/kubernetes-engine/multi-cloud/docs/reference/rest/v1/projects.locations.azureClients", - }, - Guides: []*dcl.Link{ - &dcl.Link{ - Text: "Multicloud overview", - URL: "https://cloud.google.com/kubernetes-engine/multi-cloud/docs", - }, - }, - }, - Paths: &dcl.Paths{ - Get: &dcl.Path{ - Description: "The function used to get information about a Client", - Parameters: []dcl.PathParameters{ - 
dcl.PathParameters{ - Name: "client", - Required: true, - Description: "A full instance of a Client", - }, - }, - }, - Apply: &dcl.Path{ - Description: "The function used to apply information about a Client", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "client", - Required: true, - Description: "A full instance of a Client", - }, - }, - }, - Delete: &dcl.Path{ - Description: "The function used to delete a Client", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "client", - Required: true, - Description: "A full instance of a Client", - }, - }, - }, - DeleteAll: &dcl.Path{ - Description: "The function used to delete all Client", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "location", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - List: &dcl.Path{ - Description: "The function used to list information about many Client", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "location", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - }, - Components: &dcl.Components{ - Schemas: map[string]*dcl.Component{ - "Client": &dcl.Component{ - Title: "AzureClient", - ID: "projects/{{project}}/locations/{{location}}/azureClients/{{name}}", - ParentContainer: "project", - HasCreate: true, - SchemaProperty: dcl.Property{ - Type: "object", - Required: []string{ - "name", - "tenantId", - "applicationId", - "project", - "location", - }, - Properties: map[string]*dcl.Property{ - "applicationId": &dcl.Property{ - Type: "string", - GoName: "ApplicationId", - Description: "The Azure Active Directory Application ID.", - Immutable: true, - }, - "certificate": &dcl.Property{ - Type: 
"string", - GoName: "Certificate", - ReadOnly: true, - Description: "Output only. The PEM encoded x509 certificate.", - Immutable: true, - }, - "createTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "CreateTime", - ReadOnly: true, - Description: "Output only. The time at which this resource was created.", - Immutable: true, - }, - "location": &dcl.Property{ - Type: "string", - GoName: "Location", - Description: "The location for the resource", - Immutable: true, - Parameter: true, - }, - "name": &dcl.Property{ - Type: "string", - GoName: "Name", - Description: "The name of this resource.", - Immutable: true, - HasLongForm: true, - }, - "project": &dcl.Property{ - Type: "string", - GoName: "Project", - Description: "The project for the resource", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Cloudresourcemanager/Project", - Field: "name", - Parent: true, - }, - }, - Parameter: true, - }, - "tenantId": &dcl.Property{ - Type: "string", - GoName: "TenantId", - Description: "The Azure Active Directory Tenant ID.", - Immutable: true, - }, - "uid": &dcl.Property{ - Type: "string", - GoName: "Uid", - ReadOnly: true, - Description: "Output only. 
A globally unique identifier for the client.", - Immutable: true, - }, - }, - }, - }, - }, - }, - } -} diff --git a/mmv1/third_party/terraform/services/containerazure/cluster_schema.go.tmpl b/mmv1/third_party/terraform/services/containerazure/cluster_schema.go.tmpl deleted file mode 100644 index 943ded3bf1e1..000000000000 --- a/mmv1/third_party/terraform/services/containerazure/cluster_schema.go.tmpl +++ /dev/null @@ -1,658 +0,0 @@ -package containerazure - -import ( - dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" -) - -func DCLClusterSchema() *dcl.Schema { - return &dcl.Schema{ - Info: &dcl.Info{ - Title: "ContainerAzure/Cluster", - Description: "An Anthos cluster running on Azure.", - StructName: "Cluster", - Reference: &dcl.Link{ - Text: "API reference", - URL: "https://cloud.google.com/kubernetes-engine/multi-cloud/docs/reference/rest/v1/projects.locations.azureClusters", - }, - Guides: []*dcl.Link{ - &dcl.Link{ - Text: "Multicloud overview", - URL: "https://cloud.google.com/kubernetes-engine/multi-cloud/docs", - }, - }, - }, - Paths: &dcl.Paths{ - Get: &dcl.Path{ - Description: "The function used to get information about a Cluster", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "cluster", - Required: true, - Description: "A full instance of a Cluster", - }, - }, - }, - Apply: &dcl.Path{ - Description: "The function used to apply information about a Cluster", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "cluster", - Required: true, - Description: "A full instance of a Cluster", - }, - }, - }, - Delete: &dcl.Path{ - Description: "The function used to delete a Cluster", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "cluster", - Required: true, - Description: "A full instance of a Cluster", - }, - }, - }, - DeleteAll: &dcl.Path{ - Description: "The function used to delete all Cluster", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: 
true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "location", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - List: &dcl.Path{ - Description: "The function used to list information about many Cluster", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "location", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - }, - Components: &dcl.Components{ - Schemas: map[string]*dcl.Component{ - "Cluster": &dcl.Component{ - Title: "Cluster", - ID: "projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/azureClusters/{{ "{{" }}name{{ "}}" }}", - ParentContainer: "project", - HasCreate: true, - SchemaProperty: dcl.Property{ - Type: "object", - Required: []string{ - "name", - "azureRegion", - "resourceGroupId", - "networking", - "controlPlane", - "authorization", - "project", - "location", - "fleet", - }, - Properties: map[string]*dcl.Property{ - "annotations": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "Annotations", - Description: "Optional. Annotations on the cluster. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. Keys can have 2 segments: prefix (optional) and name (required), separated by a slash (/). Prefix must be a DNS subdomain. 
Name must be 63 characters or less, begin and end with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics between.", - Immutable: true, - }, - "authorization": &dcl.Property{ - Type: "object", - GoName: "Authorization", - GoType: "ClusterAuthorization", - Description: "Configuration related to the cluster RBAC settings.", - Required: []string{ - "adminUsers", - }, - Properties: map[string]*dcl.Property{ - "adminGroups": &dcl.Property{ - Type: "array", - GoName: "AdminGroups", - Description: "Groups of users that can perform operations as a cluster admin. A managed ClusterRoleBinding will be created to grant the `cluster-admin` ClusterRole to the groups. Up to ten admin groups can be provided. For more info on RBAC, see https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "ClusterAuthorizationAdminGroups", - Required: []string{ - "group", - }, - Properties: map[string]*dcl.Property{ - "group": &dcl.Property{ - Type: "string", - GoName: "Group", - Description: "The name of the group, e.g. `my-group@domain.com`.", - }, - }, - }, - }, - "adminUsers": &dcl.Property{ - Type: "array", - GoName: "AdminUsers", - Description: "Users that can perform operations as a cluster admin. A new ClusterRoleBinding will be created to grant the cluster-admin ClusterRole to the users. Up to ten admin users can be provided. For more info on RBAC, see https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "ClusterAuthorizationAdminUsers", - Required: []string{ - "username", - }, - Properties: map[string]*dcl.Property{ - "username": &dcl.Property{ - Type: "string", - GoName: "Username", - Description: "The name of the user, e.g. 
`my-gcp-id@gmail.com`.", - }, - }, - }, - }, - }, - }, - "azureRegion": &dcl.Property{ - Type: "string", - GoName: "AzureRegion", - Description: "The Azure region where the cluster runs. Each Google Cloud region supports a subset of nearby Azure regions. You can call to list all supported Azure regions within a given Google Cloud region.", - Immutable: true, - }, - "azureServicesAuthentication": &dcl.Property{ - Type: "object", - GoName: "AzureServicesAuthentication", - GoType: "ClusterAzureServicesAuthentication", - Description: "Azure authentication configuration for management of Azure resources", - Conflicts: []string{ - "client", - }, - Required: []string{ - "tenantId", - "applicationId", - }, - Properties: map[string]*dcl.Property{ - "applicationId": &dcl.Property{ - Type: "string", - GoName: "ApplicationId", - Description: "The Azure Active Directory Application ID for Authentication configuration.", - }, - "tenantId": &dcl.Property{ - Type: "string", - GoName: "TenantId", - Description: "The Azure Active Directory Tenant ID for Authentication configuration.", - }, - }, - }, - "client": &dcl.Property{ - Type: "string", - GoName: "Client", - Description: "Name of the AzureClient. The `AzureClient` resource must reside on the same GCP project and region as the `AzureCluster`. `AzureClient` names are formatted as `projects//locations//azureClients/`. 
See Resource Names (https:cloud.google.com/apis/design/resource_names) for more details on Google Cloud resource names.", - Conflicts: []string{ - "azureServicesAuthentication", - }, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "ContainerAzure/AzureClient", - Field: "name", - }, - }, - }, - "controlPlane": &dcl.Property{ - Type: "object", - GoName: "ControlPlane", - GoType: "ClusterControlPlane", - Description: "Configuration related to the cluster control plane.", - Required: []string{ - "version", - "subnetId", - "sshConfig", - }, - Properties: map[string]*dcl.Property{ - "databaseEncryption": &dcl.Property{ - Type: "object", - GoName: "DatabaseEncryption", - GoType: "ClusterControlPlaneDatabaseEncryption", - Description: "Optional. Configuration related to application-layer secrets encryption.", - Immutable: true, - Required: []string{ - "keyId", - }, - Properties: map[string]*dcl.Property{ - "keyId": &dcl.Property{ - Type: "string", - GoName: "KeyId", - Description: "The ARM ID of the Azure Key Vault key to encrypt / decrypt data. For example: `/subscriptions//resourceGroups//providers/Microsoft.KeyVault/vaults//keys/` Encryption will always take the latest version of the key and hence specific version is not supported.", - Immutable: true, - }, - }, - }, - "mainVolume": &dcl.Property{ - Type: "object", - GoName: "MainVolume", - GoType: "ClusterControlPlaneMainVolume", - Description: "Optional. Configuration related to the main volume provisioned for each control plane replica. The main volume is in charge of storing all of the cluster's etcd state. When unspecified, it defaults to a 8-GiB Azure Disk.", - Immutable: true, - ServerDefault: true, - Properties: map[string]*dcl.Property{ - "sizeGib": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "SizeGib", - Description: "Optional. The size of the disk, in GiBs. When unspecified, a default value is provided. 
See the specific reference in the parent resource.", - Immutable: true, - ServerDefault: true, - }, - }, - }, - "proxyConfig": &dcl.Property{ - Type: "object", - GoName: "ProxyConfig", - GoType: "ClusterControlPlaneProxyConfig", - Description: "Proxy configuration for outbound HTTP(S) traffic.", - Immutable: true, - Required: []string{ - "resourceGroupId", - "secretId", - }, - Properties: map[string]*dcl.Property{ - "resourceGroupId": &dcl.Property{ - Type: "string", - GoName: "ResourceGroupId", - Description: "The ARM ID the of the resource group containing proxy keyvault. Resource group ids are formatted as `/subscriptions//resourceGroups/`", - Immutable: true, - }, - "secretId": &dcl.Property{ - Type: "string", - GoName: "SecretId", - Description: "The URL the of the proxy setting secret with its version. Secret ids are formatted as `https:.vault.azure.net/secrets//`.", - Immutable: true, - }, - }, - }, - "replicaPlacements": &dcl.Property{ - Type: "array", - GoName: "ReplicaPlacements", - Description: "Configuration for where to place the control plane replicas. Up to three replica placement instances can be specified. If replica_placements is set, the replica placement instances will be applied to the three control plane replicas as evenly as possible.", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "ClusterControlPlaneReplicaPlacements", - Required: []string{ - "subnetId", - "azureAvailabilityZone", - }, - Properties: map[string]*dcl.Property{ - "azureAvailabilityZone": &dcl.Property{ - Type: "string", - GoName: "AzureAvailabilityZone", - Description: "For a given replica, the Azure availability zone where to provision the control plane VM and the ETCD disk.", - Immutable: true, - }, - "subnetId": &dcl.Property{ - Type: "string", - GoName: "SubnetId", - Description: "For a given replica, the ARM ID of the subnet where the control plane VM is deployed. 
Make sure it's a subnet under the virtual network in the cluster configuration.", - Immutable: true, - }, - }, - }, - }, - "rootVolume": &dcl.Property{ - Type: "object", - GoName: "RootVolume", - GoType: "ClusterControlPlaneRootVolume", - Description: "Optional. Configuration related to the root volume provisioned for each control plane replica. When unspecified, it defaults to 32-GiB Azure Disk.", - Immutable: true, - ServerDefault: true, - Properties: map[string]*dcl.Property{ - "sizeGib": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "SizeGib", - Description: "Optional. The size of the disk, in GiBs. When unspecified, a default value is provided. See the specific reference in the parent resource.", - Immutable: true, - ServerDefault: true, - }, - }, - }, - "sshConfig": &dcl.Property{ - Type: "object", - GoName: "SshConfig", - GoType: "ClusterControlPlaneSshConfig", - Description: "SSH configuration for how to access the underlying control plane machines.", - Required: []string{ - "authorizedKey", - }, - Properties: map[string]*dcl.Property{ - "authorizedKey": &dcl.Property{ - Type: "string", - GoName: "AuthorizedKey", - Description: "The SSH public key data for VMs managed by Anthos. This accepts the authorized_keys file format used in OpenSSH according to the sshd(8) manual page.", - }, - }, - }, - "subnetId": &dcl.Property{ - Type: "string", - GoName: "SubnetId", - Description: "The ARM ID of the subnet where the control plane VMs are deployed. Example: `/subscriptions//resourceGroups//providers/Microsoft.Network/virtualNetworks//subnets/default`.", - Immutable: true, - }, - "tags": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "Tags", - Description: "Optional. 
A set of tags to apply to all underlying control plane Azure resources.", - Immutable: true, - }, - "version": &dcl.Property{ - Type: "string", - GoName: "Version", - Description: "The Kubernetes version to run on control plane replicas (e.g. `1.19.10-gke.1000`). You can list all supported versions on a given Google Cloud region by calling GetAzureServerConfig.", - }, - "vmSize": &dcl.Property{ - Type: "string", - GoName: "VmSize", - Description: "Optional. The Azure VM size name. Example: `Standard_DS2_v2`. For available VM sizes, see https://docs.microsoft.com/en-us/azure/virtual-machines/vm-naming-conventions. When unspecified, it defaults to `Standard_DS2_v2`.", - ServerDefault: true, - }, - }, - }, - "createTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "CreateTime", - ReadOnly: true, - Description: "Output only. The time at which this cluster was created.", - Immutable: true, - }, - "description": &dcl.Property{ - Type: "string", - GoName: "Description", - Description: "Optional. A human readable description of this cluster. Cannot be longer than 255 UTF-8 encoded bytes.", - }, - "endpoint": &dcl.Property{ - Type: "string", - GoName: "Endpoint", - ReadOnly: true, - Description: "Output only. The endpoint of the cluster's API server.", - Immutable: true, - }, - "etag": &dcl.Property{ - Type: "string", - GoName: "Etag", - ReadOnly: true, - Description: "Allows clients to perform consistent read-modify-writes through optimistic concurrency control. 
May be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.", - Immutable: true, - }, - "fleet": &dcl.Property{ - Type: "object", - GoName: "Fleet", - GoType: "ClusterFleet", - Description: "Fleet configuration.", - Immutable: true, - Required: []string{ - "project", - }, - Properties: map[string]*dcl.Property{ - "membership": &dcl.Property{ - Type: "string", - GoName: "Membership", - ReadOnly: true, - Description: "The name of the managed Hub Membership resource associated to this cluster. Membership names are formatted as projects//locations/global/membership/.", - Immutable: true, - }, - "project": &dcl.Property{ - Type: "string", - GoName: "Project", - Description: "The number of the Fleet host project where this cluster will be registered.", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Cloudresourcemanager/Project", - Field: "name", - Parent: true, - }, - }, - HasLongForm: true, - }, - }, - }, - "location": &dcl.Property{ - Type: "string", - GoName: "Location", - Description: "The location for the resource", - Immutable: true, - Parameter: true, - }, -{{- if ne $.TargetVersionName "ga" }} - "loggingConfig": &dcl.Property{ - Type: "object", - GoName: "LoggingConfig", - GoType: "ClusterLoggingConfig", - Description: "Logging configuration.", - ServerDefault: true, - Properties: map[string]*dcl.Property{ - "componentConfig": &dcl.Property{ - Type: "object", - GoName: "ComponentConfig", - GoType: "ClusterLoggingConfigComponentConfig", - Description: "Configuration of the logging components.", - ServerDefault: true, - Properties: map[string]*dcl.Property{ - "enableComponents": &dcl.Property{ - Type: "array", - GoName: "EnableComponents", - Description: "Components of the logging configuration to be enabled.", - ServerDefault: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: 
"ClusterLoggingConfigComponentConfigEnableComponentsEnum", - Enum: []string{ - "COMPONENT_UNSPECIFIED", - "SYSTEM_COMPONENTS", - "WORKLOADS", - }, - }, - }, - }, - }, - }, - }, - "monitoringConfig": &dcl.Property{ - Type: "object", - GoName: "MonitoringConfig", - GoType: "ClusterMonitoringConfig", - Description: "Monitoring configuration.", - ServerDefault: true, - Properties: map[string]*dcl.Property{ - "managedPrometheusConfig": &dcl.Property{ - Type: "object", - GoName: "ManagedPrometheusConfig", - GoType: "ClusterMonitoringConfigManagedPrometheusConfig", - Description: "Configuration of the Google Cloud Managed Service for Prometheus.", - ServerDefault: true, - Properties: map[string]*dcl.Property{ - "enabled": &dcl.Property{ - Type: "boolean", - GoName: "Enabled", - Description: "Configuration of the enable Managed Collection.", - ServerDefault: true, - }, - }, - }, - }, - }, -{{- end }} - "name": &dcl.Property{ - Type: "string", - GoName: "Name", - Description: "The name of this resource.", - Immutable: true, - HasLongForm: true, - }, - "networking": &dcl.Property{ - Type: "object", - GoName: "Networking", - GoType: "ClusterNetworking", - Description: "Cluster-wide networking configuration.", - Immutable: true, - Required: []string{ - "virtualNetworkId", - "podAddressCidrBlocks", - "serviceAddressCidrBlocks", - }, - Properties: map[string]*dcl.Property{ - "podAddressCidrBlocks": &dcl.Property{ - Type: "array", - GoName: "PodAddressCidrBlocks", - Description: "The IP address range of the pods in this cluster, in CIDR notation (e.g. `10.96.0.0/14`). All pods in the cluster get assigned a unique RFC1918 IPv4 address from these ranges. Only a single range is supported. 
This field cannot be changed after creation.", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "serviceAddressCidrBlocks": &dcl.Property{ - Type: "array", - GoName: "ServiceAddressCidrBlocks", - Description: "The IP address range for services in this cluster, in CIDR notation (e.g. `10.96.0.0/14`). All services in the cluster get assigned a unique RFC1918 IPv4 address from these ranges. Only a single range is supported. This field cannot be changed after creating a cluster.", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "virtualNetworkId": &dcl.Property{ - Type: "string", - GoName: "VirtualNetworkId", - Description: "The Azure Resource Manager (ARM) ID of the VNet associated with your cluster. All components in the cluster (i.e. control plane and node pools) run on a single VNet. Example: `/subscriptions/*/resourceGroups/*/providers/Microsoft.Network/virtualNetworks/*` This field cannot be changed after creation.", - Immutable: true, - }, - }, - }, - "project": &dcl.Property{ - Type: "string", - GoName: "Project", - Description: "The project for the resource", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Cloudresourcemanager/Project", - Field: "name", - Parent: true, - }, - }, - Parameter: true, - }, - "reconciling": &dcl.Property{ - Type: "boolean", - GoName: "Reconciling", - ReadOnly: true, - Description: "Output only. If set, there are currently changes in flight to the cluster.", - Immutable: true, - }, - "resourceGroupId": &dcl.Property{ - Type: "string", - GoName: "ResourceGroupId", - Description: "The ARM ID of the resource group where the cluster resources are deployed. 
For example: `/subscriptions/*/resourceGroups/*`", - Immutable: true, - }, - "state": &dcl.Property{ - Type: "string", - GoName: "State", - GoType: "ClusterStateEnum", - ReadOnly: true, - Description: "Output only. The current state of the cluster. Possible values: STATE_UNSPECIFIED, PROVISIONING, RUNNING, RECONCILING, STOPPING, ERROR, DEGRADED", - Immutable: true, - Enum: []string{ - "STATE_UNSPECIFIED", - "PROVISIONING", - "RUNNING", - "RECONCILING", - "STOPPING", - "ERROR", - "DEGRADED", - }, - }, - "uid": &dcl.Property{ - Type: "string", - GoName: "Uid", - ReadOnly: true, - Description: "Output only. A globally unique identifier for the cluster.", - Immutable: true, - }, - "updateTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "UpdateTime", - ReadOnly: true, - Description: "Output only. The time at which this cluster was last updated.", - Immutable: true, - }, - "workloadIdentityConfig": &dcl.Property{ - Type: "object", - GoName: "WorkloadIdentityConfig", - GoType: "ClusterWorkloadIdentityConfig", - ReadOnly: true, - Description: "Output only. 
Workload Identity settings.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "identityProvider": &dcl.Property{ - Type: "string", - GoName: "IdentityProvider", - Description: "The ID of the OIDC Identity Provider (IdP) associated to the Workload Identity Pool.", - Immutable: true, - }, - "issuerUri": &dcl.Property{ - Type: "string", - GoName: "IssuerUri", - Description: "The OIDC issuer URL for this cluster.", - Immutable: true, - }, - "workloadPool": &dcl.Property{ - Type: "string", - GoName: "WorkloadPool", - Description: "The Workload Identity Pool associated to the cluster.", - Immutable: true, - }, - }, - }, - }, - }, - }, - }, - }, - } -} diff --git a/mmv1/third_party/terraform/services/containerazure/node_pool_schema.go.tmpl b/mmv1/third_party/terraform/services/containerazure/node_pool_schema.go.tmpl deleted file mode 100644 index 2a7a424f1343..000000000000 --- a/mmv1/third_party/terraform/services/containerazure/node_pool_schema.go.tmpl +++ /dev/null @@ -1,417 +0,0 @@ -package containerazure - -import ( - dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" -) - -func DCLNodePoolSchema() *dcl.Schema { - return &dcl.Schema{ - Info: &dcl.Info{ - Title: "ContainerAzure/NodePool", - Description: "An Anthos node pool running on Azure.", - StructName: "NodePool", - Reference: &dcl.Link{ - Text: "API reference", - URL: "https://cloud.google.com/kubernetes-engine/multi-cloud/docs/reference/rest/v1/projects.locations.azureClusters.azureNodePools", - }, - Guides: []*dcl.Link{ - &dcl.Link{ - Text: "Multicloud overview", - URL: "https://cloud.google.com/kubernetes-engine/multi-cloud/docs", - }, - }, - }, - Paths: &dcl.Paths{ - Get: &dcl.Path{ - Description: "The function used to get information about a NodePool", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "nodePool", - Required: true, - Description: "A full instance of a NodePool", - }, - }, - }, - Apply: &dcl.Path{ - Description: "The function used to apply 
information about a NodePool", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "nodePool", - Required: true, - Description: "A full instance of a NodePool", - }, - }, - }, - Delete: &dcl.Path{ - Description: "The function used to delete a NodePool", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "nodePool", - Required: true, - Description: "A full instance of a NodePool", - }, - }, - }, - DeleteAll: &dcl.Path{ - Description: "The function used to delete all NodePool", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "location", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "cluster", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - List: &dcl.Path{ - Description: "The function used to list information about many NodePool", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "location", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "cluster", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - }, - Components: &dcl.Components{ - Schemas: map[string]*dcl.Component{ - "NodePool": &dcl.Component{ - Title: "NodePool", - ID: "projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/azureClusters/{{ "{{" }}cluster{{ "}}" }}/azureNodePools/{{ "{{" }}name{{ "}}" }}", - ParentContainer: "project", - HasCreate: true, - SchemaProperty: dcl.Property{ - Type: "object", - Required: []string{ - "name", - "version", - "config", - "subnetId", - "autoscaling", - "maxPodsConstraint", - "project", - "location", - "cluster", - }, - 
Properties: map[string]*dcl.Property{ - "annotations": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "Annotations", - Description: "Optional. Annotations on the node pool. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. Keys can have 2 segments: prefix (optional) and name (required), separated by a slash (/). Prefix must be a DNS subdomain. Name must be 63 characters or less, begin and end with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics between.", - }, - "autoscaling": &dcl.Property{ - Type: "object", - GoName: "Autoscaling", - GoType: "NodePoolAutoscaling", - Description: "Autoscaler configuration for this node pool.", - Required: []string{ - "minNodeCount", - "maxNodeCount", - }, - Properties: map[string]*dcl.Property{ - "maxNodeCount": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "MaxNodeCount", - Description: "Maximum number of nodes in the node pool. Must be >= min_node_count.", - }, - "minNodeCount": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "MinNodeCount", - Description: "Minimum number of nodes in the node pool. Must be >= 1 and <= max_node_count.", - }, - }, - }, - "azureAvailabilityZone": &dcl.Property{ - Type: "string", - GoName: "AzureAvailabilityZone", - Description: "Optional. The Azure availability zone of the nodes in this nodepool. 
When unspecified, it defaults to `1`.", - Immutable: true, - ServerDefault: true, - }, - "cluster": &dcl.Property{ - Type: "string", - GoName: "Cluster", - Description: "The azureCluster for the resource", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Gkemulticloud/Cluster", - Field: "name", - Parent: true, - }, - }, - Parameter: true, - }, - "config": &dcl.Property{ - Type: "object", - GoName: "Config", - GoType: "NodePoolConfig", - Description: "The node configuration of the node pool.", - Required: []string{ - "sshConfig", - }, - Properties: map[string]*dcl.Property{ -{{- if ne $.TargetVersionName "ga" }} - "imageType": &dcl.Property{ - Type: "string", - GoName: "ImageType", - Description: "The OS image type to use on node pool instances.", - Immutable: true, - ServerDefault: true, - }, -{{- end }} - "labels": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "Labels", - Description: "Optional. The initial labels assigned to nodes of this node pool. An object containing a list of \"key\": value pairs. Example: { \"name\": \"wrench\", \"mass\": \"1.3kg\", \"count\": \"3\" }.", - Immutable: true, - }, - "proxyConfig": &dcl.Property{ - Type: "object", - GoName: "ProxyConfig", - GoType: "NodePoolConfigProxyConfig", - Description: "Proxy configuration for outbound HTTP(S) traffic.", - Immutable: true, - Required: []string{ - "resourceGroupId", - "secretId", - }, - Properties: map[string]*dcl.Property{ - "resourceGroupId": &dcl.Property{ - Type: "string", - GoName: "ResourceGroupId", - Description: "The ARM ID the of the resource group containing proxy keyvault. Resource group ids are formatted as `/subscriptions//resourceGroups/`", - Immutable: true, - }, - "secretId": &dcl.Property{ - Type: "string", - GoName: "SecretId", - Description: "The URL the of the proxy setting secret with its version. 
Secret ids are formatted as `https:.vault.azure.net/secrets//`.", - Immutable: true, - }, - }, - }, - "rootVolume": &dcl.Property{ - Type: "object", - GoName: "RootVolume", - GoType: "NodePoolConfigRootVolume", - Description: "Optional. Configuration related to the root volume provisioned for each node pool machine. When unspecified, it defaults to a 32-GiB Azure Disk.", - Immutable: true, - ServerDefault: true, - Properties: map[string]*dcl.Property{ - "sizeGib": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "SizeGib", - Description: "Optional. The size of the disk, in GiBs. When unspecified, a default value is provided. See the specific reference in the parent resource.", - Immutable: true, - ServerDefault: true, - }, - }, - }, - "sshConfig": &dcl.Property{ - Type: "object", - GoName: "SshConfig", - GoType: "NodePoolConfigSshConfig", - Description: "SSH configuration for how to access the node pool machines.", - Required: []string{ - "authorizedKey", - }, - Properties: map[string]*dcl.Property{ - "authorizedKey": &dcl.Property{ - Type: "string", - GoName: "AuthorizedKey", - Description: "The SSH public key data for VMs managed by Anthos. This accepts the authorized_keys file format used in OpenSSH according to the sshd(8) manual page.", - }, - }, - }, - "tags": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "Tags", - Description: "Optional. A set of tags to apply to all underlying Azure resources for this node pool. This currently only includes Virtual Machine Scale Sets. Specify at most 50 pairs containing alphanumerics, spaces, and symbols (.+-=_:@/). Keys can be up to 127 Unicode characters. Values can be up to 255 Unicode characters.", - Immutable: true, - }, - "vmSize": &dcl.Property{ - Type: "string", - GoName: "VmSize", - Description: "Optional. The Azure VM size name. Example: `Standard_DS2_v2`. See (/anthos/clusters/docs/azure/reference/supported-vms) for options. 
When unspecified, it defaults to `Standard_DS2_v2`.", - Immutable: true, - ServerDefault: true, - }, - }, - }, - "createTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "CreateTime", - ReadOnly: true, - Description: "Output only. The time at which this node pool was created.", - Immutable: true, - }, - "etag": &dcl.Property{ - Type: "string", - GoName: "Etag", - ReadOnly: true, - Description: "Allows clients to perform consistent read-modify-writes through optimistic concurrency control. May be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.", - Immutable: true, - }, - "location": &dcl.Property{ - Type: "string", - GoName: "Location", - Description: "The location for the resource", - Immutable: true, - Parameter: true, - }, - "management": &dcl.Property{ - Type: "object", - GoName: "Management", - GoType: "NodePoolManagement", - Description: "The Management configuration for this node pool.", - Properties: map[string]*dcl.Property{ - "autoRepair": &dcl.Property{ - Type: "boolean", - GoName: "AutoRepair", - Description: "Optional. 
Whether or not the nodes will be automatically repaired.", - }, - }, - }, - "maxPodsConstraint": &dcl.Property{ - Type: "object", - GoName: "MaxPodsConstraint", - GoType: "NodePoolMaxPodsConstraint", - Description: "The constraint on the maximum number of pods that can be run simultaneously on a node in the node pool.", - Immutable: true, - Required: []string{ - "maxPodsPerNode", - }, - Properties: map[string]*dcl.Property{ - "maxPodsPerNode": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "MaxPodsPerNode", - Description: "The maximum number of pods to schedule on a single node.", - Immutable: true, - }, - }, - }, - "name": &dcl.Property{ - Type: "string", - GoName: "Name", - Description: "The name of this resource.", - Immutable: true, - HasLongForm: true, - }, - "project": &dcl.Property{ - Type: "string", - GoName: "Project", - Description: "The project for the resource", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Cloudresourcemanager/Project", - Field: "name", - Parent: true, - }, - }, - Parameter: true, - }, - "reconciling": &dcl.Property{ - Type: "boolean", - GoName: "Reconciling", - ReadOnly: true, - Description: "Output only. If set, there are currently pending changes to the node pool.", - Immutable: true, - }, - "state": &dcl.Property{ - Type: "string", - GoName: "State", - GoType: "NodePoolStateEnum", - ReadOnly: true, - Description: "Output only. The current state of the node pool. Possible values: STATE_UNSPECIFIED, PROVISIONING, RUNNING, RECONCILING, STOPPING, ERROR, DEGRADED", - Immutable: true, - Enum: []string{ - "STATE_UNSPECIFIED", - "PROVISIONING", - "RUNNING", - "RECONCILING", - "STOPPING", - "ERROR", - "DEGRADED", - }, - }, - "subnetId": &dcl.Property{ - Type: "string", - GoName: "SubnetId", - Description: "The ARM ID of the subnet where the node pool VMs run. 
Make sure it's a subnet under the virtual network in the cluster configuration.", - Immutable: true, - }, - "uid": &dcl.Property{ - Type: "string", - GoName: "Uid", - ReadOnly: true, - Description: "Output only. A globally unique identifier for the node pool.", - Immutable: true, - }, - "updateTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "UpdateTime", - ReadOnly: true, - Description: "Output only. The time at which this node pool was last updated.", - Immutable: true, - }, - "version": &dcl.Property{ - Type: "string", - GoName: "Version", - Description: "The Kubernetes version (e.g. `1.19.10-gke.1000`) running on this node pool.", - }, - }, - }, - }, - }, - }, - } -} diff --git a/mmv1/third_party/terraform/services/dataplex/asset_schema.go.tmpl b/mmv1/third_party/terraform/services/dataplex/asset_schema.go.tmpl deleted file mode 100644 index f2256349117a..000000000000 --- a/mmv1/third_party/terraform/services/dataplex/asset_schema.go.tmpl +++ /dev/null @@ -1,504 +0,0 @@ -package dataplex - -import ( - dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" -) - -func DCLAssetSchema() *dcl.Schema { - return &dcl.Schema{ - Info: &dcl.Info{ - Title: "Dataplex/Asset", - Description: "The Dataplex Asset resource", - StructName: "Asset", - }, - Paths: &dcl.Paths{ - Get: &dcl.Path{ - Description: "The function used to get information about a Asset", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "asset", - Required: true, - Description: "A full instance of a Asset", - }, - }, - }, - Apply: &dcl.Path{ - Description: "The function used to apply information about a Asset", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "asset", - Required: true, - Description: "A full instance of a Asset", - }, - }, - }, - Delete: &dcl.Path{ - Description: "The function used to delete a Asset", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "asset", - Required: true, - Description: "A full 
instance of a Asset", - }, - }, - }, - DeleteAll: &dcl.Path{ - Description: "The function used to delete all Asset", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "location", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "dataplexZone", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "lake", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - List: &dcl.Path{ - Description: "The function used to list information about many Asset", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "location", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "dataplexZone", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "lake", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - }, - Components: &dcl.Components{ - Schemas: map[string]*dcl.Component{ - "Asset": &dcl.Component{ - Title: "Asset", - ID: "projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/lakes/{{ "{{" }}lake{{ "}}" }}/zones/{{ "{{" }}dataplex_zone{{ "}}" }}/assets/{{ "{{" }}name{{ "}}" }}", - ParentContainer: "project", - HasCreate: true, - SchemaProperty: dcl.Property{ - Type: "object", - Required: []string{ - "name", - "resourceSpec", - "discoverySpec", - "project", - "location", - "lake", - "dataplexZone", - }, - Properties: map[string]*dcl.Property{ - "createTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "CreateTime", - ReadOnly: true, - 
Description: "Output only. The time when the asset was created.", - Immutable: true, - }, - "dataplexZone": &dcl.Property{ - Type: "string", - GoName: "DataplexZone", - Description: "The zone for the resource", - Immutable: true, - Parameter: true, - }, - "description": &dcl.Property{ - Type: "string", - GoName: "Description", - Description: "Optional. Description of the asset.", - }, - "discoverySpec": &dcl.Property{ - Type: "object", - GoName: "DiscoverySpec", - GoType: "AssetDiscoverySpec", - Description: "Required. Specification of the discovery feature applied to data referenced by this asset. When this spec is left unset, the asset will use the spec set on the parent zone.", - Required: []string{ - "enabled", - }, - Properties: map[string]*dcl.Property{ - "csvOptions": &dcl.Property{ - Type: "object", - GoName: "CsvOptions", - GoType: "AssetDiscoverySpecCsvOptions", - Description: "Optional. Configuration for CSV data.", - ServerDefault: true, - Properties: map[string]*dcl.Property{ - "delimiter": &dcl.Property{ - Type: "string", - GoName: "Delimiter", - Description: "Optional. The delimiter being used to separate values. This defaults to ','.", - }, - "disableTypeInference": &dcl.Property{ - Type: "boolean", - GoName: "DisableTypeInference", - Description: "Optional. Whether to disable the inference of data type for CSV data. If true, all columns will be registered as strings.", - }, - "encoding": &dcl.Property{ - Type: "string", - GoName: "Encoding", - Description: "Optional. The character encoding of the data. The default is UTF-8.", - }, - "headerRows": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "HeaderRows", - Description: "Optional. The number of rows to interpret as header rows that should be skipped when reading data rows.", - }, - }, - }, - "enabled": &dcl.Property{ - Type: "boolean", - GoName: "Enabled", - Description: "Required. 
Whether discovery is enabled.", - }, - "excludePatterns": &dcl.Property{ - Type: "array", - GoName: "ExcludePatterns", - Description: "Optional. The list of patterns to apply for selecting data to exclude during discovery. For Cloud Storage bucket assets, these are interpreted as glob patterns used to match object names. For BigQuery dataset assets, these are interpreted as patterns to match table names.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "includePatterns": &dcl.Property{ - Type: "array", - GoName: "IncludePatterns", - Description: "Optional. The list of patterns to apply for selecting data to include during discovery if only a subset of the data should considered. For Cloud Storage bucket assets, these are interpreted as glob patterns used to match object names. For BigQuery dataset assets, these are interpreted as patterns to match table names.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "jsonOptions": &dcl.Property{ - Type: "object", - GoName: "JsonOptions", - GoType: "AssetDiscoverySpecJsonOptions", - Description: "Optional. Configuration for Json data.", - ServerDefault: true, - Properties: map[string]*dcl.Property{ - "disableTypeInference": &dcl.Property{ - Type: "boolean", - GoName: "DisableTypeInference", - Description: "Optional. Whether to disable the inference of data type for Json data. If true, all columns will be registered as their primitive types (strings, number or boolean).", - }, - "encoding": &dcl.Property{ - Type: "string", - GoName: "Encoding", - Description: "Optional. The character encoding of the data. The default is UTF-8.", - }, - }, - }, - "schedule": &dcl.Property{ - Type: "string", - GoName: "Schedule", - Description: "Optional. Cron schedule (https://en.wikipedia.org/wiki/Cron) for running discovery periodically. Successive discovery runs must be scheduled at least 60 minutes apart. 
The default value is to run discovery every 60 minutes. To explicitly set a timezone to the cron tab, apply a prefix in the cron tab: \"CRON_TZ=${IANA_TIME_ZONE}\" or TZ=${IANA_TIME_ZONE}\". The ${IANA_TIME_ZONE} may only be a valid string from IANA time zone database. For example, \"CRON_TZ=America/New_York 1 * * * *\", or \"TZ=America/New_York 1 * * * *\".", - }, - }, - }, - "discoveryStatus": &dcl.Property{ - Type: "object", - GoName: "DiscoveryStatus", - GoType: "AssetDiscoveryStatus", - ReadOnly: true, - Description: "Output only. Status of the discovery feature applied to data referenced by this asset.", - Properties: map[string]*dcl.Property{ - "lastRunDuration": &dcl.Property{ - Type: "string", - GoName: "LastRunDuration", - Description: "The duration of the last discovery run.", - }, - "lastRunTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "LastRunTime", - Description: "The start time of the last discovery run.", - }, - "message": &dcl.Property{ - Type: "string", - GoName: "Message", - Description: "Additional information about the current state.", - }, - "state": &dcl.Property{ - Type: "string", - GoName: "State", - GoType: "AssetDiscoveryStatusStateEnum", - Description: "The current status of the discovery feature. 
Possible values: STATE_UNSPECIFIED, SCHEDULED, IN_PROGRESS, PAUSED, DISABLED", - Enum: []string{ - "STATE_UNSPECIFIED", - "SCHEDULED", - "IN_PROGRESS", - "PAUSED", - "DISABLED", - }, - }, - "stats": &dcl.Property{ - Type: "object", - GoName: "Stats", - GoType: "AssetDiscoveryStatusStats", - Description: "Data Stats of the asset reported by discovery.", - Properties: map[string]*dcl.Property{ - "dataItems": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "DataItems", - Description: "The count of data items within the referenced resource.", - }, - "dataSize": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "DataSize", - Description: "The number of stored data bytes within the referenced resource.", - }, - "filesets": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "Filesets", - Description: "The count of fileset entities within the referenced resource.", - }, - "tables": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "Tables", - Description: "The count of table entities within the referenced resource.", - }, - }, - }, - "updateTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "UpdateTime", - Description: "Last update time of the status.", - }, - }, - }, - "displayName": &dcl.Property{ - Type: "string", - GoName: "DisplayName", - Description: "Optional. User friendly display name.", - }, - "labels": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "Labels", - Description: "Optional. 
User defined labels for the asset.", - }, - "lake": &dcl.Property{ - Type: "string", - GoName: "Lake", - Description: "The lake for the resource", - Immutable: true, - Parameter: true, - }, - "location": &dcl.Property{ - Type: "string", - GoName: "Location", - Description: "The location for the resource", - Immutable: true, - Parameter: true, - }, - "name": &dcl.Property{ - Type: "string", - GoName: "Name", - Description: "The name of the asset.", - HasLongForm: true, - }, - "project": &dcl.Property{ - Type: "string", - GoName: "Project", - Description: "The project for the resource", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Cloudresourcemanager/Project", - Field: "name", - Parent: true, - }, - }, - Parameter: true, - }, - "resourceSpec": &dcl.Property{ - Type: "object", - GoName: "ResourceSpec", - GoType: "AssetResourceSpec", - Description: "Required. Immutable. Specification of the resource that is referenced by this asset.", - Required: []string{ - "type", - }, - Properties: map[string]*dcl.Property{ - "name": &dcl.Property{ - Type: "string", - GoName: "Name", - Description: "Immutable. Relative name of the cloud resource that contains the data that is being managed within a lake. For example: `projects/{project_number}/buckets/{bucket_id}` `projects/{project_number}/datasets/{dataset_id}`", - Immutable: true, - }, - "readAccessMode": &dcl.Property{ - Type: "string", - GoName: "ReadAccessMode", - GoType: "AssetResourceSpecReadAccessModeEnum", - Description: "Optional. Determines how read permissions are handled for each asset and their associated tables. Only available to storage buckets assets. Possible values: DIRECT, MANAGED", - ServerDefault: true, - Enum: []string{ - "DIRECT", - "MANAGED", - }, - }, - "type": &dcl.Property{ - Type: "string", - GoName: "Type", - GoType: "AssetResourceSpecTypeEnum", - Description: "Required. Immutable. Type of resource. 
Possible values: STORAGE_BUCKET, BIGQUERY_DATASET", - Immutable: true, - Enum: []string{ - "STORAGE_BUCKET", - "BIGQUERY_DATASET", - }, - }, - }, - }, - "resourceStatus": &dcl.Property{ - Type: "object", - GoName: "ResourceStatus", - GoType: "AssetResourceStatus", - ReadOnly: true, - Description: "Output only. Status of the resource referenced by this asset.", - Properties: map[string]*dcl.Property{ - "message": &dcl.Property{ - Type: "string", - GoName: "Message", - Description: "Additional information about the current state.", - }, - "state": &dcl.Property{ - Type: "string", - GoName: "State", - GoType: "AssetResourceStatusStateEnum", - Description: "The current state of the managed resource. Possible values: STATE_UNSPECIFIED, READY, ERROR", - Enum: []string{ - "STATE_UNSPECIFIED", - "READY", - "ERROR", - }, - }, - "updateTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "UpdateTime", - Description: "Last update time of the status.", - }, - }, - }, - "securityStatus": &dcl.Property{ - Type: "object", - GoName: "SecurityStatus", - GoType: "AssetSecurityStatus", - ReadOnly: true, - Description: "Output only. Status of the security policy applied to resource referenced by this asset.", - Properties: map[string]*dcl.Property{ - "message": &dcl.Property{ - Type: "string", - GoName: "Message", - Description: "Additional information about the current state.", - }, - "state": &dcl.Property{ - Type: "string", - GoName: "State", - GoType: "AssetSecurityStatusStateEnum", - Description: "The current state of the security policy applied to the attached resource. 
Possible values: STATE_UNSPECIFIED, READY, APPLYING, ERROR", - Enum: []string{ - "STATE_UNSPECIFIED", - "READY", - "APPLYING", - "ERROR", - }, - }, - "updateTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "UpdateTime", - Description: "Last update time of the status.", - }, - }, - }, - "state": &dcl.Property{ - Type: "string", - GoName: "State", - GoType: "AssetStateEnum", - ReadOnly: true, - Description: "Output only. Current state of the asset. Possible values: STATE_UNSPECIFIED, ACTIVE, CREATING, DELETING, ACTION_REQUIRED", - Immutable: true, - Enum: []string{ - "STATE_UNSPECIFIED", - "ACTIVE", - "CREATING", - "DELETING", - "ACTION_REQUIRED", - }, - }, - "uid": &dcl.Property{ - Type: "string", - GoName: "Uid", - ReadOnly: true, - Description: "Output only. System generated globally unique ID for the asset. This ID will be different if the asset is deleted and re-created with the same name.", - Immutable: true, - }, - "updateTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "UpdateTime", - ReadOnly: true, - Description: "Output only. 
The time when the asset was last updated.", - Immutable: true, - }, - }, - }, - }, - }, - }, - } -} diff --git a/mmv1/third_party/terraform/services/dataplex/lake_schema.go.tmpl b/mmv1/third_party/terraform/services/dataplex/lake_schema.go.tmpl deleted file mode 100644 index b097e3d65ff5..000000000000 --- a/mmv1/third_party/terraform/services/dataplex/lake_schema.go.tmpl +++ /dev/null @@ -1,280 +0,0 @@ -package dataplex - -import ( - dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" -) - -func DCLLakeSchema() *dcl.Schema { - return &dcl.Schema{ - Info: &dcl.Info{ - Title: "Dataplex/Lake", - Description: "The Dataplex Lake resource", - StructName: "Lake", - }, - Paths: &dcl.Paths{ - Get: &dcl.Path{ - Description: "The function used to get information about a Lake", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "lake", - Required: true, - Description: "A full instance of a Lake", - }, - }, - }, - Apply: &dcl.Path{ - Description: "The function used to apply information about a Lake", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "lake", - Required: true, - Description: "A full instance of a Lake", - }, - }, - }, - Delete: &dcl.Path{ - Description: "The function used to delete a Lake", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "lake", - Required: true, - Description: "A full instance of a Lake", - }, - }, - }, - DeleteAll: &dcl.Path{ - Description: "The function used to delete all Lake", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "location", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - List: &dcl.Path{ - Description: "The function used to list information about many Lake", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: 
&dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "location", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - }, - Components: &dcl.Components{ - Schemas: map[string]*dcl.Component{ - "Lake": &dcl.Component{ - Title: "Lake", - ID: "projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/lakes/{{ "{{" }}name{{ "}}" }}", - ParentContainer: "project", - HasCreate: true, - SchemaProperty: dcl.Property{ - Type: "object", - Required: []string{ - "name", - "project", - "location", - }, - Properties: map[string]*dcl.Property{ - "assetStatus": &dcl.Property{ - Type: "object", - GoName: "AssetStatus", - GoType: "LakeAssetStatus", - ReadOnly: true, - Description: "Output only. Aggregated status of the underlying assets of the lake.", - Properties: map[string]*dcl.Property{ - "activeAssets": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "ActiveAssets", - Description: "Number of active assets.", - }, - "securityPolicyApplyingAssets": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "SecurityPolicyApplyingAssets", - Description: "Number of assets that are in process of updating the security policy on attached resources.", - }, - "updateTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "UpdateTime", - Description: "Last update time of the status.", - }, - }, - }, - "createTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "CreateTime", - ReadOnly: true, - Description: "Output only. The time when the lake was created.", - Immutable: true, - }, - "description": &dcl.Property{ - Type: "string", - GoName: "Description", - Description: "Optional. Description of the lake.", - }, - "displayName": &dcl.Property{ - Type: "string", - GoName: "DisplayName", - Description: "Optional. 
User friendly display name.", - }, - "labels": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "Labels", - Description: "Optional. User-defined labels for the lake.", - }, - "location": &dcl.Property{ - Type: "string", - GoName: "Location", - Description: "The location for the resource", - Immutable: true, - Parameter: true, - }, - "metastore": &dcl.Property{ - Type: "object", - GoName: "Metastore", - GoType: "LakeMetastore", - Description: "Optional. Settings to manage lake and Dataproc Metastore service instance association.", - Properties: map[string]*dcl.Property{ - "service": &dcl.Property{ - Type: "string", - GoName: "Service", - Description: "Optional. A relative reference to the Dataproc Metastore (https://cloud.google.com/dataproc-metastore/docs) service associated with the lake: `projects/{project_id}/locations/{location_id}/services/{service_id}`", - }, - }, - }, - "metastoreStatus": &dcl.Property{ - Type: "object", - GoName: "MetastoreStatus", - GoType: "LakeMetastoreStatus", - ReadOnly: true, - Description: "Output only. Metastore status of the lake.", - Properties: map[string]*dcl.Property{ - "endpoint": &dcl.Property{ - Type: "string", - GoName: "Endpoint", - Description: "The URI of the endpoint used to access the Metastore service.", - }, - "message": &dcl.Property{ - Type: "string", - GoName: "Message", - Description: "Additional information about the current status.", - }, - "state": &dcl.Property{ - Type: "string", - GoName: "State", - GoType: "LakeMetastoreStatusStateEnum", - Description: "Current state of association. 
Possible values: STATE_UNSPECIFIED, NONE, READY, UPDATING, ERROR", - Enum: []string{ - "STATE_UNSPECIFIED", - "NONE", - "READY", - "UPDATING", - "ERROR", - }, - }, - "updateTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "UpdateTime", - Description: "Last update time of the metastore status of the lake.", - }, - }, - }, - "name": &dcl.Property{ - Type: "string", - GoName: "Name", - Description: "The name of the lake.", - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Dataplex/Lake", - Field: "selfLink", - Parent: true, - }, - }, - HasLongForm: true, - }, - "project": &dcl.Property{ - Type: "string", - GoName: "Project", - Description: "The project for the resource", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Cloudresourcemanager/Project", - Field: "name", - Parent: true, - }, - }, - Parameter: true, - }, - "serviceAccount": &dcl.Property{ - Type: "string", - GoName: "ServiceAccount", - ReadOnly: true, - Description: "Output only. Service account associated with this lake. This service account must be authorized to access or operate on resources managed by the lake.", - Immutable: true, - }, - "state": &dcl.Property{ - Type: "string", - GoName: "State", - GoType: "LakeStateEnum", - ReadOnly: true, - Description: "Output only. Current state of the lake. Possible values: STATE_UNSPECIFIED, ACTIVE, CREATING, DELETING, ACTION_REQUIRED", - Immutable: true, - Enum: []string{ - "STATE_UNSPECIFIED", - "ACTIVE", - "CREATING", - "DELETING", - "ACTION_REQUIRED", - }, - }, - "uid": &dcl.Property{ - Type: "string", - GoName: "Uid", - ReadOnly: true, - Description: "Output only. System generated globally unique ID for the lake. 
This ID will be different if the lake is deleted and re-created with the same name.", - Immutable: true, - }, - "updateTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "UpdateTime", - ReadOnly: true, - Description: "Output only. The time when the lake was last updated.", - Immutable: true, - }, - }, - }, - }, - }, - }, - } -} diff --git a/mmv1/third_party/terraform/services/dataplex/zone_schema.go.tmpl b/mmv1/third_party/terraform/services/dataplex/zone_schema.go.tmpl deleted file mode 100644 index 7216afa27c4a..000000000000 --- a/mmv1/third_party/terraform/services/dataplex/zone_schema.go.tmpl +++ /dev/null @@ -1,376 +0,0 @@ -package dataplex - -import ( - dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" -) - -func DCLZoneSchema() *dcl.Schema { - return &dcl.Schema{ - Info: &dcl.Info{ - Title: "Dataplex/Zone", - Description: "The Dataplex Zone resource", - StructName: "Zone", - }, - Paths: &dcl.Paths{ - Get: &dcl.Path{ - Description: "The function used to get information about a Zone", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "zone", - Required: true, - Description: "A full instance of a Zone", - }, - }, - }, - Apply: &dcl.Path{ - Description: "The function used to apply information about a Zone", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "zone", - Required: true, - Description: "A full instance of a Zone", - }, - }, - }, - Delete: &dcl.Path{ - Description: "The function used to delete a Zone", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "zone", - Required: true, - Description: "A full instance of a Zone", - }, - }, - }, - DeleteAll: &dcl.Path{ - Description: "The function used to delete all Zone", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "location", - Required: true, - Schema: 
&dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "lake", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - List: &dcl.Path{ - Description: "The function used to list information about many Zone", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "location", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "lake", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - }, - Components: &dcl.Components{ - Schemas: map[string]*dcl.Component{ - "Zone": &dcl.Component{ - Title: "Zone", - ID: "projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/lakes/{{ "{{" }}lake{{ "}}" }}/zones/{{ "{{" }}name{{ "}}" }}", - ParentContainer: "project", - HasCreate: true, - SchemaProperty: dcl.Property{ - Type: "object", - Required: []string{ - "name", - "type", - "discoverySpec", - "resourceSpec", - "project", - "location", - "lake", - }, - Properties: map[string]*dcl.Property{ - "assetStatus": &dcl.Property{ - Type: "object", - GoName: "AssetStatus", - GoType: "ZoneAssetStatus", - ReadOnly: true, - Description: "Output only. 
Aggregated status of the underlying assets of the zone.", - Properties: map[string]*dcl.Property{ - "activeAssets": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "ActiveAssets", - Description: "Number of active assets.", - }, - "securityPolicyApplyingAssets": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "SecurityPolicyApplyingAssets", - Description: "Number of assets that are in process of updating the security policy on attached resources.", - }, - "updateTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "UpdateTime", - Description: "Last update time of the status.", - }, - }, - }, - "createTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "CreateTime", - ReadOnly: true, - Description: "Output only. The time when the zone was created.", - Immutable: true, - }, - "description": &dcl.Property{ - Type: "string", - GoName: "Description", - Description: "Optional. Description of the zone.", - }, - "discoverySpec": &dcl.Property{ - Type: "object", - GoName: "DiscoverySpec", - GoType: "ZoneDiscoverySpec", - Description: "Required. Specification of the discovery feature applied to data in this zone.", - Required: []string{ - "enabled", - }, - Properties: map[string]*dcl.Property{ - "csvOptions": &dcl.Property{ - Type: "object", - GoName: "CsvOptions", - GoType: "ZoneDiscoverySpecCsvOptions", - Description: "Optional. Configuration for CSV data.", - ServerDefault: true, - Properties: map[string]*dcl.Property{ - "delimiter": &dcl.Property{ - Type: "string", - GoName: "Delimiter", - Description: "Optional. The delimiter being used to separate values. This defaults to ','.", - }, - "disableTypeInference": &dcl.Property{ - Type: "boolean", - GoName: "DisableTypeInference", - Description: "Optional. Whether to disable the inference of data type for CSV data. 
If true, all columns will be registered as strings.", - }, - "encoding": &dcl.Property{ - Type: "string", - GoName: "Encoding", - Description: "Optional. The character encoding of the data. The default is UTF-8.", - }, - "headerRows": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "HeaderRows", - Description: "Optional. The number of rows to interpret as header rows that should be skipped when reading data rows.", - }, - }, - }, - "enabled": &dcl.Property{ - Type: "boolean", - GoName: "Enabled", - Description: "Required. Whether discovery is enabled.", - }, - "excludePatterns": &dcl.Property{ - Type: "array", - GoName: "ExcludePatterns", - Description: "Optional. The list of patterns to apply for selecting data to exclude during discovery. For Cloud Storage bucket assets, these are interpreted as glob patterns used to match object names. For BigQuery dataset assets, these are interpreted as patterns to match table names.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "includePatterns": &dcl.Property{ - Type: "array", - GoName: "IncludePatterns", - Description: "Optional. The list of patterns to apply for selecting data to include during discovery if only a subset of the data should considered. For Cloud Storage bucket assets, these are interpreted as glob patterns used to match object names. For BigQuery dataset assets, these are interpreted as patterns to match table names.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "jsonOptions": &dcl.Property{ - Type: "object", - GoName: "JsonOptions", - GoType: "ZoneDiscoverySpecJsonOptions", - Description: "Optional. Configuration for Json data.", - ServerDefault: true, - Properties: map[string]*dcl.Property{ - "disableTypeInference": &dcl.Property{ - Type: "boolean", - GoName: "DisableTypeInference", - Description: "Optional. 
Whether to disable the inference of data type for Json data. If true, all columns will be registered as their primitive types (strings, number or boolean).", - }, - "encoding": &dcl.Property{ - Type: "string", - GoName: "Encoding", - Description: "Optional. The character encoding of the data. The default is UTF-8.", - }, - }, - }, - "schedule": &dcl.Property{ - Type: "string", - GoName: "Schedule", - Description: "Optional. Cron schedule (https://en.wikipedia.org/wiki/Cron) for running discovery periodically. Successive discovery runs must be scheduled at least 60 minutes apart. The default value is to run discovery every 60 minutes. To explicitly set a timezone to the cron tab, apply a prefix in the cron tab: \"CRON_TZ=${IANA_TIME_ZONE}\" or TZ=${IANA_TIME_ZONE}\". The ${IANA_TIME_ZONE} may only be a valid string from IANA time zone database. For example, \"CRON_TZ=America/New_York 1 * * * *\", or \"TZ=America/New_York 1 * * * *\".", - ServerDefault: true, - }, - }, - }, - "displayName": &dcl.Property{ - Type: "string", - GoName: "DisplayName", - Description: "Optional. User friendly display name.", - }, - "labels": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "Labels", - Description: "Optional. 
User defined labels for the zone.", - }, - "lake": &dcl.Property{ - Type: "string", - GoName: "Lake", - Description: "The lake for the resource", - Immutable: true, - Parameter: true, - }, - "location": &dcl.Property{ - Type: "string", - GoName: "Location", - Description: "The location for the resource", - Immutable: true, - Parameter: true, - }, - "name": &dcl.Property{ - Type: "string", - GoName: "Name", - Description: "The name of the zone.", - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Dataplex/Zone", - Field: "selfLink", - Parent: true, - }, - }, - HasLongForm: true, - }, - "project": &dcl.Property{ - Type: "string", - GoName: "Project", - Description: "The project for the resource", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Cloudresourcemanager/Project", - Field: "name", - Parent: true, - }, - }, - Parameter: true, - }, - "resourceSpec": &dcl.Property{ - Type: "object", - GoName: "ResourceSpec", - GoType: "ZoneResourceSpec", - Description: "Required. Immutable. Specification of the resources that are referenced by the assets within this zone.", - Immutable: true, - Required: []string{ - "locationType", - }, - Properties: map[string]*dcl.Property{ - "locationType": &dcl.Property{ - Type: "string", - GoName: "LocationType", - GoType: "ZoneResourceSpecLocationTypeEnum", - Description: "Required. Immutable. The location type of the resources that are allowed to be attached to the assets within this zone. Possible values: LOCATION_TYPE_UNSPECIFIED, SINGLE_REGION, MULTI_REGION", - Immutable: true, - Enum: []string{ - "LOCATION_TYPE_UNSPECIFIED", - "SINGLE_REGION", - "MULTI_REGION", - }, - }, - }, - }, - "state": &dcl.Property{ - Type: "string", - GoName: "State", - GoType: "ZoneStateEnum", - ReadOnly: true, - Description: "Output only. Current state of the zone. 
Possible values: STATE_UNSPECIFIED, ACTIVE, CREATING, DELETING, ACTION_REQUIRED", - Immutable: true, - Enum: []string{ - "STATE_UNSPECIFIED", - "ACTIVE", - "CREATING", - "DELETING", - "ACTION_REQUIRED", - }, - }, - "type": &dcl.Property{ - Type: "string", - GoName: "Type", - GoType: "ZoneTypeEnum", - Description: "Required. Immutable. The type of the zone. Possible values: TYPE_UNSPECIFIED, RAW, CURATED", - Immutable: true, - Enum: []string{ - "TYPE_UNSPECIFIED", - "RAW", - "CURATED", - }, - }, - "uid": &dcl.Property{ - Type: "string", - GoName: "Uid", - ReadOnly: true, - Description: "Output only. System generated globally unique ID for the zone. This ID will be different if the zone is deleted and re-created with the same name.", - Immutable: true, - }, - "updateTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "UpdateTime", - ReadOnly: true, - Description: "Output only. The time when the zone was last updated.", - Immutable: true, - }, - }, - }, - }, - }, - }, - } -} diff --git a/mmv1/third_party/terraform/services/dataproc/workflow_template_schema.go.tmpl b/mmv1/third_party/terraform/services/dataproc/workflow_template_schema.go.tmpl deleted file mode 100644 index 726fc3d1a99b..000000000000 --- a/mmv1/third_party/terraform/services/dataproc/workflow_template_schema.go.tmpl +++ /dev/null @@ -1,2230 +0,0 @@ -package dataproc - -import ( - dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" -) - -func DCLWorkflowTemplateSchema() *dcl.Schema { - return &dcl.Schema{ - Info: &dcl.Info{ - Title: "Dataproc/WorkflowTemplate", - Description: "The Dataproc WorkflowTemplate resource", - StructName: "WorkflowTemplate", - }, - Paths: &dcl.Paths{ - Get: &dcl.Path{ - Description: "The function used to get information about a WorkflowTemplate", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "workflowTemplate", - Required: true, - Description: "A full instance of a WorkflowTemplate", - }, - }, - }, - Apply: 
&dcl.Path{ - Description: "The function used to apply information about a WorkflowTemplate", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "workflowTemplate", - Required: true, - Description: "A full instance of a WorkflowTemplate", - }, - }, - }, - Delete: &dcl.Path{ - Description: "The function used to delete a WorkflowTemplate", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "workflowTemplate", - Required: true, - Description: "A full instance of a WorkflowTemplate", - }, - }, - }, - DeleteAll: &dcl.Path{ - Description: "The function used to delete all WorkflowTemplate", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "location", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - List: &dcl.Path{ - Description: "The function used to list information about many WorkflowTemplate", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "location", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - }, - Components: &dcl.Components{ - Schemas: map[string]*dcl.Component{ - "WorkflowTemplate": &dcl.Component{ - Title: "WorkflowTemplate", - ID: "projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/workflowTemplates/{{ "{{" }}name{{ "}}" }}", - ParentContainer: "project", - LabelsField: "labels", - HasCreate: true, - SchemaProperty: dcl.Property{ - Type: "object", - Required: []string{ - "name", - "placement", - "jobs", - "project", - "location", - }, - Properties: map[string]*dcl.Property{ - "createTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "CreateTime", - ReadOnly: true, - Description: "Output only. 
The time template was created.", - Immutable: true, - }, - "dagTimeout": &dcl.Property{ - Type: "string", - GoName: "DagTimeout", - Description: "Optional. Timeout duration for the DAG of jobs, expressed in seconds (see [JSON representation of duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). The timeout duration must be from 10 minutes (\"600s\") to 24 hours (\"86400s\"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a [managed cluster](/dataproc/docs/concepts/workflows/using-workflows#configuring_or_selecting_a_cluster), the cluster is deleted.", - Immutable: true, - }, - "encryptionConfig": &dcl.Property{ - Type: "object", - GoName: "EncryptionConfig", - GoType: "WorkflowTemplateEncryptionConfig", - Description: "Optional. The encryption configuration for the workflow template.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "kmsKey": &dcl.Property{ - Type: "string", - GoName: "KmsKey", - Description: "Optional. The Cloud KMS key name to use for encryption.", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Cloudkms/CryptoKey", - Field: "selfLink", - }, - }, - }, - }, - }, - "jobs": &dcl.Property{ - Type: "array", - GoName: "Jobs", - Description: "Required. The Directed Acyclic Graph of Jobs to submit.", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "WorkflowTemplateJobs", - Required: []string{ - "stepId", - }, - Properties: map[string]*dcl.Property{ - "hadoopJob": &dcl.Property{ - Type: "object", - GoName: "HadoopJob", - GoType: "WorkflowTemplateJobsHadoopJob", - Description: "Optional. 
Job is a Hadoop job.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "archiveUris": &dcl.Property{ - Type: "array", - GoName: "ArchiveUris", - Description: "Optional. HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "args": &dcl.Property{ - Type: "array", - GoName: "Args", - Description: "Optional. The arguments to pass to the driver. Do not include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "fileUris": &dcl.Property{ - Type: "array", - GoName: "FileUris", - Description: "Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "jarFileUris": &dcl.Property{ - Type: "array", - GoName: "JarFileUris", - Description: "Optional. Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "loggingConfig": &dcl.Property{ - Type: "object", - GoName: "LoggingConfig", - GoType: "WorkflowTemplateJobsHadoopJobLoggingConfig", - Description: "Optional. 
The runtime log config for job execution.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "driverLogLevels": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "DriverLogLevels", - Description: "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", - Immutable: true, - }, - }, - }, - "mainClass": &dcl.Property{ - Type: "string", - GoName: "MainClass", - Description: "The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in `jar_file_uris`.", - Immutable: true, - }, - "mainJarFileUri": &dcl.Property{ - Type: "string", - GoName: "MainJarFileUri", - Description: "The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'", - Immutable: true, - }, - "properties": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "Properties", - Description: "Optional. A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.", - Immutable: true, - }, - }, - }, - "hiveJob": &dcl.Property{ - Type: "object", - GoName: "HiveJob", - GoType: "WorkflowTemplateJobsHiveJob", - Description: "Optional. Job is a Hive job.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "continueOnFailure": &dcl.Property{ - Type: "boolean", - GoName: "ContinueOnFailure", - Description: "Optional. Whether to continue executing queries if a query fails. The default value is `false`. 
Setting to `true` can be useful when executing independent parallel queries.", - Immutable: true, - }, - "jarFileUris": &dcl.Property{ - Type: "array", - GoName: "JarFileUris", - Description: "Optional. HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs.", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "properties": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "Properties", - Description: "Optional. A mapping of property names and values, used to configure Hive. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes in user code.", - Immutable: true, - }, - "queryFileUri": &dcl.Property{ - Type: "string", - GoName: "QueryFileUri", - Description: "The HCFS URI of the script that contains Hive queries.", - Immutable: true, - }, - "queryList": &dcl.Property{ - Type: "object", - GoName: "QueryList", - GoType: "WorkflowTemplateJobsHiveJobQueryList", - Description: "A list of queries.", - Immutable: true, - Required: []string{ - "queries", - }, - Properties: map[string]*dcl.Property{ - "queries": &dcl.Property{ - Type: "array", - GoName: "Queries", - Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. 
Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - }, - }, - "scriptVariables": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "ScriptVariables", - Description: "Optional. Mapping of query variable names to values (equivalent to the Hive command: `SET name=\"value\";`).", - Immutable: true, - }, - }, - }, - "labels": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "Labels", - Description: "Optional. The labels to associate with this job. Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: p{Ll}p{Lo}{0,62} Label values must be between 1 and 63 characters long, and must conform to the following regular expression: [p{Ll}p{Lo}p{N}_-]{0,63} No more than 32 labels can be associated with a given job.", - Immutable: true, - }, - "pigJob": &dcl.Property{ - Type: "object", - GoName: "PigJob", - GoType: "WorkflowTemplateJobsPigJob", - Description: "Optional. Job is a Pig job.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "continueOnFailure": &dcl.Property{ - Type: "boolean", - GoName: "ContinueOnFailure", - Description: "Optional. Whether to continue executing queries if a query fails. The default value is `false`. Setting to `true` can be useful when executing independent parallel queries.", - Immutable: true, - }, - "jarFileUris": &dcl.Property{ - Type: "array", - GoName: "JarFileUris", - Description: "Optional. HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. 
Can contain Pig UDFs.", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "loggingConfig": &dcl.Property{ - Type: "object", - GoName: "LoggingConfig", - GoType: "WorkflowTemplateJobsPigJobLoggingConfig", - Description: "Optional. The runtime log config for job execution.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "driverLogLevels": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "DriverLogLevels", - Description: "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", - Immutable: true, - }, - }, - }, - "properties": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "Properties", - Description: "Optional. A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code.", - Immutable: true, - }, - "queryFileUri": &dcl.Property{ - Type: "string", - GoName: "QueryFileUri", - Description: "The HCFS URI of the script that contains the Pig queries.", - Immutable: true, - }, - "queryList": &dcl.Property{ - Type: "object", - GoName: "QueryList", - GoType: "WorkflowTemplateJobsPigJobQueryList", - Description: "A list of queries.", - Immutable: true, - Required: []string{ - "queries", - }, - Properties: map[string]*dcl.Property{ - "queries": &dcl.Property{ - Type: "array", - GoName: "Queries", - Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. 
Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - }, - }, - "scriptVariables": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "ScriptVariables", - Description: "Optional. Mapping of query variable names to values (equivalent to the Pig command: `name=[value]`).", - Immutable: true, - }, - }, - }, - "prerequisiteStepIds": &dcl.Property{ - Type: "array", - GoName: "PrerequisiteStepIds", - Description: "Optional. The optional list of prerequisite job step_ids. If not specified, the job will start at the beginning of workflow.", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "prestoJob": &dcl.Property{ - Type: "object", - GoName: "PrestoJob", - GoType: "WorkflowTemplateJobsPrestoJob", - Description: "Optional. Job is a Presto job.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "clientTags": &dcl.Property{ - Type: "array", - GoName: "ClientTags", - Description: "Optional. Presto client tags to attach to this query", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "continueOnFailure": &dcl.Property{ - Type: "boolean", - GoName: "ContinueOnFailure", - Description: "Optional. Whether to continue executing queries if a query fails. The default value is `false`. Setting to `true` can be useful when executing independent parallel queries.", - Immutable: true, - }, - "loggingConfig": &dcl.Property{ - Type: "object", - GoName: "LoggingConfig", - GoType: "WorkflowTemplateJobsPrestoJobLoggingConfig", - Description: "Optional. 
The runtime log config for job execution.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "driverLogLevels": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "DriverLogLevels", - Description: "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", - Immutable: true, - }, - }, - }, - "outputFormat": &dcl.Property{ - Type: "string", - GoName: "OutputFormat", - Description: "Optional. The format in which query output will be displayed. See the Presto documentation for supported output formats", - Immutable: true, - }, - "properties": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "Properties", - Description: "Optional. A mapping of property names to values. Used to set Presto [session properties](https://prestodb.io/docs/current/sql/set-session.html) Equivalent to using the --session flag in the Presto CLI", - Immutable: true, - }, - "queryFileUri": &dcl.Property{ - Type: "string", - GoName: "QueryFileUri", - Description: "The HCFS URI of the script that contains SQL queries.", - Immutable: true, - }, - "queryList": &dcl.Property{ - Type: "object", - GoName: "QueryList", - GoType: "WorkflowTemplateJobsPrestoJobQueryList", - Description: "A list of queries.", - Immutable: true, - Required: []string{ - "queries", - }, - Properties: map[string]*dcl.Property{ - "queries": &dcl.Property{ - Type: "array", - GoName: "Queries", - Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. 
Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - }, - }, - }, - }, - "pysparkJob": &dcl.Property{ - Type: "object", - GoName: "PysparkJob", - GoType: "WorkflowTemplateJobsPysparkJob", - Description: "Optional. Job is a PySpark job.", - Immutable: true, - Required: []string{ - "mainPythonFileUri", - }, - Properties: map[string]*dcl.Property{ - "archiveUris": &dcl.Property{ - Type: "array", - GoName: "ArchiveUris", - Description: "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "args": &dcl.Property{ - Type: "array", - GoName: "Args", - Description: "Optional. The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "fileUris": &dcl.Property{ - Type: "array", - GoName: "FileUris", - Description: "Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "jarFileUris": &dcl.Property{ - Type: "array", - GoName: "JarFileUris", - Description: "Optional. 
HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks.", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "loggingConfig": &dcl.Property{ - Type: "object", - GoName: "LoggingConfig", - GoType: "WorkflowTemplateJobsPysparkJobLoggingConfig", - Description: "Optional. The runtime log config for job execution.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "driverLogLevels": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "DriverLogLevels", - Description: "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", - Immutable: true, - }, - }, - }, - "mainPythonFileUri": &dcl.Property{ - Type: "string", - GoName: "MainPythonFileUri", - Description: "Required. The HCFS URI of the main Python file to use as the driver. Must be a .py file.", - Immutable: true, - }, - "properties": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "Properties", - Description: "Optional. A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.", - Immutable: true, - }, - "pythonFileUris": &dcl.Property{ - Type: "array", - GoName: "PythonFileUris", - Description: "Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - }, - }, - "scheduling": &dcl.Property{ - Type: "object", - GoName: "Scheduling", - GoType: "WorkflowTemplateJobsScheduling", - Description: "Optional. 
Job scheduling configuration.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "maxFailuresPerHour": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "MaxFailuresPerHour", - Description: "Optional. Maximum number of times per hour a driver may be restarted as a result of driver exiting with non-zero code before job is reported failed. A job may be reported as thrashing if driver exits with non-zero code 4 times within 10 minute window. Maximum value is 10.", - Immutable: true, - }, - "maxFailuresTotal": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "MaxFailuresTotal", - Description: "Optional. Maximum number of times in total a driver may be restarted as a result of driver exiting with non-zero code before job is reported failed. Maximum value is 240.", - Immutable: true, - }, - }, - }, - "sparkJob": &dcl.Property{ - Type: "object", - GoName: "SparkJob", - GoType: "WorkflowTemplateJobsSparkJob", - Description: "Optional. Job is a Spark job.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "archiveUris": &dcl.Property{ - Type: "array", - GoName: "ArchiveUris", - Description: "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "args": &dcl.Property{ - Type: "array", - GoName: "Args", - Description: "Optional. The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "fileUris": &dcl.Property{ - Type: "array", - GoName: "FileUris", - Description: "Optional. 
HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "jarFileUris": &dcl.Property{ - Type: "array", - GoName: "JarFileUris", - Description: "Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "loggingConfig": &dcl.Property{ - Type: "object", - GoName: "LoggingConfig", - GoType: "WorkflowTemplateJobsSparkJobLoggingConfig", - Description: "Optional. The runtime log config for job execution.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "driverLogLevels": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "DriverLogLevels", - Description: "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", - Immutable: true, - }, - }, - }, - "mainClass": &dcl.Property{ - Type: "string", - GoName: "MainClass", - Description: "The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in `jar_file_uris`.", - Immutable: true, - }, - "mainJarFileUri": &dcl.Property{ - Type: "string", - GoName: "MainJarFileUri", - Description: "The HCFS URI of the jar file that contains the main class.", - Immutable: true, - }, - "properties": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "Properties", - Description: "Optional. A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Dataproc API may be overwritten. 
Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.", - Immutable: true, - }, - }, - }, - "sparkRJob": &dcl.Property{ - Type: "object", - GoName: "SparkRJob", - GoType: "WorkflowTemplateJobsSparkRJob", - Description: "Optional. Job is a SparkR job.", - Immutable: true, - Required: []string{ - "mainRFileUri", - }, - Properties: map[string]*dcl.Property{ - "archiveUris": &dcl.Property{ - Type: "array", - GoName: "ArchiveUris", - Description: "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "args": &dcl.Property{ - Type: "array", - GoName: "Args", - Description: "Optional. The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "fileUris": &dcl.Property{ - Type: "array", - GoName: "FileUris", - Description: "Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "loggingConfig": &dcl.Property{ - Type: "object", - GoName: "LoggingConfig", - GoType: "WorkflowTemplateJobsSparkRJobLoggingConfig", - Description: "Optional. The runtime log config for job execution.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "driverLogLevels": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "DriverLogLevels", - Description: "The per-package log levels for the driver. 
This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", - Immutable: true, - }, - }, - }, - "mainRFileUri": &dcl.Property{ - Type: "string", - GoName: "MainRFileUri", - Description: "Required. The HCFS URI of the main R file to use as the driver. Must be a .R file.", - Immutable: true, - }, - "properties": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "Properties", - Description: "Optional. A mapping of property names to values, used to configure SparkR. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.", - Immutable: true, - }, - }, - }, - "sparkSqlJob": &dcl.Property{ - Type: "object", - GoName: "SparkSqlJob", - GoType: "WorkflowTemplateJobsSparkSqlJob", - Description: "Optional. Job is a SparkSql job.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "jarFileUris": &dcl.Property{ - Type: "array", - GoName: "JarFileUris", - Description: "Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "loggingConfig": &dcl.Property{ - Type: "object", - GoName: "LoggingConfig", - GoType: "WorkflowTemplateJobsSparkSqlJobLoggingConfig", - Description: "Optional. The runtime log config for job execution.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "driverLogLevels": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "DriverLogLevels", - Description: "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. 
Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", - Immutable: true, - }, - }, - }, - "properties": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "Properties", - Description: "Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten.", - Immutable: true, - }, - "queryFileUri": &dcl.Property{ - Type: "string", - GoName: "QueryFileUri", - Description: "The HCFS URI of the script that contains SQL queries.", - Immutable: true, - }, - "queryList": &dcl.Property{ - Type: "object", - GoName: "QueryList", - GoType: "WorkflowTemplateJobsSparkSqlJobQueryList", - Description: "A list of queries.", - Immutable: true, - Required: []string{ - "queries", - }, - Properties: map[string]*dcl.Property{ - "queries": &dcl.Property{ - Type: "array", - GoName: "Queries", - Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - }, - }, - "scriptVariables": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "ScriptVariables", - Description: "Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET `name=\"value\";`).", - Immutable: true, - }, - }, - }, - "stepId": &dcl.Property{ - Type: "string", - GoName: "StepId", - Description: "Required. The step id. The id must be unique among all jobs within the template. 
The step id is used as prefix for job id, as job `goog-dataproc-workflow-step-id` label, and in prerequisiteStepIds field from other steps. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters.", - Immutable: true, - }, - }, - }, - }, - "labels": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "Labels", - Description: "Optional. The labels to associate with this template. These labels will be propagated to all jobs and clusters created by the workflow instance. Label **keys** must contain 1 to 63 characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). Label **values** may be empty, but, if present, must contain 1 to 63 characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a template.", - Immutable: true, - }, - "location": &dcl.Property{ - Type: "string", - GoName: "Location", - Description: "The location for the resource", - Immutable: true, - Parameter: true, - }, - "name": &dcl.Property{ - Type: "string", - GoName: "Name", - Description: "Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. * For `projects.regions.workflowTemplates`, the resource name of the template has the following format: `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}` * For `projects.locations.workflowTemplates`, the resource name of the template has the following format: `projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`", - Immutable: true, - Parameter: true, - HasLongForm: true, - }, - "parameters": &dcl.Property{ - Type: "array", - GoName: "Parameters", - Description: "Optional. Template parameters whose values are substituted into the template. 
Values for parameters must be provided when the template is instantiated.", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "WorkflowTemplateParameters", - Required: []string{ - "name", - "fields", - }, - Properties: map[string]*dcl.Property{ - "description": &dcl.Property{ - Type: "string", - GoName: "Description", - Description: "Optional. Brief description of the parameter. Must not exceed 1024 characters.", - Immutable: true, - }, - "fields": &dcl.Property{ - Type: "array", - GoName: "Fields", - Description: "Required. Paths to all fields that the parameter replaces. A field is allowed to appear in at most one parameter's list of field paths. A field path is similar in syntax to a google.protobuf.FieldMask. For example, a field path that references the zone field of a workflow template's cluster selector would be specified as `placement.clusterSelector.zone`. Also, field paths can reference fields using the following syntax: * Values in maps can be referenced by key: * labels['key'] * placement.clusterSelector.clusterLabels['key'] * placement.managedCluster.labels['key'] * placement.clusterSelector.clusterLabels['key'] * jobs['step-id'].labels['key'] * Jobs in the jobs list can be referenced by step-id: * jobs['step-id'].hadoopJob.mainJarFileUri * jobs['step-id'].hiveJob.queryFileUri * jobs['step-id'].pySparkJob.mainPythonFileUri * jobs['step-id'].hadoopJob.jarFileUris[0] * jobs['step-id'].hadoopJob.archiveUris[0] * jobs['step-id'].hadoopJob.fileUris[0] * jobs['step-id'].pySparkJob.pythonFileUris[0] * Items in repeated fields can be referenced by a zero-based index: * jobs['step-id'].sparkJob.args[0] * Other examples: * jobs['step-id'].hadoopJob.properties['key'] * jobs['step-id'].hadoopJob.args[0] * jobs['step-id'].hiveJob.scriptVariables['key'] * jobs['step-id'].hadoopJob.mainJarFileUri * placement.clusterSelector.zone It may not be possible to parameterize maps and repeated fields in their 
entirety since only individual map values and individual items in repeated fields can be referenced. For example, the following field paths are invalid: - placement.clusterSelector.clusterLabels - jobs['step-id'].sparkJob.args", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "name": &dcl.Property{ - Type: "string", - GoName: "Name", - Description: "Required. Parameter name. The parameter name is used as the key, and paired with the parameter value, which are passed to the template when the template is instantiated. The name must contain only capital letters (A-Z), numbers (0-9), and underscores (_), and must not start with a number. The maximum length is 40 characters.", - Immutable: true, - }, - "validation": &dcl.Property{ - Type: "object", - GoName: "Validation", - GoType: "WorkflowTemplateParametersValidation", - Description: "Optional. Validation rules to be applied to this parameter's value.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "regex": &dcl.Property{ - Type: "object", - GoName: "Regex", - GoType: "WorkflowTemplateParametersValidationRegex", - Description: "Validation based on regular expressions.", - Immutable: true, - Required: []string{ - "regexes", - }, - Properties: map[string]*dcl.Property{ - "regexes": &dcl.Property{ - Type: "array", - GoName: "Regexes", - Description: "Required. RE2 regular expressions used to validate the parameter's value. 
The value must match the regex in its entirety (substring matches are not sufficient).", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - }, - }, - "values": &dcl.Property{ - Type: "object", - GoName: "Values", - GoType: "WorkflowTemplateParametersValidationValues", - Description: "Validation based on a list of allowed values.", - Immutable: true, - Required: []string{ - "values", - }, - Properties: map[string]*dcl.Property{ - "values": &dcl.Property{ - Type: "array", - GoName: "Values", - Description: "Required. List of allowed values for the parameter.", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - }, - }, - }, - }, - }, - }, - }, - "placement": &dcl.Property{ - Type: "object", - GoName: "Placement", - GoType: "WorkflowTemplatePlacement", - Description: "Required. WorkflowTemplate scheduling information.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "clusterSelector": &dcl.Property{ - Type: "object", - GoName: "ClusterSelector", - GoType: "WorkflowTemplatePlacementClusterSelector", - Description: "Optional. A selector that chooses target cluster for jobs based on metadata. The selector is evaluated at the time each job is submitted.", - Immutable: true, - Required: []string{ - "clusterLabels", - }, - Properties: map[string]*dcl.Property{ - "clusterLabels": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "ClusterLabels", - Description: "Required. The cluster labels. Cluster must have all labels to match.", - Immutable: true, - }, - "zone": &dcl.Property{ - Type: "string", - GoName: "Zone", - Description: "Optional. The zone where workflow process executes. This parameter does not affect the selection of the cluster. 
If unspecified, the zone of the first cluster matching the selector is used.", - Immutable: true, - }, - }, - }, - "managedCluster": &dcl.Property{ - Type: "object", - GoName: "ManagedCluster", - GoType: "WorkflowTemplatePlacementManagedCluster", - Description: "A cluster that is managed by the workflow.", - Immutable: true, - Required: []string{ - "clusterName", - "config", - }, - Properties: map[string]*dcl.Property{ - "clusterName": &dcl.Property{ - Type: "string", - GoName: "ClusterName", - Description: "Required. The cluster name prefix. A unique cluster name will be formed by appending a random suffix. The name must contain only lower-case letters (a-z), numbers (0-9), and hyphens (-). Must begin with a letter. Cannot begin or end with hyphen. Must consist of between 2 and 35 characters.", - Immutable: true, - }, - "config": &dcl.Property{ - Type: "object", - GoName: "Config", - GoType: "WorkflowTemplatePlacementManagedClusterConfig", - Description: "Required. The cluster configuration.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "autoscalingConfig": &dcl.Property{ - Type: "object", - GoName: "AutoscalingConfig", - GoType: "WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig", - Description: "Optional. Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "policy": &dcl.Property{ - Type: "string", - GoName: "Policy", - Description: "Optional. The autoscaling policy used by the cluster. Only resource names including projectid and location (region) are valid. 
Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]` * `projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]` Note that the policy must be in the same project and Dataproc region.", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Dataproc/AutoscalingPolicy", - Field: "name", - }, - }, - }, - }, - }, - "encryptionConfig": &dcl.Property{ - Type: "object", - GoName: "EncryptionConfig", - GoType: "WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig", - Description: "Optional. Encryption settings for the cluster.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "gcePdKmsKeyName": &dcl.Property{ - Type: "string", - GoName: "GcePdKmsKeyName", - Description: "Optional. The Cloud KMS key name to use for PD disk encryption for all instances in the cluster.", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Cloudkms/CryptoKey", - Field: "selfLink", - }, - }, - }, - }, - }, - "endpointConfig": &dcl.Property{ - Type: "object", - GoName: "EndpointConfig", - GoType: "WorkflowTemplatePlacementManagedClusterConfigEndpointConfig", - Description: "Optional. Port/endpoint configuration for this cluster", - Immutable: true, - Properties: map[string]*dcl.Property{ - "enableHttpPortAccess": &dcl.Property{ - Type: "boolean", - GoName: "EnableHttpPortAccess", - Description: "Optional. If true, enable http access to specific ports on the cluster from external sources. Defaults to false.", - Immutable: true, - }, - "httpPorts": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "HttpPorts", - ReadOnly: true, - Description: "Output only. The map of port descriptions to URLs. 
Will only be populated if enable_http_port_access is true.", - Immutable: true, - }, - }, - }, - "gceClusterConfig": &dcl.Property{ - Type: "object", - GoName: "GceClusterConfig", - GoType: "WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig", - Description: "Optional. The shared Compute Engine config settings for all instances in a cluster.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "internalIPOnly": &dcl.Property{ - Type: "boolean", - GoName: "InternalIPOnly", - Description: "Optional. If true, all instances in the cluster will only have internal IP addresses. By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. This `internal_ip_only` restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses.", - Immutable: true, - ServerDefault: true, - }, - "metadata": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "Metadata", - Description: "The Compute Engine metadata entries to add to all instances (see [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).", - Immutable: true, - }, - "network": &dcl.Property{ - Type: "string", - GoName: "Network", - Description: "Optional. The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither `network_uri` nor `subnetwork_uri` is specified, the \"default\" network of the project is used, if it exists. Cannot be a \"Custom Subnet Network\" (see [Using Subnetworks](https://cloud.google.com/compute/docs/subnetworks) for more information). A full URL, partial URI, or short name are valid. 
Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default` * `projects/[project_id]/regions/global/default` * `default`", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Compute/Network", - Field: "selfLink", - }, - }, - }, - "nodeGroupAffinity": &dcl.Property{ - Type: "object", - GoName: "NodeGroupAffinity", - GoType: "WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity", - Description: "Optional. Node Group Affinity for sole-tenant clusters.", - Immutable: true, - Required: []string{ - "nodeGroup", - }, - Properties: map[string]*dcl.Property{ - "nodeGroup": &dcl.Property{ - Type: "string", - GoName: "NodeGroup", - Description: "Required. The URI of a sole-tenant [node group resource](https://cloud.google.com/compute/docs/reference/rest/v1/nodeGroups) that the cluster will be created on. A full URL, partial URI, or node group name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1` * `projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1` * `node-group-1`", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Compute/NodeGroup", - Field: "selfLink", - }, - }, - }, - }, - }, - "privateIPv6GoogleAccess": &dcl.Property{ - Type: "string", - GoName: "PrivateIPv6GoogleAccess", - GoType: "WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnum", - Description: "Optional. The type of IPv6 access for a cluster. 
Possible values: PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED, INHERIT_FROM_SUBNETWORK, OUTBOUND, BIDIRECTIONAL", - Immutable: true, - Enum: []string{ - "PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED", - "INHERIT_FROM_SUBNETWORK", - "OUTBOUND", - "BIDIRECTIONAL", - }, - }, - "reservationAffinity": &dcl.Property{ - Type: "object", - GoName: "ReservationAffinity", - GoType: "WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity", - Description: "Optional. Reservation Affinity for consuming Zonal reservation.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "consumeReservationType": &dcl.Property{ - Type: "string", - GoName: "ConsumeReservationType", - GoType: "WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnum", - Description: "Optional. Type of reservation to consume Possible values: TYPE_UNSPECIFIED, NO_RESERVATION, ANY_RESERVATION, SPECIFIC_RESERVATION", - Immutable: true, - Enum: []string{ - "TYPE_UNSPECIFIED", - "NO_RESERVATION", - "ANY_RESERVATION", - "SPECIFIC_RESERVATION", - }, - }, - "key": &dcl.Property{ - Type: "string", - GoName: "Key", - Description: "Optional. Corresponds to the label key of reservation resource.", - Immutable: true, - }, - "values": &dcl.Property{ - Type: "array", - GoName: "Values", - Description: "Optional. Corresponds to the label values of reservation resource.", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - }, - }, - "serviceAccount": &dcl.Property{ - Type: "string", - GoName: "ServiceAccount", - Description: "Optional. 
The [Dataproc service account](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_dataproc) (also see [VM Data Plane identity](https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity)) used by Dataproc cluster VM instances to access Google Cloud Platform services. If not specified, the [Compute Engine default service account](https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) is used.", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Iam/ServiceAccount", - Field: "email", - }, - }, - }, - "serviceAccountScopes": &dcl.Property{ - Type: "array", - GoName: "ServiceAccountScopes", - Description: "Optional. The URIs of service account scopes to be included in Compute Engine instances. The following base set of scopes is always included: * https://www.googleapis.com/auth/cloud.useraccounts.readonly * https://www.googleapis.com/auth/devstorage.read_write * https://www.googleapis.com/auth/logging.write If no scopes are specified, the following defaults are also provided: * https://www.googleapis.com/auth/bigquery * https://www.googleapis.com/auth/bigtable.admin.table * https://www.googleapis.com/auth/bigtable.data * https://www.googleapis.com/auth/devstorage.full_control", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "shieldedInstanceConfig": &dcl.Property{ - Type: "object", - GoName: "ShieldedInstanceConfig", - GoType: "WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig", - Description: "Optional. 
Shielded Instance Config for clusters using Compute Engine Shielded VMs.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "enableIntegrityMonitoring": &dcl.Property{ - Type: "boolean", - GoName: "EnableIntegrityMonitoring", - Description: "Optional. Defines whether instances have integrity monitoring enabled. Integrity monitoring compares the most recent boot measurements to the integrity policy baseline and returns a pair of pass/fail results depending on whether they match or not.", - Immutable: true, - }, - "enableSecureBoot": &dcl.Property{ - Type: "boolean", - GoName: "EnableSecureBoot", - Description: "Optional. Defines whether the instances have Secure Boot enabled. Secure Boot helps ensure that the system only runs authentic software by verifying the digital signature of all boot components, and halting the boot process if signature verification fails.", - Immutable: true, - }, - "enableVtpm": &dcl.Property{ - Type: "boolean", - GoName: "EnableVtpm", - Description: "Optional. Defines whether the instance have the vTPM enabled. Virtual Trusted Platform Module protects objects like keys, certificates and enables Measured Boot by performing the measurements needed to create a known good boot baseline, called the integrity policy baseline.", - Immutable: true, - }, - }, - }, - "subnetwork": &dcl.Property{ - Type: "string", - GoName: "Subnetwork", - Description: "Optional. The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri. A full URL, partial URI, or short name are valid. 
Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/subnetworks/sub0` * `projects/[project_id]/regions/us-east1/subnetworks/sub0` * `sub0`", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Compute/Subnetwork", - Field: "selfLink", - }, - }, - }, - "tags": &dcl.Property{ - Type: "array", - GoName: "Tags", - Description: "The Compute Engine tags to add to all instances (see [Tagging instances](https://cloud.google.com/compute/docs/label-or-tag-resources#tags)).", - Immutable: true, - SendEmpty: true, - ListType: "set", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "zone": &dcl.Property{ - Type: "string", - GoName: "Zone", - Description: "Optional. The zone where the Compute Engine cluster will be located. On a create request, it is required in the \"global\" region. If omitted in a non-global Dataproc region, the service will pick a zone in the corresponding Compute Engine region. On a get request, zone will always be present. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]` * `projects/[project_id]/zones/[zone]` * `us-central1-f`", - Immutable: true, - }, - }, - }, -{{- if ne $.TargetVersionName "ga" }} - "gkeClusterConfig": &dcl.Property{ - Type: "object", - GoName: "GkeClusterConfig", - GoType: "WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig", - Description: "Optional. BETA. The Kubernetes Engine config for Dataproc clusters deployed to Kubernetes. 
Setting this is considered mutually exclusive with Compute Engine-based options such as `gce_cluster_config`, `master_config`, `worker_config`, `secondary_worker_config`, and `autoscaling_config`.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "namespacedGkeDeploymentTarget": &dcl.Property{ - Type: "object", - GoName: "NamespacedGkeDeploymentTarget", - GoType: "WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget", - Description: "Optional. A target for the deployment.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "clusterNamespace": &dcl.Property{ - Type: "string", - GoName: "ClusterNamespace", - Description: "Optional. A namespace within the GKE cluster to deploy into.", - Immutable: true, - }, - "targetGkeCluster": &dcl.Property{ - Type: "string", - GoName: "TargetGkeCluster", - Description: "Optional. The target GKE cluster to deploy to. Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Container/Cluster", - Field: "name", - }, - }, - }, - }, - }, - }, - }, -{{- end }} - "initializationActions": &dcl.Property{ - Type: "array", - GoName: "InitializationActions", - Description: "Optional. Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's `role` metadata to run an executable on a master or worker node, as shown below using `curl` (you can also use `wget`): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if [[ \"${ROLE}\" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... 
fi", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "WorkflowTemplatePlacementManagedClusterConfigInitializationActions", - Properties: map[string]*dcl.Property{ - "executableFile": &dcl.Property{ - Type: "string", - GoName: "ExecutableFile", - Description: "Required. Cloud Storage URI of executable file.", - Immutable: true, - }, - "executionTimeout": &dcl.Property{ - Type: "string", - GoName: "ExecutionTimeout", - Description: "Optional. Amount of time executable has to complete. Default is 10 minutes (see JSON representation of [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period.", - Immutable: true, - }, - }, - }, - }, - "lifecycleConfig": &dcl.Property{ - Type: "object", - GoName: "LifecycleConfig", - GoType: "WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig", - Description: "Optional. Lifecycle setting for the cluster.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "autoDeleteTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "AutoDeleteTime", - Description: "Optional. The time when cluster will be auto-deleted (see JSON representation of [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).", - Immutable: true, - }, - "autoDeleteTtl": &dcl.Property{ - Type: "string", - GoName: "AutoDeleteTtl", - Description: "Optional. The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).", - Immutable: true, - }, - "idleDeleteTtl": &dcl.Property{ - Type: "string", - GoName: "IdleDeleteTtl", - Description: "Optional. 
The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).", - Immutable: true, - }, - "idleStartTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "IdleStartTime", - ReadOnly: true, - Description: "Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).", - Immutable: true, - }, - }, - }, - "masterConfig": &dcl.Property{ - Type: "object", - GoName: "MasterConfig", - GoType: "WorkflowTemplatePlacementManagedClusterConfigMasterConfig", - Description: "Optional. The Compute Engine config settings for the master instance in a cluster.", - Immutable: true, - ServerDefault: true, - Properties: map[string]*dcl.Property{ - "accelerators": &dcl.Property{ - Type: "array", - GoName: "Accelerators", - Description: "Optional. The Compute Engine accelerator configuration for these instances.", - Immutable: true, - ServerDefault: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerators", - Properties: map[string]*dcl.Property{ - "acceleratorCount": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "AcceleratorCount", - Description: "The number of the accelerator cards of this type exposed to this instance.", - Immutable: true, - }, - "acceleratorType": &dcl.Property{ - Type: "string", - GoName: "AcceleratorType", - Description: "Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. 
See [Compute Engine AcceleratorTypes](https://cloud.google.com/compute/docs/reference/beta/acceleratorTypes). Examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * `nvidia-tesla-k80` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, `nvidia-tesla-k80`.", - Immutable: true, - }, - }, - }, - }, - "diskConfig": &dcl.Property{ - Type: "object", - GoName: "DiskConfig", - GoType: "WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig", - Description: "Optional. Disk option config settings.", - Immutable: true, - ServerDefault: true, - Properties: map[string]*dcl.Property{ - "bootDiskSizeGb": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "BootDiskSizeGb", - Description: "Optional. Size in GB of the boot disk (default is 500GB).", - Immutable: true, - }, - "bootDiskType": &dcl.Property{ - Type: "string", - GoName: "BootDiskType", - Description: "Optional. Type of the boot disk (default is \"pd-standard\"). Valid values: \"pd-balanced\" (Persistent Disk Balanced Solid State Drive), \"pd-ssd\" (Persistent Disk Solid State Drive), or \"pd-standard\" (Persistent Disk Hard Disk Drive). See [Disk types](https://cloud.google.com/compute/docs/disks#disk-types).", - Immutable: true, - }, - "numLocalSsds": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "NumLocalSsds", - Description: "Optional. Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. 
If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.", - Immutable: true, - ServerDefault: true, - }, - }, - }, - "image": &dcl.Property{ - Type: "string", - GoName: "Image", - Description: "Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id]` * `projects/[project_id]/global/images/[image-id]` * `image-id` Image family examples. Dataproc will use the most recent image from the family: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name]` * `projects/[project_id]/global/images/family/[custom-image-family-name]` If the URI is unspecified, it will be inferred from `SoftwareConfig.image_version` or the system default.", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Compute/Image", - Field: "selfLink", - }, - }, - }, - "instanceNames": &dcl.Property{ - Type: "array", - GoName: "InstanceNames", - ReadOnly: true, - Description: "Output only. The list of instance names. Dataproc derives the names from `cluster_name`, `num_instances`, and the instance group.", - Immutable: true, - ServerDefault: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Compute/Instance", - Field: "selfLink", - }, - }, - }, - }, - "isPreemptible": &dcl.Property{ - Type: "boolean", - GoName: "IsPreemptible", - ReadOnly: true, - Description: "Output only. Specifies that this instance group contains preemptible instances.", - Immutable: true, - }, - "machineType": &dcl.Property{ - Type: "string", - GoName: "MachineType", - Description: "Optional. 
The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `n1-standard-2` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, `n1-standard-2`.", - Immutable: true, - }, - "managedGroupConfig": &dcl.Property{ - Type: "object", - GoName: "ManagedGroupConfig", - GoType: "WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig", - ReadOnly: true, - Description: "Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.", - Immutable: true, - ServerDefault: true, - Properties: map[string]*dcl.Property{ - "instanceGroupManagerName": &dcl.Property{ - Type: "string", - GoName: "InstanceGroupManagerName", - ReadOnly: true, - Description: "Output only. The name of the Instance Group Manager for this group.", - Immutable: true, - }, - "instanceTemplateName": &dcl.Property{ - Type: "string", - GoName: "InstanceTemplateName", - ReadOnly: true, - Description: "Output only. The name of the Instance Template used for the Managed Instance Group.", - Immutable: true, - }, - }, - }, - "minCpuPlatform": &dcl.Property{ - Type: "string", - GoName: "MinCpuPlatform", - Description: "Optional. Specifies the minimum cpu platform for the Instance Group. See [Dataproc -> Minimum CPU Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).", - Immutable: true, - ServerDefault: true, - }, - "numInstances": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "NumInstances", - Description: "Optional. 
The number of VM instances in the instance group. For [HA cluster](/dataproc/docs/concepts/configuring-clusters/high-availability) [master_config](#FIELDS.master_config) groups, **must be set to 3**. For standard cluster [master_config](#FIELDS.master_config) groups, **must be set to 1**.", - Immutable: true, - }, - "preemptibility": &dcl.Property{ - Type: "string", - GoName: "Preemptibility", - GoType: "WorkflowTemplatePlacementManagedClusterConfigMasterConfigPreemptibilityEnum", - Description: "Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is `NON_PREEMPTIBLE`. This default cannot be changed. The default value for secondary instances is `PREEMPTIBLE`. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE", - Immutable: true, - Enum: []string{ - "PREEMPTIBILITY_UNSPECIFIED", - "NON_PREEMPTIBLE", - "PREEMPTIBLE", -{{- if ne $.TargetVersionName "ga" }} - }, - }, - }, - }, - "metastoreConfig": &dcl.Property{ - Type: "object", - GoName: "MetastoreConfig", - GoType: "WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig", - Description: "Optional. Metastore configuration.", - Immutable: true, - Required: []string{ - "dataprocMetastoreService", - }, - Properties: map[string]*dcl.Property{ - "dataprocMetastoreService": &dcl.Property{ - Type: "string", - GoName: "DataprocMetastoreService", - Description: "Required. Resource name of an existing Dataproc Metastore service. Example: * `projects/[project_id]/locations/[dataproc_region]/services/[service-name]`", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Metastore/Service", - Field: "selfLink", - }, -{{- end }} - }, - }, - }, - }, - "secondaryWorkerConfig": &dcl.Property{ - Type: "object", - GoName: "SecondaryWorkerConfig", - GoType: "WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig", - Description: "Optional. 
The Compute Engine config settings for additional worker instances in a cluster.", - Immutable: true, - ServerDefault: true, - Properties: map[string]*dcl.Property{ - "accelerators": &dcl.Property{ - Type: "array", - GoName: "Accelerators", - Description: "Optional. The Compute Engine accelerator configuration for these instances.", - Immutable: true, - ServerDefault: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerators", - Properties: map[string]*dcl.Property{ - "acceleratorCount": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "AcceleratorCount", - Description: "The number of the accelerator cards of this type exposed to this instance.", - Immutable: true, - }, - "acceleratorType": &dcl.Property{ - Type: "string", - GoName: "AcceleratorType", - Description: "Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See [Compute Engine AcceleratorTypes](https://cloud.google.com/compute/docs/reference/beta/acceleratorTypes). Examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * `nvidia-tesla-k80` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, `nvidia-tesla-k80`.", - Immutable: true, - }, - }, - }, - }, - "diskConfig": &dcl.Property{ - Type: "object", - GoName: "DiskConfig", - GoType: "WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig", - Description: "Optional. 
Disk option config settings.", - Immutable: true, - ServerDefault: true, - Properties: map[string]*dcl.Property{ - "bootDiskSizeGb": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "BootDiskSizeGb", - Description: "Optional. Size in GB of the boot disk (default is 500GB).", - Immutable: true, - }, - "bootDiskType": &dcl.Property{ - Type: "string", - GoName: "BootDiskType", - Description: "Optional. Type of the boot disk (default is \"pd-standard\"). Valid values: \"pd-balanced\" (Persistent Disk Balanced Solid State Drive), \"pd-ssd\" (Persistent Disk Solid State Drive), or \"pd-standard\" (Persistent Disk Hard Disk Drive). See [Disk types](https://cloud.google.com/compute/docs/disks#disk-types).", - Immutable: true, - }, - "numLocalSsds": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "NumLocalSsds", - Description: "Optional. Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.", - Immutable: true, - ServerDefault: true, - }, - }, - }, - "image": &dcl.Property{ - Type: "string", - GoName: "Image", - Description: "Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id]` * `projects/[project_id]/global/images/[image-id]` * `image-id` Image family examples. 
Dataproc will use the most recent image from the family: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name]` * `projects/[project_id]/global/images/family/[custom-image-family-name]` If the URI is unspecified, it will be inferred from `SoftwareConfig.image_version` or the system default.", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Compute/Image", - Field: "selfLink", - }, - }, - }, - "instanceNames": &dcl.Property{ - Type: "array", - GoName: "InstanceNames", - ReadOnly: true, - Description: "Output only. The list of instance names. Dataproc derives the names from `cluster_name`, `num_instances`, and the instance group.", - Immutable: true, - ServerDefault: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Compute/Instance", - Field: "selfLink", - }, - }, - }, - }, - "isPreemptible": &dcl.Property{ - Type: "boolean", - GoName: "IsPreemptible", - ReadOnly: true, - Description: "Output only. Specifies that this instance group contains preemptible instances.", - Immutable: true, - }, - "machineType": &dcl.Property{ - Type: "string", - GoName: "MachineType", - Description: "Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. 
Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `n1-standard-2` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, `n1-standard-2`.", - Immutable: true, - }, - "managedGroupConfig": &dcl.Property{ - Type: "object", - GoName: "ManagedGroupConfig", - GoType: "WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig", - ReadOnly: true, - Description: "Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.", - Immutable: true, - ServerDefault: true, - Properties: map[string]*dcl.Property{ - "instanceGroupManagerName": &dcl.Property{ - Type: "string", - GoName: "InstanceGroupManagerName", - ReadOnly: true, - Description: "Output only. The name of the Instance Group Manager for this group.", - Immutable: true, - }, - "instanceTemplateName": &dcl.Property{ - Type: "string", - GoName: "InstanceTemplateName", - ReadOnly: true, - Description: "Output only. The name of the Instance Template used for the Managed Instance Group.", - Immutable: true, - }, - }, - }, - "minCpuPlatform": &dcl.Property{ - Type: "string", - GoName: "MinCpuPlatform", - Description: "Optional. Specifies the minimum cpu platform for the Instance Group. See [Dataproc -> Minimum CPU Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).", - Immutable: true, - ServerDefault: true, - }, - "numInstances": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "NumInstances", - Description: "Optional. The number of VM instances in the instance group. 
For [HA cluster](/dataproc/docs/concepts/configuring-clusters/high-availability) [master_config](#FIELDS.master_config) groups, **must be set to 3**. For standard cluster [master_config](#FIELDS.master_config) groups, **must be set to 1**.", - Immutable: true, - }, - "preemptibility": &dcl.Property{ - Type: "string", - GoName: "Preemptibility", - GoType: "WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPreemptibilityEnum", - Description: "Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is `NON_PREEMPTIBLE`. This default cannot be changed. The default value for secondary instances is `PREEMPTIBLE`. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE", - Immutable: true, - Enum: []string{ - "PREEMPTIBILITY_UNSPECIFIED", - "NON_PREEMPTIBLE", - "PREEMPTIBLE", - }, - }, - }, - }, - "securityConfig": &dcl.Property{ - Type: "object", - GoName: "SecurityConfig", - GoType: "WorkflowTemplatePlacementManagedClusterConfigSecurityConfig", - Description: "Optional. Security settings for the cluster.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "kerberosConfig": &dcl.Property{ - Type: "object", - GoName: "KerberosConfig", - GoType: "WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig", - Description: "Optional. Kerberos related configuration.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "crossRealmTrustAdminServer": &dcl.Property{ - Type: "string", - GoName: "CrossRealmTrustAdminServer", - Description: "Optional. The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship.", - Immutable: true, - }, - "crossRealmTrustKdc": &dcl.Property{ - Type: "string", - GoName: "CrossRealmTrustKdc", - Description: "Optional. 
The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship.", - Immutable: true, - }, - "crossRealmTrustRealm": &dcl.Property{ - Type: "string", - GoName: "CrossRealmTrustRealm", - Description: "Optional. The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust.", - Immutable: true, - }, - "crossRealmTrustSharedPassword": &dcl.Property{ - Type: "string", - GoName: "CrossRealmTrustSharedPassword", - Description: "Optional. The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship.", - Immutable: true, - }, - "enableKerberos": &dcl.Property{ - Type: "boolean", - GoName: "EnableKerberos", - Description: "Optional. Flag to indicate whether to Kerberize the cluster (default: false). Set this field to true to enable Kerberos on a cluster.", - Immutable: true, - }, - "kdcDbKey": &dcl.Property{ - Type: "string", - GoName: "KdcDbKey", - Description: "Optional. The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database.", - Immutable: true, - }, - "keyPassword": &dcl.Property{ - Type: "string", - GoName: "KeyPassword", - Description: "Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc.", - Immutable: true, - }, - "keystore": &dcl.Property{ - Type: "string", - GoName: "Keystore", - Description: "Optional. The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.", - Immutable: true, - }, - "keystorePassword": &dcl.Property{ - Type: "string", - GoName: "KeystorePassword", - Description: "Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. 
For the self-signed certificate, this password is generated by Dataproc.", - Immutable: true, - }, - "kmsKey": &dcl.Property{ - Type: "string", - GoName: "KmsKey", - Description: "Optional. The uri of the KMS key used to encrypt various sensitive files.", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Cloudkms/CryptoKey", - Field: "selfLink", - }, - }, - }, - "realm": &dcl.Property{ - Type: "string", - GoName: "Realm", - Description: "Optional. The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm.", - Immutable: true, - }, - "rootPrincipalPassword": &dcl.Property{ - Type: "string", - GoName: "RootPrincipalPassword", - Description: "Optional. The Cloud Storage URI of a KMS encrypted file containing the root principal password.", - Immutable: true, - }, - "tgtLifetimeHours": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "TgtLifetimeHours", - Description: "Optional. The lifetime of the ticket granting ticket, in hours. If not specified, or user specifies 0, then default value 10 will be used.", - Immutable: true, - }, - "truststore": &dcl.Property{ - Type: "string", - GoName: "Truststore", - Description: "Optional. The Cloud Storage URI of the truststore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.", - Immutable: true, - }, - "truststorePassword": &dcl.Property{ - Type: "string", - GoName: "TruststorePassword", - Description: "Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc.", - Immutable: true, - }, - }, - }, - }, - }, - "softwareConfig": &dcl.Property{ - Type: "object", - GoName: "SoftwareConfig", - GoType: "WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig", - Description: "Optional. 
The config settings for software inside the cluster.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "imageVersion": &dcl.Property{ - Type: "string", - GoName: "ImageVersion", - Description: "Optional. The version of software inside the cluster. It must be one of the supported [Dataproc Versions](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_dataproc_versions), such as \"1.2\" (including a subminor version, such as \"1.2.29\"), or the [\"preview\" version](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). If unspecified, it defaults to the latest Debian version.", - Immutable: true, - }, - "optionalComponents": &dcl.Property{ - Type: "array", - GoName: "OptionalComponents", - Description: "Optional. The set of components to activate on the cluster.", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsEnum", - Enum: []string{ - "COMPONENT_UNSPECIFIED", - "ANACONDA", - "DOCKER", - "DRUID", - "FLINK", - "HBASE", - "HIVE_WEBHCAT", - "JUPYTER", - "KERBEROS", - "PRESTO", - "RANGER", - "SOLR", - "ZEPPELIN", - "ZOOKEEPER", - }, - }, - }, - "properties": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "Properties", - Description: "Optional. The properties to set on daemon config files. Property keys are specified in `prefix:property` format, for example `core:hadoop.tmp.dir`. 
The following are supported prefixes and their mappings: * capacity-scheduler: `capacity-scheduler.xml` * core: `core-site.xml` * distcp: `distcp-default.xml` * hdfs: `hdfs-site.xml` * hive: `hive-site.xml` * mapred: `mapred-site.xml` * pig: `pig.properties` * spark: `spark-defaults.conf` * yarn: `yarn-site.xml` For more information, see [Cluster properties](https://cloud.google.com/dataproc/docs/concepts/cluster-properties).", - Immutable: true, - }, - }, - }, - "stagingBucket": &dcl.Property{ - Type: "string", - GoName: "StagingBucket", - Description: "Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see [Dataproc staging bucket](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). **This field requires a Cloud Storage bucket name, not a URI to a Cloud Storage bucket.**", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Storage/Bucket", - Field: "name", - }, - }, - }, - "tempBucket": &dcl.Property{ - Type: "string", - GoName: "TempBucket", - Description: "Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket. 
**This field requires a Cloud Storage bucket name, not a URI to a Cloud Storage bucket.**", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Storage/Bucket", - Field: "name", - }, - }, - }, - "workerConfig": &dcl.Property{ - Type: "object", - GoName: "WorkerConfig", - GoType: "WorkflowTemplatePlacementManagedClusterConfigWorkerConfig", - Description: "Optional. The Compute Engine config settings for worker instances in a cluster.", - Immutable: true, - ServerDefault: true, - Properties: map[string]*dcl.Property{ - "accelerators": &dcl.Property{ - Type: "array", - GoName: "Accelerators", - Description: "Optional. The Compute Engine accelerator configuration for these instances.", - Immutable: true, - ServerDefault: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerators", - Properties: map[string]*dcl.Property{ - "acceleratorCount": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "AcceleratorCount", - Description: "The number of the accelerator cards of this type exposed to this instance.", - Immutable: true, - }, - "acceleratorType": &dcl.Property{ - Type: "string", - GoName: "AcceleratorType", - Description: "Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See [Compute Engine AcceleratorTypes](https://cloud.google.com/compute/docs/reference/beta/acceleratorTypes). 
Examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * `nvidia-tesla-k80` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, `nvidia-tesla-k80`.", - Immutable: true, - }, - }, - }, - }, - "diskConfig": &dcl.Property{ - Type: "object", - GoName: "DiskConfig", - GoType: "WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig", - Description: "Optional. Disk option config settings.", - Immutable: true, - ServerDefault: true, - Properties: map[string]*dcl.Property{ - "bootDiskSizeGb": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "BootDiskSizeGb", - Description: "Optional. Size in GB of the boot disk (default is 500GB).", - Immutable: true, - }, - "bootDiskType": &dcl.Property{ - Type: "string", - GoName: "BootDiskType", - Description: "Optional. Type of the boot disk (default is \"pd-standard\"). Valid values: \"pd-balanced\" (Persistent Disk Balanced Solid State Drive), \"pd-ssd\" (Persistent Disk Solid State Drive), or \"pd-standard\" (Persistent Disk Hard Disk Drive). See [Disk types](https://cloud.google.com/compute/docs/disks#disk-types).", - Immutable: true, - }, - "numLocalSsds": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "NumLocalSsds", - Description: "Optional. Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. 
If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.", - Immutable: true, - ServerDefault: true, - }, - }, - }, - "image": &dcl.Property{ - Type: "string", - GoName: "Image", - Description: "Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id]` * `projects/[project_id]/global/images/[image-id]` * `image-id` Image family examples. Dataproc will use the most recent image from the family: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name]` * `projects/[project_id]/global/images/family/[custom-image-family-name]` If the URI is unspecified, it will be inferred from `SoftwareConfig.image_version` or the system default.", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Compute/Image", - Field: "selfLink", - }, - }, - }, - "instanceNames": &dcl.Property{ - Type: "array", - GoName: "InstanceNames", - ReadOnly: true, - Description: "Output only. The list of instance names. Dataproc derives the names from `cluster_name`, `num_instances`, and the instance group.", - Immutable: true, - ServerDefault: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Compute/Instance", - Field: "selfLink", - }, - }, - }, - }, - "isPreemptible": &dcl.Property{ - Type: "boolean", - GoName: "IsPreemptible", - ReadOnly: true, - Description: "Output only. Specifies that this instance group contains preemptible instances.", - Immutable: true, - }, - "machineType": &dcl.Property{ - Type: "string", - GoName: "MachineType", - Description: "Optional. 
The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `n1-standard-2` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, `n1-standard-2`.", - Immutable: true, - }, - "managedGroupConfig": &dcl.Property{ - Type: "object", - GoName: "ManagedGroupConfig", - GoType: "WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig", - ReadOnly: true, - Description: "Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.", - Immutable: true, - ServerDefault: true, - Properties: map[string]*dcl.Property{ - "instanceGroupManagerName": &dcl.Property{ - Type: "string", - GoName: "InstanceGroupManagerName", - ReadOnly: true, - Description: "Output only. The name of the Instance Group Manager for this group.", - Immutable: true, - }, - "instanceTemplateName": &dcl.Property{ - Type: "string", - GoName: "InstanceTemplateName", - ReadOnly: true, - Description: "Output only. The name of the Instance Template used for the Managed Instance Group.", - Immutable: true, - }, - }, - }, - "minCpuPlatform": &dcl.Property{ - Type: "string", - GoName: "MinCpuPlatform", - Description: "Optional. Specifies the minimum cpu platform for the Instance Group. See [Dataproc -> Minimum CPU Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).", - Immutable: true, - ServerDefault: true, - }, - "numInstances": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "NumInstances", - Description: "Optional. 
The number of VM instances in the instance group. For [HA cluster](/dataproc/docs/concepts/configuring-clusters/high-availability) [master_config](#FIELDS.master_config) groups, **must be set to 3**. For standard cluster [master_config](#FIELDS.master_config) groups, **must be set to 1**.", - Immutable: true, - }, - "preemptibility": &dcl.Property{ - Type: "string", - GoName: "Preemptibility", - GoType: "WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPreemptibilityEnum", - Description: "Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is `NON_PREEMPTIBLE`. This default cannot be changed. The default value for secondary instances is `PREEMPTIBLE`. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE", - Immutable: true, - Enum: []string{ - "PREEMPTIBILITY_UNSPECIFIED", - "NON_PREEMPTIBLE", - "PREEMPTIBLE", - }, - }, - }, - }, - }, - }, - "labels": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "Labels", - Description: "Optional. The labels to associate with this cluster. Label keys must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: p{Ll}p{Lo}{0,62} Label values must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: [p{Ll}p{Lo}p{N}_-]{0,63} No more than 32 labels can be associated with a given cluster.", - Immutable: true, - }, - }, - }, - }, - }, - "project": &dcl.Property{ - Type: "string", - GoName: "Project", - Description: "The project for the resource", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Cloudresourcemanager/Project", - Field: "name", - Parent: true, - }, - }, - Parameter: true, - }, - "updateTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "UpdateTime", - ReadOnly: true, - Description: "Output only. 
The time template was last updated.", - Immutable: true, - }, - "version": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "Version", - ReadOnly: true, - Description: "Output only. The current version of this workflow template.", - Immutable: true, - }, - }, - }, - }, - }, - }, - } -} diff --git a/mmv1/third_party/terraform/services/firebaserules/release_schema.go b/mmv1/third_party/terraform/services/firebaserules/release_schema.go deleted file mode 100644 index 95f0171e816f..000000000000 --- a/mmv1/third_party/terraform/services/firebaserules/release_schema.go +++ /dev/null @@ -1,158 +0,0 @@ -package firebaserules - -import ( - dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" -) - -func DCLReleaseSchema() *dcl.Schema { - return &dcl.Schema{ - Info: &dcl.Info{ - Title: "Firebaserules/Release", - StructName: "Release", - Reference: &dcl.Link{ - Text: "Firebase Rules API Documentation", - URL: "https://firebase.google.com/docs/reference/rules/rest#rest-resource:-v1.projects.releases", - }, - Guides: []*dcl.Link{ - &dcl.Link{ - Text: "Get started with Firebase Security Rules", - URL: "https://firebase.google.com/docs/rules/get-started", - }, - }, - }, - Paths: &dcl.Paths{ - Get: &dcl.Path{ - Description: "The function used to get information about a Release", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "release", - Required: true, - Description: "A full instance of a Release", - }, - }, - }, - Apply: &dcl.Path{ - Description: "The function used to apply information about a Release", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "release", - Required: true, - Description: "A full instance of a Release", - }, - }, - }, - Delete: &dcl.Path{ - Description: "The function used to delete a Release", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "release", - Required: true, - Description: "A full instance of a Release", - }, - }, - }, - DeleteAll: &dcl.Path{ - Description: 
"The function used to delete all Release", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - List: &dcl.Path{ - Description: "The function used to list information about many Release", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - }, - Components: &dcl.Components{ - Schemas: map[string]*dcl.Component{ - "Release": &dcl.Component{ - Title: "Release", - ID: "projects/{{project}}/releases/{{name}}", - ParentContainer: "project", - HasCreate: true, - SchemaProperty: dcl.Property{ - Type: "object", - Required: []string{ - "name", - "rulesetName", - "project", - }, - Properties: map[string]*dcl.Property{ - "createTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "CreateTime", - ReadOnly: true, - Description: "Output only. Time the release was created.", - Immutable: true, - }, - "disabled": &dcl.Property{ - Type: "boolean", - GoName: "Disabled", - ReadOnly: true, - Description: "Disable the release to keep it from being served. 
The response code of NOT_FOUND will be given for executables generated from this Release.", - Immutable: true, - }, - "name": &dcl.Property{ - Type: "string", - GoName: "Name", - Description: "Format: `projects/{project_id}/releases/{release_id}`\\Firestore Rules Releases will **always** have the name 'cloud.firestore'", - Immutable: true, - ForwardSlashAllowed: true, - HasLongForm: true, - }, - "project": &dcl.Property{ - Type: "string", - GoName: "Project", - Description: "The project for the resource", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Cloudresourcemanager/Project", - Field: "name", - Parent: true, - }, - }, - Parameter: true, - }, - "rulesetName": &dcl.Property{ - Type: "string", - GoName: "RulesetName", - Description: "Name of the `Ruleset` referred to by this `Release`. The `Ruleset` must exist for the `Release` to be created.", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Firebaserules/Ruleset", - Field: "name", - }, - }, - HasLongForm: true, - }, - "updateTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "UpdateTime", - ReadOnly: true, - Description: "Output only. 
Time the release was updated.", - Immutable: true, - }, - }, - }, - }, - }, - }, - } -} diff --git a/mmv1/third_party/terraform/services/firebaserules/ruleset_schema.go b/mmv1/third_party/terraform/services/firebaserules/ruleset_schema.go deleted file mode 100644 index 3044e24880e4..000000000000 --- a/mmv1/third_party/terraform/services/firebaserules/ruleset_schema.go +++ /dev/null @@ -1,211 +0,0 @@ -package firebaserules - -import ( - dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" -) - -func DCLRulesetSchema() *dcl.Schema { - return &dcl.Schema{ - Info: &dcl.Info{ - Title: "Firebaserules/Ruleset", - StructName: "Ruleset", - Reference: &dcl.Link{ - Text: "Firebase Ruleset API Documentation", - URL: "https://firebase.google.com/docs/reference/rules/rest#rest-resource:-v1.projects.rulesets", - }, - Guides: []*dcl.Link{ - &dcl.Link{ - Text: "Get started with Firebase Security Rules", - URL: "https://firebase.google.com/docs/rules/get-started", - }, - }, - }, - Paths: &dcl.Paths{ - Get: &dcl.Path{ - Description: "The function used to get information about a Ruleset", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "ruleset", - Required: true, - Description: "A full instance of a Ruleset", - }, - }, - }, - Apply: &dcl.Path{ - Description: "The function used to apply information about a Ruleset", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "ruleset", - Required: true, - Description: "A full instance of a Ruleset", - }, - }, - }, - Delete: &dcl.Path{ - Description: "The function used to delete a Ruleset", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "ruleset", - Required: true, - Description: "A full instance of a Ruleset", - }, - }, - }, - DeleteAll: &dcl.Path{ - Description: "The function used to delete all Ruleset", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - 
}, - List: &dcl.Path{ - Description: "The function used to list information about many Ruleset", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - }, - Components: &dcl.Components{ - Schemas: map[string]*dcl.Component{ - "Ruleset": &dcl.Component{ - Title: "Ruleset", - ID: "projects/{{project}}/rulesets/{{name}}", - ParentContainer: "project", - HasCreate: true, - SchemaProperty: dcl.Property{ - Type: "object", - Required: []string{ - "source", - "project", - }, - Properties: map[string]*dcl.Property{ - "createTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "CreateTime", - ReadOnly: true, - Description: "Output only. Time the `Ruleset` was created.", - Immutable: true, - }, - "metadata": &dcl.Property{ - Type: "object", - GoName: "Metadata", - GoType: "RulesetMetadata", - ReadOnly: true, - Description: "Output only. The metadata for this ruleset.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "services": &dcl.Property{ - Type: "array", - GoName: "Services", - Description: "Services that this ruleset has declarations for (e.g., \"cloud.firestore\"). There may be 0+ of these.", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - }, - }, - "name": &dcl.Property{ - Type: "string", - GoName: "Name", - Description: "Output only. Name of the `Ruleset`. The ruleset_id is auto generated by the service. 
Format: `projects/{project_id}/rulesets/{ruleset_id}`", - Immutable: true, - ServerGeneratedParameter: true, - HasLongForm: true, - }, - "project": &dcl.Property{ - Type: "string", - GoName: "Project", - Description: "The project for the resource", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Cloudresourcemanager/Project", - Field: "name", - Parent: true, - }, - }, - Parameter: true, - }, - "source": &dcl.Property{ - Type: "object", - GoName: "Source", - GoType: "RulesetSource", - Description: "`Source` for the `Ruleset`.", - Immutable: true, - Required: []string{ - "files", - }, - Properties: map[string]*dcl.Property{ - "files": &dcl.Property{ - Type: "array", - GoName: "Files", - Description: "`File` set constituting the `Source` bundle.", - Immutable: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "RulesetSourceFiles", - Required: []string{ - "content", - "name", - }, - Properties: map[string]*dcl.Property{ - "content": &dcl.Property{ - Type: "string", - GoName: "Content", - Description: "Textual Content.", - Immutable: true, - }, - "fingerprint": &dcl.Property{ - Type: "string", - GoName: "Fingerprint", - Description: "Fingerprint (e.g. github sha) associated with the `File`.", - Immutable: true, - }, - "name": &dcl.Property{ - Type: "string", - GoName: "Name", - Description: "File name.", - Immutable: true, - }, - }, - }, - }, - "language": &dcl.Property{ - Type: "string", - GoName: "Language", - GoType: "RulesetSourceLanguageEnum", - Description: "`Language` of the `Source` bundle. If unspecified, the language will default to `FIREBASE_RULES`. 
Possible values: LANGUAGE_UNSPECIFIED, FIREBASE_RULES, EVENT_FLOW_TRIGGERS", - Immutable: true, - Enum: []string{ - "LANGUAGE_UNSPECIFIED", - "FIREBASE_RULES", - "EVENT_FLOW_TRIGGERS", - }, - }, - }, - }, - }, - }, - }, - }, - }, - } -} diff --git a/mmv1/third_party/terraform/services/gkehub/feature_membership_schema.go.tmpl b/mmv1/third_party/terraform/services/gkehub/feature_membership_schema.go.tmpl deleted file mode 100644 index fcc410a0896e..000000000000 --- a/mmv1/third_party/terraform/services/gkehub/feature_membership_schema.go.tmpl +++ /dev/null @@ -1,807 +0,0 @@ -package gkehub - -import ( - dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" -) - -func DCLFeatureMembershipSchema() *dcl.Schema { - return &dcl.Schema{ - Info: &dcl.Info{ - Title: "GkeHub/FeatureMembership", - Description: "The GkeHub FeatureMembership resource", - StructName: "FeatureMembership", - Mutex: "{{ "{{" }}project{{ "}}" }}/{{ "{{" }}location{{ "}}" }}/{{ "{{" }}feature{{ "}}" }}", - }, - Paths: &dcl.Paths{ - Get: &dcl.Path{ - Description: "The function used to get information about a FeatureMembership", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "featureMembership", - Required: true, - Description: "A full instance of a FeatureMembership", - }, - }, - }, - Apply: &dcl.Path{ - Description: "The function used to apply information about a FeatureMembership", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "featureMembership", - Required: true, - Description: "A full instance of a FeatureMembership", - }, - }, - }, - Delete: &dcl.Path{ - Description: "The function used to delete a FeatureMembership", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "featureMembership", - Required: true, - Description: "A full instance of a FeatureMembership", - }, - }, - }, - DeleteAll: &dcl.Path{ - Description: "The function used to delete all FeatureMembership", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ 
- Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "location", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "feature", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - List: &dcl.Path{ - Description: "The function used to list information about many FeatureMembership", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "location", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "feature", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - }, - Components: &dcl.Components{ - Schemas: map[string]*dcl.Component{ - "FeatureMembership": &dcl.Component{ - Title: "FeatureMembership", - ID: "projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/features/{{ "{{" }}feature{{ "}}" }}/memberships/{{ "{{" }}membership{{ "}}" }}", - ParentContainer: "project", - HasCreate: true, - SchemaProperty: dcl.Property{ - Type: "object", - Required: []string{ - "project", - "location", - "feature", - "membership", - }, - Properties: map[string]*dcl.Property{ - "configmanagement": &dcl.Property{ - Type: "object", - GoName: "Configmanagement", - GoType: "FeatureMembershipConfigmanagement", - Description: "Config Management-specific spec.", - Properties: map[string]*dcl.Property{ - "configSync": &dcl.Property{ - Type: "object", - GoName: "ConfigSync", - GoType: "FeatureMembershipConfigmanagementConfigSync", - Description: "Config Sync configuration for the cluster.", - SendEmpty: true, - Properties: map[string]*dcl.Property{ - "deploymentOverrides": &dcl.Property{ - Type: "array", - GoName: "DeploymentOverrides", - Description: "The 
override configurations for the Config Sync Deployments.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "FeatureMembershipConfigmanagementConfigSyncDeploymentOverrides", - Properties: map[string]*dcl.Property{ - "containers": &dcl.Property{ - Type: "array", - GoName: "Containers", - Description: "The override configurations for the containers in the Deployment.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "FeatureMembershipConfigmanagementConfigSyncDeploymentOverridesContainers", - Properties: map[string]*dcl.Property{ - "containerName": &dcl.Property{ - Type: "string", - GoName: "ContainerName", - Description: "The name of the container.", - }, - "cpuLimit": &dcl.Property{ - Type: "string", - GoName: "CpuLimit", - Description: "The CPU limit of the container.", - }, - "cpuRequest": &dcl.Property{ - Type: "string", - GoName: "CpuRequest", - Description: "The CPU request of the container.", - }, - "memoryLimit": &dcl.Property{ - Type: "string", - GoName: "MemoryLimit", - Description: "The memory limit of the container.", - }, - "memoryRequest": &dcl.Property{ - Type: "string", - GoName: "MemoryRequest", - Description: "The memory request of the container.", - }, - }, - }, - }, - "deploymentName": &dcl.Property{ - Type: "string", - GoName: "DeploymentName", - Description: "The name of the Deployment.", - }, - "deploymentNamespace": &dcl.Property{ - Type: "string", - GoName: "DeploymentNamespace", - Description: "The namespace of the Deployment.", - }, - }, - }, - }, - "enabled": &dcl.Property{ - Type: "boolean", - GoName: "Enabled", - Description: "Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. 
If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field.", - }, - "git": &dcl.Property{ - Type: "object", - GoName: "Git", - GoType: "FeatureMembershipConfigmanagementConfigSyncGit", - Properties: map[string]*dcl.Property{ - "gcpServiceAccountEmail": &dcl.Property{ - Type: "string", - GoName: "GcpServiceAccountEmail", - Description: "The GCP Service Account Email used for auth when secretType is gcpServiceAccount.", - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Iam/ServiceAccount", - Field: "email", - }, - }, - }, - "httpsProxy": &dcl.Property{ - Type: "string", - GoName: "HttpsProxy", - Description: "URL for the HTTPS proxy to be used when communicating with the Git repo.", - }, - "policyDir": &dcl.Property{ - Type: "string", - GoName: "PolicyDir", - Description: "The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository.", - }, - "secretType": &dcl.Property{ - Type: "string", - GoName: "SecretType", - Description: "Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount or none. The validation of this is case-sensitive.", - }, - "syncBranch": &dcl.Property{ - Type: "string", - GoName: "SyncBranch", - Description: "The branch of the repository to sync from. Default: master.", - }, - "syncRepo": &dcl.Property{ - Type: "string", - GoName: "SyncRepo", - Description: "The URL of the Git repository to use as the source of truth.", - }, - "syncRev": &dcl.Property{ - Type: "string", - GoName: "SyncRev", - Description: "Git revision (tag or hash) to check out. Default HEAD.", - }, - "syncWaitSecs": &dcl.Property{ - Type: "string", - GoName: "SyncWaitSecs", - Description: "Period in seconds between consecutive syncs. 
Default: 15.", - }, - }, - }, - "metricsGcpServiceAccountEmail": &dcl.Property{ - Type: "string", - GoName: "MetricsGcpServiceAccountEmail", - Description: "Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring.", - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Iam/ServiceAccount", - Field: "email", - }, - }, - Deprecated: true, - }, - "oci": &dcl.Property{ - Type: "object", - GoName: "Oci", - GoType: "FeatureMembershipConfigmanagementConfigSyncOci", - Properties: map[string]*dcl.Property{ - "gcpServiceAccountEmail": &dcl.Property{ - Type: "string", - GoName: "GcpServiceAccountEmail", - Description: "The GCP Service Account Email used for auth when secret_type is gcpserviceaccount. ", - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Iam/ServiceAccount", - Field: "email", - }, - }, - }, - "policyDir": &dcl.Property{ - Type: "string", - GoName: "PolicyDir", - Description: "The absolute path of the directory that contains the local resources. Default: the root directory of the image.", - }, - "secretType": &dcl.Property{ - Type: "string", - GoName: "SecretType", - Description: "Type of secret configured for access to the OCI Image. Must be one of gcenode, gcpserviceaccount or none. The validation of this is case-sensitive.", - }, - "syncRepo": &dcl.Property{ - Type: "string", - GoName: "SyncRepo", - Description: "The OCI image repository URL for the package to sync from. e.g. LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME.", - }, - "syncWaitSecs": &dcl.Property{ - Type: "string", - GoName: "SyncWaitSecs", - Description: "Period in seconds(int64 format) between consecutive syncs. 
Default: 15.", - }, - }, - }, - "preventDrift": &dcl.Property{ - Type: "boolean", - GoName: "PreventDrift", - Description: "Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts.", - ServerDefault: true, - }, - "sourceFormat": &dcl.Property{ - Type: "string", - GoName: "SourceFormat", - Description: "Specifies whether the Config Sync Repo is in \"hierarchical\" or \"unstructured\" mode.", - }, - "stopSyncing": &dcl.Property{ - Type: "boolean", - GoName: "StopSyncing", - Description: "Set to true to stop syncing configs for a single cluster. Default: false.", - }, - }, - }, - "hierarchyController": &dcl.Property{ - Type: "object", - GoName: "HierarchyController", - GoType: "FeatureMembershipConfigmanagementHierarchyController", - Description: "Hierarchy Controller configuration for the cluster.", - SendEmpty: true, - Properties: map[string]*dcl.Property{ - "enableHierarchicalResourceQuota": &dcl.Property{ - Type: "boolean", - GoName: "EnableHierarchicalResourceQuota", - Description: "Whether hierarchical resource quota is enabled in this cluster.", - SendEmpty: true, - }, - "enablePodTreeLabels": &dcl.Property{ - Type: "boolean", - GoName: "EnablePodTreeLabels", - Description: "Whether pod tree labels are enabled in this cluster.", - SendEmpty: true, - }, - "enabled": &dcl.Property{ - Type: "boolean", - GoName: "Enabled", - Description: "**DEPRECATED** Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. 
Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead.", - SendEmpty: true, - }, - }, - }, - "management": &dcl.Property{ - Type: "string", - GoName: "Management", - GoType: "FeatureMembershipConfigmanagementManagementEnum", - Description: "Set this field to MANAGEMENT_AUTOMATIC to enable Config Sync auto-upgrades, and set this field to MANAGEMENT_MANUAL or MANAGEMENT_UNSPECIFIED to disable Config Sync auto-upgrades.", - ServerDefault: true, - Enum: []string{ - "MANAGEMENT_UNSPECIFIED", - "MANAGEMENT_AUTOMATIC", - "MANAGEMENT_MANUAL", - }, - }, - "policyController": &dcl.Property{ - Type: "object", - GoName: "PolicyController", - GoType: "FeatureMembershipConfigmanagementPolicyController", - Description: "**DEPRECATED** Configuring Policy Controller through the configmanagement feature is no longer recommended. Use the policycontroller feature instead.", - Properties: map[string]*dcl.Property{ - "auditIntervalSeconds": &dcl.Property{ - Type: "string", - GoName: "AuditIntervalSeconds", - Description: "Sets the interval for Policy Controller Audit Scans (in seconds). When set to 0, this disables audit functionality altogether.", - }, - "enabled": &dcl.Property{ - Type: "boolean", - GoName: "Enabled", - Description: "Enables the installation of Policy Controller. If false, the rest of PolicyController fields take no effect.", - }, - "exemptableNamespaces": &dcl.Property{ - Type: "array", - GoName: "ExemptableNamespaces", - Description: "The set of namespaces that are excluded from Policy Controller checks. 
Namespaces do not need to currently exist on the cluster.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "logDeniesEnabled": &dcl.Property{ - Type: "boolean", - GoName: "LogDeniesEnabled", - Description: "Logs all denies and dry run failures.", - }, - "monitoring": &dcl.Property{ - Type: "object", - GoName: "Monitoring", - GoType: "FeatureMembershipConfigmanagementPolicyControllerMonitoring", - Description: "Specifies the backends Policy Controller should export metrics to. For example, to specify metrics should be exported to Cloud Monitoring and Prometheus, specify backends: [\"cloudmonitoring\", \"prometheus\"]. Default: [\"cloudmonitoring\", \"prometheus\"]", - ServerDefault: true, - Properties: map[string]*dcl.Property{ - "backends": &dcl.Property{ - Type: "array", - GoName: "Backends", - Description: " Specifies the list of backends Policy Controller will export to. Specifying an empty value `[]` disables metrics export.", - ServerDefault: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "FeatureMembershipConfigmanagementPolicyControllerMonitoringBackendsEnum", - Enum: []string{ - "MONITORING_BACKEND_UNSPECIFIED", - "PROMETHEUS", - "CLOUD_MONITORING", - }, - }, - }, - }, - }, - "mutationEnabled": &dcl.Property{ - Type: "boolean", - GoName: "MutationEnabled", - Description: "Enable or disable mutation in policy controller. 
If true, mutation CRDs, webhook and controller deployment will be deployed to the cluster.", - }, - "referentialRulesEnabled": &dcl.Property{ - Type: "boolean", - GoName: "ReferentialRulesEnabled", - Description: "Enables the ability to use Constraint Templates that reference to objects other than the object currently being evaluated.", - }, - "templateLibraryInstalled": &dcl.Property{ - Type: "boolean", - GoName: "TemplateLibraryInstalled", - Description: "Installs the default template library along with Policy Controller.", - }, - }, - }, - "version": &dcl.Property{ - Type: "string", - GoName: "Version", - Description: "Optional. Version of ACM to install. Defaults to the latest version.", - ServerDefault: true, - }, - }, - }, - "feature": &dcl.Property{ - Type: "string", - GoName: "Feature", - Description: "The name of the feature", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Gkehub/Feature", - Field: "name", - Parent: true, - }, - }, - Parameter: true, - }, - "location": &dcl.Property{ - Type: "string", - GoName: "Location", - Description: "The location of the feature", - Immutable: true, - Parameter: true, - }, - "membership": &dcl.Property{ - Type: "string", - GoName: "Membership", - Description: "The name of the membership", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Gkehub/Membership", - Field: "name", - }, - }, - Parameter: true, - }, - "membershipLocation": &dcl.Property{ - Type: "string", - GoName: "MembershipLocation", - Description: "The location of the membership", - Immutable: true, - Parameter: true, - }, - "mesh": &dcl.Property{ - Type: "object", - GoName: "Mesh", - GoType: "FeatureMembershipMesh", - Description: "Manage Mesh Features", - Properties: map[string]*dcl.Property{ - "controlPlane": &dcl.Property{ - Type: "string", - GoName: "ControlPlane", - GoType: 
"FeatureMembershipMeshControlPlaneEnum", - Description: "**DEPRECATED** Whether to automatically manage Service Mesh control planes. Possible values: CONTROL_PLANE_MANAGEMENT_UNSPECIFIED, AUTOMATIC, MANUAL", - Enum: []string{ - "CONTROL_PLANE_MANAGEMENT_UNSPECIFIED", - "AUTOMATIC", - "MANUAL", - }, - }, - "management": &dcl.Property{ - Type: "string", - GoName: "Management", - GoType: "FeatureMembershipMeshManagementEnum", - Description: "Whether to automatically manage Service Mesh. Possible values: MANAGEMENT_UNSPECIFIED, MANAGEMENT_AUTOMATIC, MANAGEMENT_MANUAL", - Enum: []string{ - "MANAGEMENT_UNSPECIFIED", - "MANAGEMENT_AUTOMATIC", - "MANAGEMENT_MANUAL", - }, - }, - }, - }, - "policycontroller": &dcl.Property{ - Type: "object", - GoName: "Policycontroller", - GoType: "FeatureMembershipPolicycontroller", - Description: "Policy Controller-specific spec.", - Required: []string{ - "policyControllerHubConfig", - }, - Properties: map[string]*dcl.Property{ - "policyControllerHubConfig": &dcl.Property{ - Type: "object", - GoName: "PolicyControllerHubConfig", - GoType: "FeatureMembershipPolicycontrollerPolicyControllerHubConfig", - Description: "Policy Controller configuration for the cluster.", - Properties: map[string]*dcl.Property{ - "auditIntervalSeconds": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "AuditIntervalSeconds", - Description: "Sets the interval for Policy Controller Audit Scans (in seconds). When set to 0, this disables audit functionality altogether.", - }, - "constraintViolationLimit": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "ConstraintViolationLimit", - Description: "The maximum number of audit violations to be stored in a constraint. 
If not set, the internal default of 20 will be used.", - }, - "deploymentConfigs": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "object", - GoType: "FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigs", - Properties: map[string]*dcl.Property{ - "containerResources": &dcl.Property{ - Type: "object", - GoName: "ContainerResources", - GoType: "FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResources", - Description: "Container resource requirements.", - Conflicts: []string{ - "replicaCount", - "podAffinity", - "podTolerations", - }, - Properties: map[string]*dcl.Property{ - "limits": &dcl.Property{ - Type: "object", - GoName: "Limits", - GoType: "FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesLimits", - Description: "Limits describes the maximum amount of compute resources allowed for use by the running container.", - Properties: map[string]*dcl.Property{ - "cpu": &dcl.Property{ - Type: "string", - GoName: "Cpu", - Description: "CPU requirement expressed in Kubernetes resource units.", - }, - "memory": &dcl.Property{ - Type: "string", - GoName: "Memory", - Description: "Memory requirement expressed in Kubernetes resource units.", - }, - }, - }, - "requests": &dcl.Property{ - Type: "object", - GoName: "Requests", - GoType: "FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsContainerResourcesRequests", - Description: "Requests describes the amount of compute resources reserved for the container by the kube-scheduler.", - Properties: map[string]*dcl.Property{ - "cpu": &dcl.Property{ - Type: "string", - GoName: "Cpu", - Description: "CPU requirement expressed in Kubernetes resource units.", - }, - "memory": &dcl.Property{ - Type: "string", - GoName: "Memory", - Description: "Memory requirement expressed in Kubernetes resource units.", - }, - }, - }, - }, - }, - "podAffinity": &dcl.Property{ - Type: "string", 
- GoName: "PodAffinity", - GoType: "FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodAffinityEnum", - Description: "Pod affinity configuration. Possible values: AFFINITY_UNSPECIFIED, NO_AFFINITY, ANTI_AFFINITY", - Conflicts: []string{ - "replicaCount", - "containerResources", - "podTolerations", - }, - Enum: []string{ - "AFFINITY_UNSPECIFIED", - "NO_AFFINITY", - "ANTI_AFFINITY", - }, - }, - "podTolerations": &dcl.Property{ - Type: "array", - GoName: "PodTolerations", - Description: "Pod tolerations of node taints.", - Conflicts: []string{ - "replicaCount", - "containerResources", - "podAffinity", - }, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "FeatureMembershipPolicycontrollerPolicyControllerHubConfigDeploymentConfigsPodTolerations", - Properties: map[string]*dcl.Property{ - "effect": &dcl.Property{ - Type: "string", - GoName: "Effect", - Description: "Matches a taint effect.", - }, - "key": &dcl.Property{ - Type: "string", - GoName: "Key", - Description: "Matches a taint key (not necessarily unique).", - }, - "operator": &dcl.Property{ - Type: "string", - GoName: "Operator", - Description: "Matches a taint operator.", - }, - "value": &dcl.Property{ - Type: "string", - GoName: "Value", - Description: "Matches a taint value.", - }, - }, - }, - }, - "replicaCount": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "ReplicaCount", - Description: "Pod replica count.", - Conflicts: []string{ - "containerResources", - "podAffinity", - "podTolerations", - }, - }, - }, - }, - GoName: "DeploymentConfigs", - Description: "Map of deployment configs to deployments (\"admission\", \"audit\", \"mutation\").", - ServerDefault: true, - }, - "exemptableNamespaces": &dcl.Property{ - Type: "array", - GoName: "ExemptableNamespaces", - Description: "The set of namespaces that are excluded from Policy Controller checks. 
Namespaces do not need to currently exist on the cluster.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "installSpec": &dcl.Property{ - Type: "string", - GoName: "InstallSpec", - GoType: "FeatureMembershipPolicycontrollerPolicyControllerHubConfigInstallSpecEnum", - Description: "Configures the mode of the Policy Controller installation. Possible values: INSTALL_SPEC_UNSPECIFIED, INSTALL_SPEC_NOT_INSTALLED, INSTALL_SPEC_ENABLED, INSTALL_SPEC_SUSPENDED, INSTALL_SPEC_DETACHED", - Enum: []string{ - "INSTALL_SPEC_UNSPECIFIED", - "INSTALL_SPEC_NOT_INSTALLED", - "INSTALL_SPEC_ENABLED", - "INSTALL_SPEC_SUSPENDED", - "INSTALL_SPEC_DETACHED", - }, - }, - "logDeniesEnabled": &dcl.Property{ - Type: "boolean", - GoName: "LogDeniesEnabled", - Description: "Logs all denies and dry run failures.", - }, - "monitoring": &dcl.Property{ - Type: "object", - GoName: "Monitoring", - GoType: "FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoring", - Description: "Specifies the backends Policy Controller should export metrics to. For example, to specify metrics should be exported to Cloud Monitoring and Prometheus, specify backends: [\"cloudmonitoring\", \"prometheus\"]. Default: [\"cloudmonitoring\", \"prometheus\"]", - ServerDefault: true, - Properties: map[string]*dcl.Property{ - "backends": &dcl.Property{ - Type: "array", - GoName: "Backends", - Description: " Specifies the list of backends Policy Controller will export to. 
Specifying an empty value `[]` disables metrics export.", - ServerDefault: true, - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "FeatureMembershipPolicycontrollerPolicyControllerHubConfigMonitoringBackendsEnum", - Enum: []string{ - "MONITORING_BACKEND_UNSPECIFIED", - "PROMETHEUS", - "CLOUD_MONITORING", - }, - }, - }, - }, - }, - "mutationEnabled": &dcl.Property{ - Type: "boolean", - GoName: "MutationEnabled", - Description: "Enables the ability to mutate resources using Policy Controller.", - }, - "policyContent": &dcl.Property{ - Type: "object", - GoName: "PolicyContent", - GoType: "FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContent", - Description: "Specifies the desired policy content on the cluster.", - ServerDefault: true, - Properties: map[string]*dcl.Property{ - "bundles": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "object", - GoType: "FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentBundles", - Properties: map[string]*dcl.Property{ - "exemptedNamespaces": &dcl.Property{ - Type: "array", - GoName: "ExemptedNamespaces", - Description: "The set of namespaces to be exempted from the bundle.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - }, - }, - GoName: "Bundles", - Description: "map of bundle name to BundleInstallSpec. 
The bundle name maps to the `bundleName` key in the `policycontroller.gke.io/constraintData` annotation on a constraint.", - }, - "templateLibrary": &dcl.Property{ - Type: "object", - GoName: "TemplateLibrary", - GoType: "FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibrary", - Description: "Configures the installation of the Template Library.", - ServerDefault: true, - Properties: map[string]*dcl.Property{ - "installation": &dcl.Property{ - Type: "string", - GoName: "Installation", - GoType: "FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryInstallationEnum", - Description: "Configures the manner in which the template library is installed on the cluster. Possible values: INSTALLATION_UNSPECIFIED, NOT_INSTALLED, ALL", - Enum: []string{ - "INSTALLATION_UNSPECIFIED", - "NOT_INSTALLED", - "ALL", - }, - }, - }, - }, - }, - }, - "referentialRulesEnabled": &dcl.Property{ - Type: "boolean", - GoName: "ReferentialRulesEnabled", - Description: "Enables the ability to use Constraint Templates that reference to objects other than the object currently being evaluated.", - }, - }, - }, - "version": &dcl.Property{ - Type: "string", - GoName: "Version", - Description: "Optional. Version of Policy Controller to install. 
Defaults to the latest version.", - ServerDefault: true, - }, - }, - }, - "project": &dcl.Property{ - Type: "string", - GoName: "Project", - Description: "The project of the feature", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Cloudresourcemanager/Project", - Field: "name", - Parent: true, - }, - }, - Parameter: true, - }, - }, - }, - }, - }, - }, - } -} diff --git a/mmv1/third_party/terraform/services/gkehub/feature_schema.go.tmpl b/mmv1/third_party/terraform/services/gkehub/feature_schema.go.tmpl deleted file mode 100644 index 9b8b04f8d265..000000000000 --- a/mmv1/third_party/terraform/services/gkehub/feature_schema.go.tmpl +++ /dev/null @@ -1,331 +0,0 @@ -package gkehub - -import ( - dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" -) - -func DCLFeatureSchema() *dcl.Schema { - return &dcl.Schema{ - Info: &dcl.Info{ - Title: "GkeHub/Feature", - Description: "The GkeHub Feature resource", - StructName: "Feature", - Mutex: "{{ "{{" }}project{{ "}}" }}/{{ "{{" }}location{{ "}}" }}/{{ "{{" }}feature{{ "}}" }}", - }, - Paths: &dcl.Paths{ - Get: &dcl.Path{ - Description: "The function used to get information about a Feature", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "feature", - Required: true, - Description: "A full instance of a Feature", - }, - }, - }, - Apply: &dcl.Path{ - Description: "The function used to apply information about a Feature", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "feature", - Required: true, - Description: "A full instance of a Feature", - }, - }, - }, - Delete: &dcl.Path{ - Description: "The function used to delete a Feature", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "feature", - Required: true, - Description: "A full instance of a Feature", - }, - }, - }, - DeleteAll: &dcl.Path{ - Description: "The function used to delete all Feature", - Parameters: 
[]dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "location", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - List: &dcl.Path{ - Description: "The function used to list information about many Feature", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "location", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - }, - Components: &dcl.Components{ - Schemas: map[string]*dcl.Component{ - "Feature": &dcl.Component{ - Title: "Feature", - ID: "projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/features/{{ "{{" }}name{{ "}}" }}", - ParentContainer: "project", - LabelsField: "labels", - HasCreate: true, - SchemaProperty: dcl.Property{ - Type: "object", - Required: []string{ - "project", - "location", - }, - Properties: map[string]*dcl.Property{ - "createTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "CreateTime", - ReadOnly: true, - Description: "Output only. When the Feature resource was created.", - Immutable: true, - }, - "deleteTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "DeleteTime", - ReadOnly: true, - Description: "Output only. 
When the Feature resource was deleted.", - Immutable: true, - }, - "labels": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "Labels", - Description: "GCP labels for this Feature.", - }, - "location": &dcl.Property{ - Type: "string", - GoName: "Location", - Description: "The location for the resource", - Immutable: true, - Parameter: true, - }, - "name": &dcl.Property{ - Type: "string", - GoName: "Name", - Description: "The full, unique name of this Feature resource", - Immutable: true, - HasLongForm: true, - }, - "project": &dcl.Property{ - Type: "string", - GoName: "Project", - Description: "The project for the resource", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Cloudresourcemanager/Project", - Field: "name", - Parent: true, - }, - }, - Parameter: true, - }, - "resourceState": &dcl.Property{ - Type: "object", - GoName: "ResourceState", - GoType: "FeatureResourceState", - ReadOnly: true, - Description: "State of the Feature resource itself.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "hasResources": &dcl.Property{ - Type: "boolean", - GoName: "HasResources", - ReadOnly: true, - Description: "Whether this Feature has outstanding resources that need to be cleaned up before it can be disabled.", - Immutable: true, - }, - "state": &dcl.Property{ - Type: "string", - GoName: "State", - GoType: "FeatureResourceStateStateEnum", - ReadOnly: true, - Description: "The current state of the Feature resource in the Hub API. Possible values: STATE_UNSPECIFIED, ENABLING, ACTIVE, DISABLING, UPDATING, SERVICE_UPDATING", - Immutable: true, - Enum: []string{ - "STATE_UNSPECIFIED", - "ENABLING", - "ACTIVE", - "DISABLING", - "UPDATING", - "SERVICE_UPDATING", - }, - }, - }, - }, - "spec": &dcl.Property{ - Type: "object", - GoName: "Spec", - GoType: "FeatureSpec", - Description: "Optional. Hub-wide Feature configuration. 
If this Feature does not support any Hub-wide configuration, this field may be unused.", - Properties: map[string]*dcl.Property{ - "fleetobservability": &dcl.Property{ - Type: "object", - GoName: "Fleetobservability", - GoType: "FeatureSpecFleetobservability", - Description: "Fleet Observability spec.", - Properties: map[string]*dcl.Property{ - "loggingConfig": &dcl.Property{ - Type: "object", - GoName: "LoggingConfig", - GoType: "FeatureSpecFleetobservabilityLoggingConfig", - Description: "Fleet Observability Logging-specific spec.", - Properties: map[string]*dcl.Property{ - "defaultConfig": &dcl.Property{ - Type: "object", - GoName: "DefaultConfig", - GoType: "FeatureSpecFleetobservabilityLoggingConfigDefaultConfig", - Description: "Specified if applying the default routing config to logs not specified in other configs.", - Properties: map[string]*dcl.Property{ - "mode": &dcl.Property{ - Type: "string", - GoName: "Mode", - GoType: "FeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum", - Description: "The logs routing mode Possible values: MODE_UNSPECIFIED, COPY, MOVE", - Enum: []string{ - "MODE_UNSPECIFIED", - "COPY", - "MOVE", - }, - }, - }, - }, - "fleetScopeLogsConfig": &dcl.Property{ - Type: "object", - GoName: "FleetScopeLogsConfig", - GoType: "FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig", - Description: "Specified if applying the routing config to all logs for all fleet scopes.", - Properties: map[string]*dcl.Property{ - "mode": &dcl.Property{ - Type: "string", - GoName: "Mode", - GoType: "FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum", - Description: "The logs routing mode Possible values: MODE_UNSPECIFIED, COPY, MOVE", - Enum: []string{ - "MODE_UNSPECIFIED", - "COPY", - "MOVE", - }, - }, - }, - }, - }, - }, - }, - }, - "multiclusteringress": &dcl.Property{ - Type: "object", - GoName: "Multiclusteringress", - GoType: "FeatureSpecMulticlusteringress", - Description: "Multicluster Ingress-specific 
spec.", - Required: []string{ - "configMembership", - }, - Properties: map[string]*dcl.Property{ - "configMembership": &dcl.Property{ - Type: "string", - GoName: "ConfigMembership", - Description: "Fully-qualified Membership name which hosts the MultiClusterIngress CRD. Example: `projects/foo-proj/locations/global/memberships/bar`", - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Gkehub/Membership", - Field: "name", - }, - }, - }, - }, - }, - }, - }, - "state": &dcl.Property{ - Type: "object", - GoName: "State", - GoType: "FeatureState", - ReadOnly: true, - Description: "Output only. The Hub-wide Feature state", - Immutable: true, - Properties: map[string]*dcl.Property{ - "state": &dcl.Property{ - Type: "object", - GoName: "State", - GoType: "FeatureStateState", - ReadOnly: true, - Description: "Output only. The \"running state\" of the Feature in this Hub.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "code": &dcl.Property{ - Type: "string", - GoName: "Code", - GoType: "FeatureStateStateCodeEnum", - ReadOnly: true, - Description: "The high-level, machine-readable status of this Feature. Possible values: CODE_UNSPECIFIED, OK, WARNING, ERROR", - Immutable: true, - Enum: []string{ - "CODE_UNSPECIFIED", - "OK", - "WARNING", - "ERROR", - }, - }, - "description": &dcl.Property{ - Type: "string", - GoName: "Description", - ReadOnly: true, - Description: "A human-readable description of the current status.", - Immutable: true, - }, - "updateTime": &dcl.Property{ - Type: "string", - GoName: "UpdateTime", - ReadOnly: true, - Description: "The time this status and any related Feature-specific details were updated. A timestamp in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits. 
Examples: \"2014-10-02T15:01:23Z\" and \"2014-10-02T15:01:23.045123456Z\"", - Immutable: true, - }, - }, - }, - }, - }, - "updateTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "UpdateTime", - ReadOnly: true, - Description: "Output only. When the Feature resource was last updated.", - Immutable: true, - }, - }, - }, - }, - }, - }, - } -} diff --git a/mmv1/third_party/terraform/services/gkehub/membership_schema.go.tmpl b/mmv1/third_party/terraform/services/gkehub/membership_schema.go.tmpl deleted file mode 100644 index ebdafd24155b..000000000000 --- a/mmv1/third_party/terraform/services/gkehub/membership_schema.go.tmpl +++ /dev/null @@ -1,410 +0,0 @@ -package gkehub - -import ( - dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" -) - -func DCLMembershipSchema() *dcl.Schema { - return &dcl.Schema{ - Info: &dcl.Info{ - Title: "GkeHub/Membership", - Description: "The GkeHub Membership resource", - StructName: "Membership", - }, - Paths: &dcl.Paths{ - Get: &dcl.Path{ - Description: "The function used to get information about a Membership", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "membership", - Required: true, - Description: "A full instance of a Membership", - }, - }, - }, - Apply: &dcl.Path{ - Description: "The function used to apply information about a Membership", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "membership", - Required: true, - Description: "A full instance of a Membership", - }, - }, - }, - Delete: &dcl.Path{ - Description: "The function used to delete a Membership", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "membership", - Required: true, - Description: "A full instance of a Membership", - }, - }, - }, - DeleteAll: &dcl.Path{ - Description: "The function used to delete all Membership", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: 
"string", - }, - }, - dcl.PathParameters{ - Name: "location", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - List: &dcl.Path{ - Description: "The function used to list information about many Membership", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "location", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - }, - Components: &dcl.Components{ - Schemas: map[string]*dcl.Component{ - "Membership": &dcl.Component{ - Title: "Membership", - ID: "projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/memberships/{{ "{{" }}name{{ "}}" }}", - UsesStateHint: true, - ParentContainer: "project", - LabelsField: "labels", - HasCreate: true, - SchemaProperty: dcl.Property{ - Type: "object", - Required: []string{ - "name", - "project", - "location", - }, - Properties: map[string]*dcl.Property{ - "authority": &dcl.Property{ - Type: "object", - GoName: "Authority", - GoType: "MembershipAuthority", - Description: "Optional. How to identify workloads from this Membership. See the documentation on Workload Identity for more details: https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity", - Properties: map[string]*dcl.Property{ - "identityProvider": &dcl.Property{ - Type: "string", - GoName: "IdentityProvider", - ReadOnly: true, - Description: "Output only. An identity provider that reflects the `issuer` in the workload identity pool.", - }, - "issuer": &dcl.Property{ - Type: "string", - GoName: "Issuer", - Description: "Optional. A JSON Web Token (JWT) issuer URI. `issuer` must start with `https://` and be a valid URL with length <2000 characters. If set, then Google will allow valid OIDC tokens from this issuer to authenticate within the workload_identity_pool. 
OIDC discovery will be performed on this URI to validate tokens from the issuer. Clearing `issuer` disables Workload Identity. `issuer` cannot be directly modified; it must be cleared (and Workload Identity disabled) before using a new issuer (and re-enabling Workload Identity).", - }, - "workloadIdentityPool": &dcl.Property{ - Type: "string", - GoName: "WorkloadIdentityPool", - ReadOnly: true, - Description: "Output only. The name of the workload identity pool in which `issuer` will be recognized. There is a single Workload Identity Pool per Hub that is shared between all Memberships that belong to that Hub. For a Hub hosted in: {PROJECT_ID}, the workload pool format is `{PROJECT_ID}.hub.id.goog`, although this is subject to change in newer versions of this API.", - }, - }, - }, - "createTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "CreateTime", - ReadOnly: true, - Description: "Output only. When the Membership was created.", - Immutable: true, - }, - "deleteTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "DeleteTime", - ReadOnly: true, - Description: "Output only. When the Membership was deleted.", - Immutable: true, - }, - "description": &dcl.Property{ - Type: "string", - GoName: "Description", - Description: "Description of this membership, limited to 63 characters. Must match the regex: `*` This field is present for legacy purposes.", - }, - "endpoint": &dcl.Property{ - Type: "object", - GoName: "Endpoint", - GoType: "MembershipEndpoint", - Description: "Optional. Endpoint information to reach this member.", - Properties: map[string]*dcl.Property{ - "gkeCluster": &dcl.Property{ - Type: "object", - GoName: "GkeCluster", - GoType: "MembershipEndpointGkeCluster", - Description: "Optional. GKE-specific information. Only present if this Membership is a GKE cluster.", - Properties: map[string]*dcl.Property{ - "resourceLink": &dcl.Property{ - Type: "string", - GoName: "ResourceLink", - Description: "Immutable. 
Self-link of the GCP resource for the GKE cluster. For example: //container.googleapis.com/projects/my-project/locations/us-west1-a/clusters/my-cluster Zonal clusters are also supported.", - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Container/Cluster", - Field: "selfLink", - }, - }, - }, - }, - }, - "kubernetesMetadata": &dcl.Property{ - Type: "object", - GoName: "KubernetesMetadata", - GoType: "MembershipEndpointKubernetesMetadata", - ReadOnly: true, - Description: "Output only. Useful Kubernetes-specific metadata.", - Properties: map[string]*dcl.Property{ - "kubernetesApiServerVersion": &dcl.Property{ - Type: "string", - GoName: "KubernetesApiServerVersion", - ReadOnly: true, - Description: "Output only. Kubernetes API server version string as reported by `/version`.", - }, - "memoryMb": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "MemoryMb", - ReadOnly: true, - Description: "Output only. The total memory capacity as reported by the sum of all Kubernetes nodes resources, defined in MB.", - }, - "nodeCount": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "NodeCount", - ReadOnly: true, - Description: "Output only. Node count as reported by Kubernetes nodes resources.", - }, - "nodeProviderId": &dcl.Property{ - Type: "string", - GoName: "NodeProviderId", - ReadOnly: true, - Description: "Output only. Node providerID as reported by the first node in the list of nodes on the Kubernetes endpoint. On Kubernetes platforms that support zero-node clusters (like GKE-on-GCP), the node_count will be zero and the node_provider_id will be empty.", - }, - "updateTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "UpdateTime", - ReadOnly: true, - Description: "Output only. The time at which these details were last updated. 
This update_time is different from the Membership-level update_time since EndpointDetails are updated internally for API consumers.", - }, - "vcpuCount": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "VcpuCount", - ReadOnly: true, - Description: "Output only. vCPU count as reported by Kubernetes nodes resources.", - }, - }, - }, - "kubernetesResource": &dcl.Property{ - Type: "object", - GoName: "KubernetesResource", - GoType: "MembershipEndpointKubernetesResource", - Description: "Optional. The in-cluster Kubernetes Resources that should be applied for a correctly registered cluster, in the steady state. These resources: * Ensure that the cluster is exclusively registered to one and only one Hub Membership. * Propagate Workload Pool Information available in the Membership Authority field. * Ensure proper initial configuration of default Hub Features.", - Properties: map[string]*dcl.Property{ - "connectResources": &dcl.Property{ - Type: "array", - GoName: "ConnectResources", - ReadOnly: true, - Description: "Output only. The Kubernetes resources for installing the GKE Connect agent This field is only populated in the Membership returned from a successful long-running operation from CreateMembership or UpdateMembership. It is not populated during normal GetMembership or ListMemberships requests. To get the resource manifest after the initial registration, the caller should make a UpdateMembership call with an empty field mask.", - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "MembershipEndpointKubernetesResourceConnectResources", - Properties: map[string]*dcl.Property{ - "clusterScoped": &dcl.Property{ - Type: "boolean", - GoName: "ClusterScoped", - Description: "Whether the resource provided in the manifest is `cluster_scoped`. If unset, the manifest is assumed to be namespace scoped. 
This field is used for REST mapping when applying the resource in a cluster.", - }, - "manifest": &dcl.Property{ - Type: "string", - GoName: "Manifest", - Description: "YAML manifest of the resource.", - }, - }, - }, - }, - "membershipCrManifest": &dcl.Property{ - Type: "string", - GoName: "MembershipCrManifest", - Description: "Input only. The YAML representation of the Membership CR. This field is ignored for GKE clusters where Hub can read the CR directly. Callers should provide the CR that is currently present in the cluster during CreateMembership or UpdateMembership, or leave this field empty if none exists. The CR manifest is used to validate the cluster has not been registered with another Membership.", - Unreadable: true, - }, - "membershipResources": &dcl.Property{ - Type: "array", - GoName: "MembershipResources", - ReadOnly: true, - Description: "Output only. Additional Kubernetes resources that need to be applied to the cluster after Membership creation, and after every update. This field is only populated in the Membership returned from a successful long-running operation from CreateMembership or UpdateMembership. It is not populated during normal GetMembership or ListMemberships requests. To get the resource manifest after the initial registration, the caller should make a UpdateMembership call with an empty field mask.", - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "MembershipEndpointKubernetesResourceMembershipResources", - Properties: map[string]*dcl.Property{ - "clusterScoped": &dcl.Property{ - Type: "boolean", - GoName: "ClusterScoped", - Description: "Whether the resource provided in the manifest is `cluster_scoped`. If unset, the manifest is assumed to be namespace scoped. 
This field is used for REST mapping when applying the resource in a cluster.", - }, - "manifest": &dcl.Property{ - Type: "string", - GoName: "Manifest", - Description: "YAML manifest of the resource.", - }, - }, - }, - }, - "resourceOptions": &dcl.Property{ - Type: "object", - GoName: "ResourceOptions", - GoType: "MembershipEndpointKubernetesResourceResourceOptions", - Description: "Optional. Options for Kubernetes resource generation.", - Properties: map[string]*dcl.Property{ - "connectVersion": &dcl.Property{ - Type: "string", - GoName: "ConnectVersion", - Description: "Optional. The Connect agent version to use for connect_resources. Defaults to the latest GKE Connect version. The version must be a currently supported version, obsolete versions will be rejected.", - }, - "v1beta1Crd": &dcl.Property{ - Type: "boolean", - GoName: "V1Beta1Crd", - Description: "Optional. Use `apiextensions/v1beta1` instead of `apiextensions/v1` for CustomResourceDefinition resources. This option should be set for clusters with Kubernetes apiserver versions <1.16.", - }, - }, - }, - }, - }, - }, - }, - "externalId": &dcl.Property{ - Type: "string", - GoName: "ExternalId", - Description: "Optional. An externally-generated and managed ID for this Membership. This ID may be modified after creation, but this is not recommended. The ID must match the regex: `*` If this Membership represents a Kubernetes cluster, this value should be set to the UID of the `kube-system` namespace object.", - ServerDefault: true, - }, - "infrastructureType": &dcl.Property{ - Type: "string", - GoName: "InfrastructureType", - GoType: "MembershipInfrastructureTypeEnum", - Description: "Optional. The infrastructure type this Membership is running on. 
Possible values: INFRASTRUCTURE_TYPE_UNSPECIFIED, ON_PREM, MULTI_CLOUD", - ServerDefault: true, - Enum: []string{ - "INFRASTRUCTURE_TYPE_UNSPECIFIED", - "ON_PREM", - "MULTI_CLOUD", - }, - }, - "labels": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "Labels", - Description: "Optional. GCP labels for this membership.", - }, - "lastConnectionTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "LastConnectionTime", - ReadOnly: true, - Description: "Output only. For clusters using Connect, the timestamp of the most recent connection established with Google Cloud. This time is updated every several minutes, not continuously. For clusters that do not use GKE Connect, or that have never connected successfully, this field will be unset.", - Immutable: true, - }, - "location": &dcl.Property{ - Type: "string", - GoName: "Location", - Description: "The location for the resource", - Immutable: true, - Parameter: true, - }, - "name": &dcl.Property{ - Type: "string", - GoName: "Name", - Description: "Output only. The full, unique name of this Membership resource in the format `projects/*/locations/*/memberships/{membership_id}`, set during creation. `membership_id` must be a valid RFC 1123 compliant DNS label: 1. At most 63 characters in length 2. It must consist of lower case alphanumeric characters or `-` 3. 
It must start and end with an alphanumeric character Which can be expressed as the regex: `)?`, with a maximum length of 63 characters.", - Immutable: true, - HasLongForm: true, - }, - "project": &dcl.Property{ - Type: "string", - GoName: "Project", - Description: "The project for the resource", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Cloudresourcemanager/Project", - Field: "name", - Parent: true, - }, - }, - Parameter: true, - }, - "state": &dcl.Property{ - Type: "object", - GoName: "State", - GoType: "MembershipState", - ReadOnly: true, - Description: "Output only. State of the Membership resource.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "code": &dcl.Property{ - Type: "string", - GoName: "Code", - GoType: "MembershipStateCodeEnum", - ReadOnly: true, - Description: "Output only. The current state of the Membership resource. Possible values: CODE_UNSPECIFIED, CREATING, READY, DELETING, UPDATING, SERVICE_UPDATING", - Immutable: true, - Enum: []string{ - "CODE_UNSPECIFIED", - "CREATING", - "READY", - "DELETING", - "UPDATING", - "SERVICE_UPDATING", - }, - }, - }, - }, - "uniqueId": &dcl.Property{ - Type: "string", - GoName: "UniqueId", - ReadOnly: true, - Description: "Output only. Google-generated UUID for this resource. This is unique across all Membership resources. If a Membership resource is deleted and another resource with the same name is created, it gets a different unique_id.", - Immutable: true, - }, - "updateTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "UpdateTime", - ReadOnly: true, - Description: "Output only. 
When the Membership was last updated.", - Immutable: true, - }, - }, - }, - }, - }, - }, - } -} diff --git a/mmv1/third_party/terraform/services/recaptchaenterprise/key_schema.go b/mmv1/third_party/terraform/services/recaptchaenterprise/key_schema.go deleted file mode 100644 index 2357be900e69..000000000000 --- a/mmv1/third_party/terraform/services/recaptchaenterprise/key_schema.go +++ /dev/null @@ -1,317 +0,0 @@ -package recaptchaenterprise - -import ( - dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" -) - -func DCLKeySchema() *dcl.Schema { - return &dcl.Schema{ - Info: &dcl.Info{ - Title: "RecaptchaEnterprise/Key", - Description: "The RecaptchaEnterprise Key resource", - StructName: "Key", - }, - Paths: &dcl.Paths{ - Get: &dcl.Path{ - Description: "The function used to get information about a Key", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "key", - Required: true, - Description: "A full instance of a Key", - }, - }, - }, - Apply: &dcl.Path{ - Description: "The function used to apply information about a Key", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "key", - Required: true, - Description: "A full instance of a Key", - }, - }, - }, - Delete: &dcl.Path{ - Description: "The function used to delete a Key", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "key", - Required: true, - Description: "A full instance of a Key", - }, - }, - }, - DeleteAll: &dcl.Path{ - Description: "The function used to delete all Key", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - List: &dcl.Path{ - Description: "The function used to list information about many Key", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - }, - Components: &dcl.Components{ - 
Schemas: map[string]*dcl.Component{ - "Key": &dcl.Component{ - Title: "Key", - ID: "projects/{{project}}/keys/{{name}}", - ParentContainer: "project", - LabelsField: "labels", - HasCreate: true, - SchemaProperty: dcl.Property{ - Type: "object", - Required: []string{ - "displayName", - "project", - }, - Properties: map[string]*dcl.Property{ - "androidSettings": &dcl.Property{ - Type: "object", - GoName: "AndroidSettings", - GoType: "KeyAndroidSettings", - Description: "Settings for keys that can be used by Android apps.", - Conflicts: []string{ - "webSettings", - "iosSettings", - }, - Properties: map[string]*dcl.Property{ - "allowAllPackageNames": &dcl.Property{ - Type: "boolean", - GoName: "AllowAllPackageNames", - Description: "If set to true, it means allowed_package_names will not be enforced.", - }, - "allowedPackageNames": &dcl.Property{ - Type: "array", - GoName: "AllowedPackageNames", - Description: "Android package names of apps allowed to use the key. Example: 'com.companyname.appname'", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - }, - }, - "createTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "CreateTime", - ReadOnly: true, - Description: "The timestamp corresponding to the creation of this Key.", - Immutable: true, - }, - "displayName": &dcl.Property{ - Type: "string", - GoName: "DisplayName", - Description: "Human-readable display name of this key. 
Modifiable by user.", - }, - "iosSettings": &dcl.Property{ - Type: "object", - GoName: "IosSettings", - GoType: "KeyIosSettings", - Description: "Settings for keys that can be used by iOS apps.", - Conflicts: []string{ - "webSettings", - "androidSettings", - }, - Properties: map[string]*dcl.Property{ - "allowAllBundleIds": &dcl.Property{ - Type: "boolean", - GoName: "AllowAllBundleIds", - Description: "If set to true, it means allowed_bundle_ids will not be enforced.", - }, - "allowedBundleIds": &dcl.Property{ - Type: "array", - GoName: "AllowedBundleIds", - Description: "iOS bundle ids of apps allowed to use the key. Example: 'com.companyname.productname.appname'", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - }, - }, - "labels": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "Labels", - Description: "See [Creating and managing labels](https://cloud.google.com/recaptcha-enterprise/docs/labels).", - }, - "name": &dcl.Property{ - Type: "string", - GoName: "Name", - Description: "The resource id for the Key, which is the same as the Site Key itself.", - Immutable: true, - ServerGeneratedParameter: true, - HasLongForm: true, - }, - "project": &dcl.Property{ - Type: "string", - GoName: "Project", - Description: "The project for the resource", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Cloudresourcemanager/Project", - Field: "name", - Parent: true, - }, - }, - Parameter: true, - }, - "testingOptions": &dcl.Property{ - Type: "object", - GoName: "TestingOptions", - GoType: "KeyTestingOptions", - Description: "Options for user acceptance testing.", - Immutable: true, - Properties: map[string]*dcl.Property{ - "testingChallenge": &dcl.Property{ - Type: "string", - GoName: "TestingChallenge", - GoType: "KeyTestingOptionsTestingChallengeEnum", - Description: "For 
challenge-based keys only (CHECKBOX, INVISIBLE), all challenge requests for this site will return nocaptcha if NOCAPTCHA, or an unsolvable challenge if UNSOLVABLE_CHALLENGE. Possible values: TESTING_CHALLENGE_UNSPECIFIED, NOCAPTCHA, UNSOLVABLE_CHALLENGE", - Immutable: true, - ServerDefault: true, - Enum: []string{ - "TESTING_CHALLENGE_UNSPECIFIED", - "NOCAPTCHA", - "UNSOLVABLE_CHALLENGE", - }, - }, - "testingScore": &dcl.Property{ - Type: "number", - Format: "double", - GoName: "TestingScore", - Description: "All assessments for this Key will return this score. Must be between 0 (likely not legitimate) and 1 (likely legitimate) inclusive.", - Immutable: true, - }, - }, - }, - "wafSettings": &dcl.Property{ - Type: "object", - GoName: "WafSettings", - GoType: "KeyWafSettings", - Description: "Settings specific to keys that can be used for WAF (Web Application Firewall).", - Immutable: true, - Required: []string{ - "wafService", - "wafFeature", - }, - Properties: map[string]*dcl.Property{ - "wafFeature": &dcl.Property{ - Type: "string", - GoName: "WafFeature", - GoType: "KeyWafSettingsWafFeatureEnum", - Description: "Supported WAF features. For more information, see https://cloud.google.com/recaptcha-enterprise/docs/usecase#comparison_of_features. Possible values: CHALLENGE_PAGE, SESSION_TOKEN, ACTION_TOKEN, EXPRESS", - Immutable: true, - Enum: []string{ - "CHALLENGE_PAGE", - "SESSION_TOKEN", - "ACTION_TOKEN", - "EXPRESS", - }, - }, - "wafService": &dcl.Property{ - Type: "string", - GoName: "WafService", - GoType: "KeyWafSettingsWafServiceEnum", - Description: "The WAF service that uses this key. 
Possible values: CA, FASTLY", - Immutable: true, - Enum: []string{ - "CA", - "FASTLY", - }, - }, - }, - }, - "webSettings": &dcl.Property{ - Type: "object", - GoName: "WebSettings", - GoType: "KeyWebSettings", - Description: "Settings for keys that can be used by websites.", - Conflicts: []string{ - "androidSettings", - "iosSettings", - }, - Required: []string{ - "integrationType", - }, - Properties: map[string]*dcl.Property{ - "allowAllDomains": &dcl.Property{ - Type: "boolean", - GoName: "AllowAllDomains", - Description: "If set to true, it means allowed_domains will not be enforced.", - }, - "allowAmpTraffic": &dcl.Property{ - Type: "boolean", - GoName: "AllowAmpTraffic", - Description: "If set to true, the key can be used on AMP (Accelerated Mobile Pages) websites. This is supported only for the SCORE integration type.", - }, - "allowedDomains": &dcl.Property{ - Type: "array", - GoName: "AllowedDomains", - Description: "Domains or subdomains of websites allowed to use the key. All subdomains of an allowed domain are automatically allowed. A valid domain requires a host and must not include any path, port, query or fragment. Examples: 'example.com' or 'subdomain.example.com'", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "challengeSecurityPreference": &dcl.Property{ - Type: "string", - GoName: "ChallengeSecurityPreference", - GoType: "KeyWebSettingsChallengeSecurityPreferenceEnum", - Description: "Settings for the frequency and difficulty at which this key triggers captcha challenges. This should only be specified for IntegrationTypes CHECKBOX and INVISIBLE. 
Possible values: CHALLENGE_SECURITY_PREFERENCE_UNSPECIFIED, USABILITY, BALANCE, SECURITY", - ServerDefault: true, - Enum: []string{ - "CHALLENGE_SECURITY_PREFERENCE_UNSPECIFIED", - "USABILITY", - "BALANCE", - "SECURITY", - }, - }, - "integrationType": &dcl.Property{ - Type: "string", - GoName: "IntegrationType", - GoType: "KeyWebSettingsIntegrationTypeEnum", - Description: "Required. Describes how this key is integrated with the website. Possible values: SCORE, CHECKBOX, INVISIBLE", - Immutable: true, - Enum: []string{ - "SCORE", - "CHECKBOX", - "INVISIBLE", - }, - }, - }, - }, - }, - }, - }, - }, - }, - } -} From 1b0a5745841bf9672ffbe1d148a8e3629ce543c0 Mon Sep 17 00:00:00 2001 From: Chris Hawk Date: Thu, 19 Mar 2026 11:42:27 -0700 Subject: [PATCH 12/13] Clean up unused gkehub code --- .../terraform/services/gkehub/feature.go.tmpl | 905 ---- .../services/gkehub/feature_internal.go.tmpl | 3616 ---------------- .../services/gkehub/hub_beta_utils.go.tmpl | 122 - .../services/gkehub/hub_utils.go.tmpl | 5 - .../services/gkehub/membership.go.tmpl | 902 ---- .../gkehub/membership_internal.go.tmpl | 3830 ----------------- 6 files changed, 9380 deletions(-) delete mode 100644 mmv1/third_party/terraform/services/gkehub/feature.go.tmpl delete mode 100644 mmv1/third_party/terraform/services/gkehub/feature_internal.go.tmpl delete mode 100644 mmv1/third_party/terraform/services/gkehub/hub_beta_utils.go.tmpl delete mode 100644 mmv1/third_party/terraform/services/gkehub/membership.go.tmpl delete mode 100644 mmv1/third_party/terraform/services/gkehub/membership_internal.go.tmpl diff --git a/mmv1/third_party/terraform/services/gkehub/feature.go.tmpl b/mmv1/third_party/terraform/services/gkehub/feature.go.tmpl deleted file mode 100644 index 3b1fb2c99e6b..000000000000 --- a/mmv1/third_party/terraform/services/gkehub/feature.go.tmpl +++ /dev/null @@ -1,905 +0,0 @@ -package gkehub - -import ( - "context" - "crypto/sha256" - "encoding/json" - "fmt" - "time" - - 
"google.golang.org/api/googleapi" - dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" -) - -type Feature struct { - Name *string `json:"name"` - Labels map[string]string `json:"labels"` - ResourceState *FeatureResourceState `json:"resourceState"` - Spec *FeatureSpec `json:"spec"` - State *FeatureState `json:"state"` - CreateTime *string `json:"createTime"` - UpdateTime *string `json:"updateTime"` - DeleteTime *string `json:"deleteTime"` - Project *string `json:"project"` - Location *string `json:"location"` -} - -func (r *Feature) String() string { - return dcl.SprintResource(r) -} - -// The enum FeatureResourceStateStateEnum. -type FeatureResourceStateStateEnum string - -// FeatureResourceStateStateEnumRef returns a *FeatureResourceStateStateEnum with the value of string s -// If the empty string is provided, nil is returned. -func FeatureResourceStateStateEnumRef(s string) *FeatureResourceStateStateEnum { - v := FeatureResourceStateStateEnum(s) - return &v -} - -func (v FeatureResourceStateStateEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"STATE_UNSPECIFIED", "ENABLING", "ACTIVE", "DISABLING", "UPDATING", "SERVICE_UPDATING"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "FeatureResourceStateStateEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum FeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum. -type FeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum string - -// FeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnumRef returns a *FeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func FeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnumRef(s string) *FeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum { - v := FeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum(s) - return &v -} - -func (v FeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"MODE_UNSPECIFIED", "COPY", "MOVE"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "FeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum. -type FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum string - -// FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnumRef returns a *FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum with the value of string s -// If the empty string is provided, nil is returned. -func FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnumRef(s string) *FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum { - v := FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum(s) - return &v -} - -func (v FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"MODE_UNSPECIFIED", "COPY", "MOVE"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum FeatureStateStateCodeEnum. 
-type FeatureStateStateCodeEnum string - -// FeatureStateStateCodeEnumRef returns a *FeatureStateStateCodeEnum with the value of string s -// If the empty string is provided, nil is returned. -func FeatureStateStateCodeEnumRef(s string) *FeatureStateStateCodeEnum { - v := FeatureStateStateCodeEnum(s) - return &v -} - -func (v FeatureStateStateCodeEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"CODE_UNSPECIFIED", "OK", "WARNING", "ERROR"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "FeatureStateStateCodeEnum", - Value: string(v), - Valid: []string{}, - } -} - -type FeatureResourceState struct { - empty bool `json:"-"` - State *FeatureResourceStateStateEnum `json:"state"` - HasResources *bool `json:"hasResources"` -} - -type jsonFeatureResourceState FeatureResourceState - -func (r *FeatureResourceState) UnmarshalJSON(data []byte) error { - var res jsonFeatureResourceState - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyFeatureResourceState - } else { - - r.State = res.State - - r.HasResources = res.HasResources - - } - return nil -} - -// This object is used to assert a desired state where this FeatureResourceState is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyFeatureResourceState *FeatureResourceState = &FeatureResourceState{empty: true} - -func (r *FeatureResourceState) Empty() bool { - return r.empty -} - -func (r *FeatureResourceState) String() string { - return dcl.SprintResource(r) -} - -func (r *FeatureResourceState) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type FeatureSpec struct { - empty bool `json:"-"` - Multiclusteringress *FeatureSpecMulticlusteringress `json:"multiclusteringress"` - Fleetobservability *FeatureSpecFleetobservability `json:"fleetobservability"` -} - -type jsonFeatureSpec FeatureSpec - -func (r *FeatureSpec) UnmarshalJSON(data []byte) error { - var res jsonFeatureSpec - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyFeatureSpec - } else { - - r.Multiclusteringress = res.Multiclusteringress - - r.Fleetobservability = res.Fleetobservability - - } - return nil -} - -// This object is used to assert a desired state where this FeatureSpec is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyFeatureSpec *FeatureSpec = &FeatureSpec{empty: true} - -func (r *FeatureSpec) Empty() bool { - return r.empty -} - -func (r *FeatureSpec) String() string { - return dcl.SprintResource(r) -} - -func (r *FeatureSpec) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type FeatureSpecMulticlusteringress struct { - empty bool `json:"-"` - ConfigMembership *string `json:"configMembership"` -} - -type jsonFeatureSpecMulticlusteringress FeatureSpecMulticlusteringress - -func (r *FeatureSpecMulticlusteringress) UnmarshalJSON(data []byte) error { - var res jsonFeatureSpecMulticlusteringress - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyFeatureSpecMulticlusteringress - } else { - - r.ConfigMembership = res.ConfigMembership - - } - return nil -} - -// This object is used to assert a desired state where this FeatureSpecMulticlusteringress is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyFeatureSpecMulticlusteringress *FeatureSpecMulticlusteringress = &FeatureSpecMulticlusteringress{empty: true} - -func (r *FeatureSpecMulticlusteringress) Empty() bool { - return r.empty -} - -func (r *FeatureSpecMulticlusteringress) String() string { - return dcl.SprintResource(r) -} - -func (r *FeatureSpecMulticlusteringress) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type FeatureSpecFleetobservability struct { - empty bool `json:"-"` - LoggingConfig *FeatureSpecFleetobservabilityLoggingConfig `json:"loggingConfig"` -} - -type jsonFeatureSpecFleetobservability FeatureSpecFleetobservability - -func (r *FeatureSpecFleetobservability) UnmarshalJSON(data []byte) error { - var res jsonFeatureSpecFleetobservability - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyFeatureSpecFleetobservability - } else { - - r.LoggingConfig = res.LoggingConfig - - } - return nil -} - -// This object is used to assert a desired state where this FeatureSpecFleetobservability is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyFeatureSpecFleetobservability *FeatureSpecFleetobservability = &FeatureSpecFleetobservability{empty: true} - -func (r *FeatureSpecFleetobservability) Empty() bool { - return r.empty -} - -func (r *FeatureSpecFleetobservability) String() string { - return dcl.SprintResource(r) -} - -func (r *FeatureSpecFleetobservability) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type FeatureSpecFleetobservabilityLoggingConfig struct { - empty bool `json:"-"` - DefaultConfig *FeatureSpecFleetobservabilityLoggingConfigDefaultConfig `json:"defaultConfig"` - FleetScopeLogsConfig *FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig `json:"fleetScopeLogsConfig"` -} - -type jsonFeatureSpecFleetobservabilityLoggingConfig FeatureSpecFleetobservabilityLoggingConfig - -func (r *FeatureSpecFleetobservabilityLoggingConfig) UnmarshalJSON(data []byte) error { - var res jsonFeatureSpecFleetobservabilityLoggingConfig - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyFeatureSpecFleetobservabilityLoggingConfig - } else { - - r.DefaultConfig = res.DefaultConfig - - r.FleetScopeLogsConfig = res.FleetScopeLogsConfig - - } - return nil -} - -// This object is used to assert a desired state where this FeatureSpecFleetobservabilityLoggingConfig is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyFeatureSpecFleetobservabilityLoggingConfig *FeatureSpecFleetobservabilityLoggingConfig = &FeatureSpecFleetobservabilityLoggingConfig{empty: true} - -func (r *FeatureSpecFleetobservabilityLoggingConfig) Empty() bool { - return r.empty -} - -func (r *FeatureSpecFleetobservabilityLoggingConfig) String() string { - return dcl.SprintResource(r) -} - -func (r *FeatureSpecFleetobservabilityLoggingConfig) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type FeatureSpecFleetobservabilityLoggingConfigDefaultConfig struct { - empty bool `json:"-"` - Mode *FeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum `json:"mode"` -} - -type jsonFeatureSpecFleetobservabilityLoggingConfigDefaultConfig FeatureSpecFleetobservabilityLoggingConfigDefaultConfig - -func (r *FeatureSpecFleetobservabilityLoggingConfigDefaultConfig) UnmarshalJSON(data []byte) error { - var res jsonFeatureSpecFleetobservabilityLoggingConfigDefaultConfig - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyFeatureSpecFleetobservabilityLoggingConfigDefaultConfig - } else { - - r.Mode = res.Mode - - } - return nil -} - -// This object is used to assert a desired state where this FeatureSpecFleetobservabilityLoggingConfigDefaultConfig is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyFeatureSpecFleetobservabilityLoggingConfigDefaultConfig *FeatureSpecFleetobservabilityLoggingConfigDefaultConfig = &FeatureSpecFleetobservabilityLoggingConfigDefaultConfig{empty: true} - -func (r *FeatureSpecFleetobservabilityLoggingConfigDefaultConfig) Empty() bool { - return r.empty -} - -func (r *FeatureSpecFleetobservabilityLoggingConfigDefaultConfig) String() string { - return dcl.SprintResource(r) -} - -func (r *FeatureSpecFleetobservabilityLoggingConfigDefaultConfig) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig struct { - empty bool `json:"-"` - Mode *FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum `json:"mode"` -} - -type jsonFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig - -func (r *FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig) UnmarshalJSON(data []byte) error { - var res jsonFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig - } else { - - r.Mode = res.Mode - - } - return nil -} - -// This object is used to assert a desired state where this FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig *FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig = &FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig{empty: true} - -func (r *FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig) Empty() bool { - return r.empty -} - -func (r *FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig) String() string { - return dcl.SprintResource(r) -} - -func (r *FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type FeatureState struct { - empty bool `json:"-"` - State *FeatureStateState `json:"state"` -} - -type jsonFeatureState FeatureState - -func (r *FeatureState) UnmarshalJSON(data []byte) error { - var res jsonFeatureState - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyFeatureState - } else { - - r.State = res.State - - } - return nil -} - -// This object is used to assert a desired state where this FeatureState is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyFeatureState *FeatureState = &FeatureState{empty: true} - -func (r *FeatureState) Empty() bool { - return r.empty -} - -func (r *FeatureState) String() string { - return dcl.SprintResource(r) -} - -func (r *FeatureState) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type FeatureStateState struct { - empty bool `json:"-"` - Code *FeatureStateStateCodeEnum `json:"code"` - Description *string `json:"description"` - UpdateTime *string `json:"updateTime"` -} - -type jsonFeatureStateState FeatureStateState - -func (r *FeatureStateState) UnmarshalJSON(data []byte) error { - var res jsonFeatureStateState - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyFeatureStateState - } else { - - r.Code = res.Code - - r.Description = res.Description - - r.UpdateTime = res.UpdateTime - - } - return nil -} - -// This object is used to assert a desired state where this FeatureStateState is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyFeatureStateState *FeatureStateState = &FeatureStateState{empty: true} - -func (r *FeatureStateState) Empty() bool { - return r.empty -} - -func (r *FeatureStateState) String() string { - return dcl.SprintResource(r) -} - -func (r *FeatureStateState) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -// Describe returns a simple description of this resource to ensure that automated tools -// can identify it. 
-func (r *Feature) Describe() dcl.ServiceTypeVersion { - return dcl.ServiceTypeVersion{ - Service: "gke_hub", - Type: "Feature", - Version: "beta", - } -} - -func (r *Feature) ID() (string, error) { - if err := extractFeatureFields(r); err != nil { - return "", err - } - nr := r.urlNormalized() - params := map[string]interface{}{ - "name": dcl.ValueOrEmptyString(nr.Name), - "labels": dcl.ValueOrEmptyString(nr.Labels), - "resource_state": dcl.ValueOrEmptyString(nr.ResourceState), - "spec": dcl.ValueOrEmptyString(nr.Spec), - "state": dcl.ValueOrEmptyString(nr.State), - "create_time": dcl.ValueOrEmptyString(nr.CreateTime), - "update_time": dcl.ValueOrEmptyString(nr.UpdateTime), - "delete_time": dcl.ValueOrEmptyString(nr.DeleteTime), - "project": dcl.ValueOrEmptyString(nr.Project), - "location": dcl.ValueOrEmptyString(nr.Location), - } - return dcl.Nprintf("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/features/{{ "{{" }}name{{ "}}" }}", params), nil -} - -const FeatureMaxPage = -1 - -type FeatureList struct { - Items []*Feature - - nextToken string - - pageSize int32 - - resource *Feature -} - -func (l *FeatureList) HasNext() bool { - return l.nextToken != "" -} - -func (l *FeatureList) Next(ctx context.Context, c *Client) error { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - if !l.HasNext() { - return fmt.Errorf("no next page") - } - items, token, err := c.listFeature(ctx, l.resource, l.nextToken, l.pageSize) - if err != nil { - return err - } - l.Items = items - l.nextToken = token - return err -} - -func (c *Client) ListFeature(ctx context.Context, project, location string) (*FeatureList, error) { - ctx = dcl.ContextWithRequestID(ctx) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - return c.ListFeatureWithMaxResults(ctx, project, location, FeatureMaxPage) - -} - -func (c *Client) ListFeatureWithMaxResults(ctx context.Context, project, 
location string, pageSize int32) (*FeatureList, error) { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - // Create a resource object so that we can use proper url normalization methods. - r := &Feature{ - Project: &project, - Location: &location, - } - items, token, err := c.listFeature(ctx, r, "", pageSize) - if err != nil { - return nil, err - } - return &FeatureList{ - Items: items, - nextToken: token, - pageSize: pageSize, - resource: r, - }, nil -} - -func (c *Client) GetFeature(ctx context.Context, r *Feature) (*Feature, error) { - ctx = dcl.ContextWithRequestID(ctx) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - // This is *purposefully* supressing errors. - // This function is used with url-normalized values + not URL normalized values. - // URL Normalized values will throw unintentional errors, since those values are not of the proper parent form. - extractFeatureFields(r) - - b, err := c.getFeatureRaw(ctx, r) - if err != nil { - if dcl.IsNotFound(err) { - return nil, &googleapi.Error{ - Code: 404, - Message: err.Error(), - } - } - return nil, err - } - result, err := unmarshalFeature(b, c, r) - if err != nil { - return nil, err - } - result.Project = r.Project - result.Location = r.Location - result.Name = r.Name - - c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) - c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) - result, err = canonicalizeFeatureNewState(c, result, r) - if err != nil { - return nil, err - } - if err := postReadExtractFeatureFields(result); err != nil { - return result, err - } - c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result) - - return result, nil -} - -func (c *Client) DeleteFeature(ctx context.Context, r *Feature) error { - ctx = dcl.ContextWithRequestID(ctx) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer 
cancel() - - if r == nil { - return fmt.Errorf("Feature resource is nil") - } - c.Config.Logger.InfoWithContext(ctx, "Deleting Feature...") - deleteOp := deleteFeatureOperation{} - return deleteOp.do(ctx, r, c) -} - -// DeleteAllFeature deletes all resources that the filter functions returns true on. -func (c *Client) DeleteAllFeature(ctx context.Context, project, location string, filter func(*Feature) bool) error { - listObj, err := c.ListFeature(ctx, project, location) - if err != nil { - return err - } - - err = c.deleteAllFeature(ctx, filter, listObj.Items) - if err != nil { - return err - } - for listObj.HasNext() { - err = listObj.Next(ctx, c) - if err != nil { - return nil - } - err = c.deleteAllFeature(ctx, filter, listObj.Items) - if err != nil { - return err - } - } - return nil -} - -func (c *Client) ApplyFeature(ctx context.Context, rawDesired *Feature, opts ...dcl.ApplyOption) (*Feature, error) { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - ctx = dcl.ContextWithRequestID(ctx) - var resultNewState *Feature - err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { - newState, err := applyFeatureHelper(c, ctx, rawDesired, opts...) - resultNewState = newState - if err != nil { - // If the error is 409, there is conflict in resource update. - // Here we want to apply changes based on latest state. - if dcl.IsConflictError(err) { - return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} - } - return nil, err - } - return nil, nil - }, c.Config.RetryProvider) - return resultNewState, err -} - -func applyFeatureHelper(c *Client, ctx context.Context, rawDesired *Feature, opts ...dcl.ApplyOption) (*Feature, error) { - c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyFeature...") - c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) - - // 1.1: Validation of user-specified fields in desired state. 
- if err := rawDesired.validate(); err != nil { - return nil, err - } - - if err := extractFeatureFields(rawDesired); err != nil { - return nil, err - } - - initial, desired, fieldDiffs, err := c.featureDiffsForRawDesired(ctx, rawDesired, opts...) - if err != nil { - return nil, fmt.Errorf("failed to create a diff: %w", err) - } - - diffs, err := convertFieldDiffsToFeatureDiffs(c.Config, fieldDiffs, opts) - if err != nil { - return nil, err - } - - // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). - - // 2.3: Lifecycle Directive Check - var create bool - lp := dcl.FetchLifecycleParams(opts) - if initial == nil { - if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { - return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} - } - create = true - } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { - return nil, dcl.ApplyInfeasibleError{ - Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), - } - } else { - for _, d := range diffs { - if d.RequiresRecreate { - return nil, dcl.ApplyInfeasibleError{ - Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), - } - } - if dcl.HasLifecycleParam(lp, dcl.BlockModification) { - return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} - } - } - } - - // 2.4 Imperative Request Planning - var ops []featureApiOperation - if create { - ops = append(ops, &createFeatureOperation{}) - } else { - for _, d := range diffs { - ops = append(ops, d.UpdateOp) - } - } - c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) - - // 2.5 Request Actuation - for _, op := range ops { - c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) - if err := op.do(ctx, desired, c); err != nil { - c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", op, op, err) - return nil, err - } - 
c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) - } - return applyFeatureDiff(c, ctx, desired, rawDesired, ops, opts...) -} - -func applyFeatureDiff(c *Client, ctx context.Context, desired *Feature, rawDesired *Feature, ops []featureApiOperation, opts ...dcl.ApplyOption) (*Feature, error) { - // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state - c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") - rawNew, err := c.GetFeature(ctx, desired) - if err != nil { - return nil, err - } - // Get additional values from the first response. - // These values should be merged into the newState above. - if len(ops) > 0 { - lastOp := ops[len(ops)-1] - if o, ok := lastOp.(*createFeatureOperation); ok { - if r, hasR := o.FirstResponse(); hasR { - - c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") - - fullResp, err := unmarshalMapFeature(r, c, rawDesired) - if err != nil { - return nil, err - } - - rawNew, err = canonicalizeFeatureNewState(c, rawNew, fullResp) - if err != nil { - return nil, err - } - } - } - } - - c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) - // 3.2b Canonicalization of raw new state using raw desired state - newState, err := canonicalizeFeatureNewState(c, rawNew, rawDesired) - if err != nil { - return rawNew, err - } - - c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) - // 3.3 Comparison of the new state and raw desired state. - // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE - newDesired, err := canonicalizeFeatureDesiredState(rawDesired, newState) - if err != nil { - return newState, err - } - - if err := postReadExtractFeatureFields(newState); err != nil { - return newState, err - } - - // Need to ensure any transformations made here match acceptably in differ. 
- if err := postReadExtractFeatureFields(newDesired); err != nil { - return newState, err - } - - c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) - newDiffs, err := diffFeature(c, newDesired, newState) - if err != nil { - return newState, err - } - - if len(newDiffs) == 0 { - c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") - } else { - c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) - diffMessages := make([]string, len(newDiffs)) - for i, d := range newDiffs { - diffMessages[i] = fmt.Sprintf("%v", d) - } - return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} - } - c.Config.Logger.InfoWithContext(ctx, "Done Apply.") - return newState, nil -} diff --git a/mmv1/third_party/terraform/services/gkehub/feature_internal.go.tmpl b/mmv1/third_party/terraform/services/gkehub/feature_internal.go.tmpl deleted file mode 100644 index 4807c97be7a2..000000000000 --- a/mmv1/third_party/terraform/services/gkehub/feature_internal.go.tmpl +++ /dev/null @@ -1,3616 +0,0 @@ -package gkehub - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io/ioutil" - "strings" - - dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" - "github.com/hashicorp/terraform-provider-google/google/tpgdclresource/operations" -) - -func (r *Feature) validate() error { - - if err := dcl.RequiredParameter(r.Project, "Project"); err != nil { - return err - } - if err := dcl.RequiredParameter(r.Location, "Location"); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.ResourceState) { - if err := r.ResourceState.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.Spec) { - if err := r.Spec.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.State) { - if err := r.State.validate(); err != nil { - return err - } - } - return nil -} -func (r *FeatureResourceState) validate() error { - return nil -} -func (r 
*FeatureSpec) validate() error { - if !dcl.IsEmptyValueIndirect(r.Multiclusteringress) { - if err := r.Multiclusteringress.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.Fleetobservability) { - if err := r.Fleetobservability.validate(); err != nil { - return err - } - } - return nil -} -func (r *FeatureSpecMulticlusteringress) validate() error { - if err := dcl.Required(r, "configMembership"); err != nil { - return err - } - return nil -} -func (r *FeatureSpecFleetobservability) validate() error { - if !dcl.IsEmptyValueIndirect(r.LoggingConfig) { - if err := r.LoggingConfig.validate(); err != nil { - return err - } - } - return nil -} -func (r *FeatureSpecFleetobservabilityLoggingConfig) validate() error { - if !dcl.IsEmptyValueIndirect(r.DefaultConfig) { - if err := r.DefaultConfig.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.FleetScopeLogsConfig) { - if err := r.FleetScopeLogsConfig.validate(); err != nil { - return err - } - } - return nil -} -func (r *FeatureSpecFleetobservabilityLoggingConfigDefaultConfig) validate() error { - return nil -} -func (r *FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig) validate() error { - return nil -} -func (r *FeatureState) validate() error { - if !dcl.IsEmptyValueIndirect(r.State) { - if err := r.State.validate(); err != nil { - return err - } - } - return nil -} -func (r *FeatureStateState) validate() error { - return nil -} -func (r *Feature) basePath() string { - params := map[string]interface{}{} - return dcl.Nprintf("https://gkehub.googleapis.com/v1beta1/", params) -} - -// featureApiOperation represents a mutable operation in the underlying REST -// API such as Create, Update, or Delete. 
-type featureApiOperation interface { - do(context.Context, *Feature, *Client) error -} - -// newUpdateFeatureUpdateFeatureRequest creates a request for an -// Feature resource's UpdateFeature update type by filling in the update -// fields based on the intended state of the resource. -func newUpdateFeatureUpdateFeatureRequest(ctx context.Context, f *Feature, c *Client) (map[string]interface{}, error) { - req := map[string]interface{}{} - res := f - _ = res - - if v := f.Labels; !dcl.IsEmptyValueIndirect(v) { - req["labels"] = v - } - if v, err := expandFeatureSpec(c, f.Spec, res); err != nil { - return nil, fmt.Errorf("error expanding Spec into spec: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - req["spec"] = v - } - return req, nil -} - -// marshalUpdateFeatureUpdateFeatureRequest converts the update into -// the final JSON request body. -func marshalUpdateFeatureUpdateFeatureRequest(c *Client, m map[string]interface{}) ([]byte, error) { - - return json.Marshal(m) -} - -type updateFeatureUpdateFeatureOperation struct { - // If the update operation has the REQUIRES_APPLY_OPTIONS trait, this will be populated. - // Usually it will be nil - this is to prevent us from accidentally depending on apply - // options, which should usually be unnecessary. - ApplyOptions []dcl.ApplyOption - FieldDiffs []*dcl.FieldDiff -} - -// do creates a request and sends it to the appropriate URL. In most operations, -// do will transcribe a subset of the resource into a request object and send a -// PUT request to a single URL. 
- -func (c *Client) listFeatureRaw(ctx context.Context, r *Feature, pageToken string, pageSize int32) ([]byte, error) { - u, err := r.urlNormalized().listURL(c.Config.BasePath) - if err != nil { - return nil, err - } - - m := make(map[string]string) - if pageToken != "" { - m["pageToken"] = pageToken - } - - if pageSize != FeatureMaxPage { - m["pageSize"] = fmt.Sprintf("%v", pageSize) - } - - u, err = dcl.AddQueryParams(u, m) - if err != nil { - return nil, err - } - resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) - if err != nil { - return nil, err - } - defer resp.Response.Body.Close() - return ioutil.ReadAll(resp.Response.Body) -} - -type listFeatureOperation struct { - Resources []map[string]interface{} `json:"resources"` - Token string `json:"nextPageToken"` -} - -func (c *Client) listFeature(ctx context.Context, r *Feature, pageToken string, pageSize int32) ([]*Feature, string, error) { - b, err := c.listFeatureRaw(ctx, r, pageToken, pageSize) - if err != nil { - return nil, "", err - } - - var m listFeatureOperation - if err := json.Unmarshal(b, &m); err != nil { - return nil, "", err - } - - var l []*Feature - for _, v := range m.Resources { - res, err := unmarshalMapFeature(v, c, r) - if err != nil { - return nil, m.Token, err - } - res.Project = r.Project - res.Location = r.Location - l = append(l, res) - } - - return l, m.Token, nil -} - -func (c *Client) deleteAllFeature(ctx context.Context, f func(*Feature) bool, resources []*Feature) error { - var errors []string - for _, res := range resources { - if f(res) { - // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. 
- err := c.DeleteFeature(ctx, res) - if err != nil { - errors = append(errors, err.Error()) - } - } - } - if len(errors) > 0 { - return fmt.Errorf("%v", strings.Join(errors, "\n")) - } else { - return nil - } -} - -type deleteFeatureOperation struct{} - -func (op *deleteFeatureOperation) do(ctx context.Context, r *Feature, c *Client) error { - r, err := c.GetFeature(ctx, r) - if err != nil { - if dcl.IsNotFound(err) { - c.Config.Logger.InfoWithContextf(ctx, "Feature not found, returning. Original error: %v", err) - return nil - } - c.Config.Logger.WarningWithContextf(ctx, "GetFeature checking for existence. error: %v", err) - return err - } - - u, err := r.deleteURL(c.Config.BasePath) - if err != nil { - return err - } - - // Delete should never have a body - body := &bytes.Buffer{} - resp, err := dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider) - if err != nil { - return err - } - - // wait for object to be deleted. - var o operations.StandardGCPOperation - if err := dcl.ParseResponse(resp.Response, &o); err != nil { - return err - } - if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { - return err - } - - // We saw a race condition where for some successful delete operation, the Get calls returned resources for a short duration. - // This is the reason we are adding retry to handle that case. - retriesRemaining := 10 - dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { - _, err := c.GetFeature(ctx, r) - if dcl.IsNotFound(err) { - return nil, nil - } - if retriesRemaining > 0 { - retriesRemaining-- - return &dcl.RetryDetails{}, dcl.OperationNotDone{} - } - return nil, dcl.NotDeletedError{ExistingResource: r} - }, c.Config.RetryProvider) - return nil -} - -// Create operations are similar to Update operations, although they do not have -// specific request objects. 
The Create request object is the json encoding of -// the resource, which is modified by res.marshal to form the base request body. -type createFeatureOperation struct { - response map[string]interface{} -} - -func (op *createFeatureOperation) FirstResponse() (map[string]interface{}, bool) { - return op.response, len(op.response) > 0 -} - -func (op *createFeatureOperation) do(ctx context.Context, r *Feature, c *Client) error { - c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r) - u, err := r.createURL(c.Config.BasePath) - if err != nil { - return err - } - - req, err := r.marshal(c) - if err != nil { - return err - } - resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider) - if err != nil { - return err - } - // wait for object to be created. - var o operations.StandardGCPOperation - if err := dcl.ParseResponse(resp.Response, &o); err != nil { - return err - } - if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { - c.Config.Logger.Warningf("Creation failed after waiting for operation: %v", err) - return err - } - c.Config.Logger.InfoWithContextf(ctx, "Successfully waited for operation") - op.response, _ = o.FirstResponse() - - if _, err := c.GetFeature(ctx, r); err != nil { - c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) - return err - } - - return nil -} - -func (c *Client) getFeatureRaw(ctx context.Context, r *Feature) ([]byte, error) { - - u, err := r.getURL(c.Config.BasePath) - if err != nil { - return nil, err - } - resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) - if err != nil { - return nil, err - } - defer resp.Response.Body.Close() - b, err := ioutil.ReadAll(resp.Response.Body) - if err != nil { - return nil, err - } - - return b, nil -} - -func (c *Client) featureDiffsForRawDesired(ctx context.Context, rawDesired *Feature, opts ...dcl.ApplyOption) 
(initial, desired *Feature, diffs []*dcl.FieldDiff, err error) { - c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") - // First, let us see if the user provided a state hint. If they did, we will start fetching based on that. - var fetchState *Feature - if sh := dcl.FetchStateHint(opts); sh != nil { - if r, ok := sh.(*Feature); !ok { - c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected Feature, got %T", sh) - } else { - fetchState = r - } - } - if fetchState == nil { - fetchState = rawDesired - } - - // 1.2: Retrieval of raw initial state from API - rawInitial, err := c.GetFeature(ctx, fetchState) - if rawInitial == nil { - if !dcl.IsNotFound(err) { - c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a Feature resource already exists: %s", err) - return nil, nil, nil, fmt.Errorf("failed to retrieve Feature resource: %v", err) - } - c.Config.Logger.InfoWithContext(ctx, "Found that Feature resource did not exist.") - // Perform canonicalization to pick up defaults. - desired, err = canonicalizeFeatureDesiredState(rawDesired, rawInitial) - return nil, desired, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Found initial state for Feature: %v", rawInitial) - c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for Feature: %v", rawDesired) - - // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. - if err := extractFeatureFields(rawInitial); err != nil { - return nil, nil, nil, err - } - - // 1.3: Canonicalize raw initial state into initial state. - initial, err = canonicalizeFeatureInitialState(rawInitial, rawDesired) - if err != nil { - return nil, nil, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for Feature: %v", initial) - - // 1.4: Canonicalize raw desired state into desired state. - desired, err = canonicalizeFeatureDesiredState(rawDesired, rawInitial, opts...) 
- if err != nil { - return nil, nil, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for Feature: %v", desired) - - // 2.1: Comparison of initial and desired state. - diffs, err = diffFeature(c, desired, initial, opts...) - return initial, desired, diffs, err -} - -func canonicalizeFeatureInitialState(rawInitial, rawDesired *Feature) (*Feature, error) { - // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. - return rawInitial, nil -} - -/* -* Canonicalizers -* -* These are responsible for converting either a user-specified config or a -* GCP API response to a standard format that can be used for difference checking. -* */ - -func canonicalizeFeatureDesiredState(rawDesired, rawInitial *Feature, opts ...dcl.ApplyOption) (*Feature, error) { - - if rawInitial == nil { - // Since the initial state is empty, the desired state is all we have. - // We canonicalize the remaining nested objects with nil to pick up defaults. - rawDesired.ResourceState = canonicalizeFeatureResourceState(rawDesired.ResourceState, nil, opts...) - rawDesired.Spec = canonicalizeFeatureSpec(rawDesired.Spec, nil, opts...) - rawDesired.State = canonicalizeFeatureState(rawDesired.State, nil, opts...) - - return rawDesired, nil - } - canonicalDesired := &Feature{} - if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawInitial.Name) { - canonicalDesired.Name = rawInitial.Name - } else { - canonicalDesired.Name = rawDesired.Name - } - if dcl.IsZeroValue(rawDesired.Labels) || (dcl.IsEmptyValueIndirect(rawDesired.Labels) && dcl.IsEmptyValueIndirect(rawInitial.Labels)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - canonicalDesired.Labels = rawInitial.Labels - } else { - canonicalDesired.Labels = rawDesired.Labels - } - canonicalDesired.Spec = canonicalizeFeatureSpec(rawDesired.Spec, rawInitial.Spec, opts...) 
- if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) { - canonicalDesired.Project = rawInitial.Project - } else { - canonicalDesired.Project = rawDesired.Project - } - if dcl.NameToSelfLink(rawDesired.Location, rawInitial.Location) { - canonicalDesired.Location = rawInitial.Location - } else { - canonicalDesired.Location = rawDesired.Location - } - return canonicalDesired, nil -} - -func canonicalizeFeatureNewState(c *Client, rawNew, rawDesired *Feature) (*Feature, error) { - - if dcl.IsEmptyValueIndirect(rawNew.Name) && dcl.IsEmptyValueIndirect(rawDesired.Name) { - rawNew.Name = rawDesired.Name - } else { - if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawNew.Name) { - rawNew.Name = rawDesired.Name - } - } - - if dcl.IsEmptyValueIndirect(rawNew.Labels) && dcl.IsEmptyValueIndirect(rawDesired.Labels) { - rawNew.Labels = rawDesired.Labels - } else { - } - - if dcl.IsEmptyValueIndirect(rawNew.ResourceState) && dcl.IsEmptyValueIndirect(rawDesired.ResourceState) { - rawNew.ResourceState = rawDesired.ResourceState - } else { - rawNew.ResourceState = canonicalizeNewFeatureResourceState(c, rawDesired.ResourceState, rawNew.ResourceState) - } - - if dcl.IsEmptyValueIndirect(rawNew.Spec) && dcl.IsEmptyValueIndirect(rawDesired.Spec) { - rawNew.Spec = rawDesired.Spec - } else { - rawNew.Spec = canonicalizeNewFeatureSpec(c, rawDesired.Spec, rawNew.Spec) - } - - if dcl.IsEmptyValueIndirect(rawNew.State) && dcl.IsEmptyValueIndirect(rawDesired.State) { - rawNew.State = rawDesired.State - } else { - rawNew.State = canonicalizeNewFeatureState(c, rawDesired.State, rawNew.State) - } - - if dcl.IsEmptyValueIndirect(rawNew.CreateTime) && dcl.IsEmptyValueIndirect(rawDesired.CreateTime) { - rawNew.CreateTime = rawDesired.CreateTime - } else { - } - - if dcl.IsEmptyValueIndirect(rawNew.UpdateTime) && dcl.IsEmptyValueIndirect(rawDesired.UpdateTime) { - rawNew.UpdateTime = rawDesired.UpdateTime - } else { - } - - if dcl.IsEmptyValueIndirect(rawNew.DeleteTime) && 
dcl.IsEmptyValueIndirect(rawDesired.DeleteTime) { - rawNew.DeleteTime = rawDesired.DeleteTime - } else { - } - - rawNew.Project = rawDesired.Project - - rawNew.Location = rawDesired.Location - - return rawNew, nil -} - -func canonicalizeFeatureResourceState(des, initial *FeatureResourceState, opts ...dcl.ApplyOption) *FeatureResourceState { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &FeatureResourceState{} - - return cDes -} - -func canonicalizeFeatureResourceStateSlice(des, initial []FeatureResourceState, opts ...dcl.ApplyOption) []FeatureResourceState { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]FeatureResourceState, 0, len(des)) - for _, d := range des { - cd := canonicalizeFeatureResourceState(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]FeatureResourceState, 0, len(des)) - for i, d := range des { - cd := canonicalizeFeatureResourceState(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewFeatureResourceState(c *Client, des, nw *FeatureResourceState) *FeatureResourceState { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for FeatureResourceState while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.BoolCanonicalize(des.HasResources, nw.HasResources) { - nw.HasResources = des.HasResources - } - - return nw -} - -func canonicalizeNewFeatureResourceStateSet(c *Client, des, nw []FeatureResourceState) []FeatureResourceState { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []FeatureResourceState - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareFeatureResourceStateNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewFeatureResourceState(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewFeatureResourceStateSlice(c *Client, des, nw []FeatureResourceState) []FeatureResourceState { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []FeatureResourceState - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewFeatureResourceState(c, &d, &n)) - } - - return items -} - -func canonicalizeFeatureSpec(des, initial *FeatureSpec, opts ...dcl.ApplyOption) *FeatureSpec { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &FeatureSpec{} - - cDes.Multiclusteringress = canonicalizeFeatureSpecMulticlusteringress(des.Multiclusteringress, initial.Multiclusteringress, opts...) - cDes.Fleetobservability = canonicalizeFeatureSpecFleetobservability(des.Fleetobservability, initial.Fleetobservability, opts...) - - return cDes -} - -func canonicalizeFeatureSpecSlice(des, initial []FeatureSpec, opts ...dcl.ApplyOption) []FeatureSpec { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]FeatureSpec, 0, len(des)) - for _, d := range des { - cd := canonicalizeFeatureSpec(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]FeatureSpec, 0, len(des)) - for i, d := range des { - cd := canonicalizeFeatureSpec(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewFeatureSpec(c *Client, des, nw *FeatureSpec) *FeatureSpec { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for FeatureSpec while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - nw.Multiclusteringress = canonicalizeNewFeatureSpecMulticlusteringress(c, des.Multiclusteringress, nw.Multiclusteringress) - nw.Fleetobservability = canonicalizeNewFeatureSpecFleetobservability(c, des.Fleetobservability, nw.Fleetobservability) - - return nw -} - -func canonicalizeNewFeatureSpecSet(c *Client, des, nw []FeatureSpec) []FeatureSpec { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []FeatureSpec - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareFeatureSpecNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewFeatureSpec(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewFeatureSpecSlice(c *Client, des, nw []FeatureSpec) []FeatureSpec { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []FeatureSpec - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewFeatureSpec(c, &d, &n)) - } - - return items -} - -func canonicalizeFeatureSpecMulticlusteringress(des, initial *FeatureSpecMulticlusteringress, opts ...dcl.ApplyOption) *FeatureSpecMulticlusteringress { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &FeatureSpecMulticlusteringress{} - - if dcl.IsZeroValue(des.ConfigMembership) || (dcl.IsEmptyValueIndirect(des.ConfigMembership) && dcl.IsEmptyValueIndirect(initial.ConfigMembership)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.ConfigMembership = initial.ConfigMembership - } else { - cDes.ConfigMembership = des.ConfigMembership - } - - return cDes -} - -func canonicalizeFeatureSpecMulticlusteringressSlice(des, initial []FeatureSpecMulticlusteringress, opts ...dcl.ApplyOption) []FeatureSpecMulticlusteringress { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]FeatureSpecMulticlusteringress, 0, len(des)) - for _, d := range des { - cd := canonicalizeFeatureSpecMulticlusteringress(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]FeatureSpecMulticlusteringress, 0, len(des)) - for i, d := range des { - cd := canonicalizeFeatureSpecMulticlusteringress(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewFeatureSpecMulticlusteringress(c *Client, des, nw *FeatureSpecMulticlusteringress) *FeatureSpecMulticlusteringress { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for FeatureSpecMulticlusteringress while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewFeatureSpecMulticlusteringressSet(c *Client, des, nw []FeatureSpecMulticlusteringress) []FeatureSpecMulticlusteringress { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []FeatureSpecMulticlusteringress - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareFeatureSpecMulticlusteringressNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewFeatureSpecMulticlusteringress(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewFeatureSpecMulticlusteringressSlice(c *Client, des, nw []FeatureSpecMulticlusteringress) []FeatureSpecMulticlusteringress { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []FeatureSpecMulticlusteringress - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewFeatureSpecMulticlusteringress(c, &d, &n)) - } - - return items -} - -func canonicalizeFeatureSpecFleetobservability(des, initial *FeatureSpecFleetobservability, opts ...dcl.ApplyOption) *FeatureSpecFleetobservability { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &FeatureSpecFleetobservability{} - - cDes.LoggingConfig = canonicalizeFeatureSpecFleetobservabilityLoggingConfig(des.LoggingConfig, initial.LoggingConfig, opts...) 
- - return cDes -} - -func canonicalizeFeatureSpecFleetobservabilitySlice(des, initial []FeatureSpecFleetobservability, opts ...dcl.ApplyOption) []FeatureSpecFleetobservability { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]FeatureSpecFleetobservability, 0, len(des)) - for _, d := range des { - cd := canonicalizeFeatureSpecFleetobservability(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]FeatureSpecFleetobservability, 0, len(des)) - for i, d := range des { - cd := canonicalizeFeatureSpecFleetobservability(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewFeatureSpecFleetobservability(c *Client, des, nw *FeatureSpecFleetobservability) *FeatureSpecFleetobservability { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for FeatureSpecFleetobservability while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - nw.LoggingConfig = canonicalizeNewFeatureSpecFleetobservabilityLoggingConfig(c, des.LoggingConfig, nw.LoggingConfig) - - return nw -} - -func canonicalizeNewFeatureSpecFleetobservabilitySet(c *Client, des, nw []FeatureSpecFleetobservability) []FeatureSpecFleetobservability { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []FeatureSpecFleetobservability - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareFeatureSpecFleetobservabilityNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewFeatureSpecFleetobservability(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewFeatureSpecFleetobservabilitySlice(c *Client, des, nw []FeatureSpecFleetobservability) []FeatureSpecFleetobservability { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []FeatureSpecFleetobservability - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewFeatureSpecFleetobservability(c, &d, &n)) - } - - return items -} - -func canonicalizeFeatureSpecFleetobservabilityLoggingConfig(des, initial *FeatureSpecFleetobservabilityLoggingConfig, opts ...dcl.ApplyOption) *FeatureSpecFleetobservabilityLoggingConfig { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &FeatureSpecFleetobservabilityLoggingConfig{} - - cDes.DefaultConfig = canonicalizeFeatureSpecFleetobservabilityLoggingConfigDefaultConfig(des.DefaultConfig, initial.DefaultConfig, opts...) - cDes.FleetScopeLogsConfig = canonicalizeFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig(des.FleetScopeLogsConfig, initial.FleetScopeLogsConfig, opts...) 
- - return cDes -} - -func canonicalizeFeatureSpecFleetobservabilityLoggingConfigSlice(des, initial []FeatureSpecFleetobservabilityLoggingConfig, opts ...dcl.ApplyOption) []FeatureSpecFleetobservabilityLoggingConfig { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]FeatureSpecFleetobservabilityLoggingConfig, 0, len(des)) - for _, d := range des { - cd := canonicalizeFeatureSpecFleetobservabilityLoggingConfig(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]FeatureSpecFleetobservabilityLoggingConfig, 0, len(des)) - for i, d := range des { - cd := canonicalizeFeatureSpecFleetobservabilityLoggingConfig(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewFeatureSpecFleetobservabilityLoggingConfig(c *Client, des, nw *FeatureSpecFleetobservabilityLoggingConfig) *FeatureSpecFleetobservabilityLoggingConfig { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for FeatureSpecFleetobservabilityLoggingConfig while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - nw.DefaultConfig = canonicalizeNewFeatureSpecFleetobservabilityLoggingConfigDefaultConfig(c, des.DefaultConfig, nw.DefaultConfig) - nw.FleetScopeLogsConfig = canonicalizeNewFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig(c, des.FleetScopeLogsConfig, nw.FleetScopeLogsConfig) - - return nw -} - -func canonicalizeNewFeatureSpecFleetobservabilityLoggingConfigSet(c *Client, des, nw []FeatureSpecFleetobservabilityLoggingConfig) []FeatureSpecFleetobservabilityLoggingConfig { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []FeatureSpecFleetobservabilityLoggingConfig - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareFeatureSpecFleetobservabilityLoggingConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewFeatureSpecFleetobservabilityLoggingConfig(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewFeatureSpecFleetobservabilityLoggingConfigSlice(c *Client, des, nw []FeatureSpecFleetobservabilityLoggingConfig) []FeatureSpecFleetobservabilityLoggingConfig { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []FeatureSpecFleetobservabilityLoggingConfig - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewFeatureSpecFleetobservabilityLoggingConfig(c, &d, &n)) - } - - return items -} - -func canonicalizeFeatureSpecFleetobservabilityLoggingConfigDefaultConfig(des, initial *FeatureSpecFleetobservabilityLoggingConfigDefaultConfig, opts ...dcl.ApplyOption) *FeatureSpecFleetobservabilityLoggingConfigDefaultConfig { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &FeatureSpecFleetobservabilityLoggingConfigDefaultConfig{} - - if dcl.IsZeroValue(des.Mode) || (dcl.IsEmptyValueIndirect(des.Mode) && dcl.IsEmptyValueIndirect(initial.Mode)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Mode = initial.Mode - } else { - cDes.Mode = des.Mode - } - - return cDes -} - -func canonicalizeFeatureSpecFleetobservabilityLoggingConfigDefaultConfigSlice(des, initial []FeatureSpecFleetobservabilityLoggingConfigDefaultConfig, opts ...dcl.ApplyOption) []FeatureSpecFleetobservabilityLoggingConfigDefaultConfig { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]FeatureSpecFleetobservabilityLoggingConfigDefaultConfig, 0, len(des)) - for _, d := range des { - cd := canonicalizeFeatureSpecFleetobservabilityLoggingConfigDefaultConfig(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]FeatureSpecFleetobservabilityLoggingConfigDefaultConfig, 0, len(des)) - for i, d := range des { - cd := canonicalizeFeatureSpecFleetobservabilityLoggingConfigDefaultConfig(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewFeatureSpecFleetobservabilityLoggingConfigDefaultConfig(c *Client, des, nw *FeatureSpecFleetobservabilityLoggingConfigDefaultConfig) *FeatureSpecFleetobservabilityLoggingConfigDefaultConfig { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for FeatureSpecFleetobservabilityLoggingConfigDefaultConfig while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewFeatureSpecFleetobservabilityLoggingConfigDefaultConfigSet(c *Client, des, nw []FeatureSpecFleetobservabilityLoggingConfigDefaultConfig) []FeatureSpecFleetobservabilityLoggingConfigDefaultConfig { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []FeatureSpecFleetobservabilityLoggingConfigDefaultConfig - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareFeatureSpecFleetobservabilityLoggingConfigDefaultConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewFeatureSpecFleetobservabilityLoggingConfigDefaultConfig(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewFeatureSpecFleetobservabilityLoggingConfigDefaultConfigSlice(c *Client, des, nw []FeatureSpecFleetobservabilityLoggingConfigDefaultConfig) []FeatureSpecFleetobservabilityLoggingConfigDefaultConfig { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []FeatureSpecFleetobservabilityLoggingConfigDefaultConfig - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewFeatureSpecFleetobservabilityLoggingConfigDefaultConfig(c, &d, &n)) - } - - return items -} - -func canonicalizeFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig(des, initial *FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig, opts ...dcl.ApplyOption) *FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig{} - - if dcl.IsZeroValue(des.Mode) || (dcl.IsEmptyValueIndirect(des.Mode) && dcl.IsEmptyValueIndirect(initial.Mode)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Mode = initial.Mode - } else { - cDes.Mode = des.Mode - } - - return cDes -} - -func canonicalizeFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigSlice(des, initial []FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig, opts ...dcl.ApplyOption) []FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig, 0, len(des)) - for _, d := range des { - cd := canonicalizeFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig, 0, len(des)) - for i, d := range des { - cd := canonicalizeFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig(c *Client, des, nw *FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig) *FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigSet(c *Client, des, nw []FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig) []FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigSlice(c *Client, des, nw []FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig) []FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig(c, &d, &n)) - } - - return items -} - -func canonicalizeFeatureState(des, initial *FeatureState, opts ...dcl.ApplyOption) *FeatureState { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &FeatureState{} - - return cDes -} - -func canonicalizeFeatureStateSlice(des, initial []FeatureState, opts ...dcl.ApplyOption) []FeatureState { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]FeatureState, 0, len(des)) - for _, d := range des { - cd := canonicalizeFeatureState(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]FeatureState, 0, len(des)) - for i, d := range des { - cd := canonicalizeFeatureState(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewFeatureState(c *Client, des, nw *FeatureState) *FeatureState { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for FeatureState while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - nw.State = canonicalizeNewFeatureStateState(c, des.State, nw.State) - - return nw -} - -func canonicalizeNewFeatureStateSet(c *Client, des, nw []FeatureState) []FeatureState { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []FeatureState - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareFeatureStateNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewFeatureState(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewFeatureStateSlice(c *Client, des, nw []FeatureState) []FeatureState { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []FeatureState - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewFeatureState(c, &d, &n)) - } - - return items -} - -func canonicalizeFeatureStateState(des, initial *FeatureStateState, opts ...dcl.ApplyOption) *FeatureStateState { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &FeatureStateState{} - - return cDes -} - -func canonicalizeFeatureStateStateSlice(des, initial []FeatureStateState, opts ...dcl.ApplyOption) []FeatureStateState { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]FeatureStateState, 0, len(des)) - for _, d := range des { - cd := canonicalizeFeatureStateState(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]FeatureStateState, 0, len(des)) - for i, d := range des { - cd := canonicalizeFeatureStateState(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewFeatureStateState(c *Client, des, nw *FeatureStateState) *FeatureStateState { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for FeatureStateState while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Description, nw.Description) { - nw.Description = des.Description - } - if dcl.StringCanonicalize(des.UpdateTime, nw.UpdateTime) { - nw.UpdateTime = des.UpdateTime - } - - return nw -} - -func canonicalizeNewFeatureStateStateSet(c *Client, des, nw []FeatureStateState) []FeatureStateState { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []FeatureStateState - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareFeatureStateStateNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewFeatureStateState(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewFeatureStateStateSlice(c *Client, des, nw []FeatureStateState) []FeatureStateState { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []FeatureStateState - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewFeatureStateState(c, &d, &n)) - } - - return items -} - -// The differ returns a list of diffs, along with a list of operations that should be taken -// to remedy them. Right now, it does not attempt to consolidate operations - if several -// fields can be fixed with a patch update, it will perform the patch several times. -// Diffs on some fields will be ignored if the `desired` state has an empty (nil) -// value. This empty value indicates that the user does not care about the state for -// the field. Empty fields on the actual object will cause diffs. -// TODO(magic-modules-eng): for efficiency in some resources, add batching. 
-func diffFeature(c *Client, desired, actual *Feature, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) { - if desired == nil || actual == nil { - return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual) - } - - c.Config.Logger.Infof("Diff function called with desired state: %v", desired) - c.Config.Logger.Infof("Diff function called with actual state: %v", actual) - - var fn dcl.FieldName - var newDiffs []*dcl.FieldDiff - // New style diffs. - if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Labels, actual.Labels, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateFeatureUpdateFeatureOperation")}, fn.AddNest("Labels")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.ResourceState, actual.ResourceState, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareFeatureResourceStateNewStyle, EmptyObject: EmptyFeatureResourceState, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ResourceState")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Spec, actual.Spec, dcl.DiffInfo{ObjectFunction: compareFeatureSpecNewStyle, EmptyObject: EmptyFeatureSpec, OperationSelector: dcl.TriggersOperation("updateFeatureUpdateFeatureOperation")}, fn.AddNest("Spec")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.State, actual.State, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareFeatureStateNewStyle, EmptyObject: EmptyFeatureState, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("State")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.CreateTime, actual.CreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreateTime")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.DeleteTime, actual.DeleteTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DeleteTime")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Location, actual.Location, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Location")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) 
- } - - if len(newDiffs) > 0 { - c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs) - } - return newDiffs, nil -} -func compareFeatureResourceStateNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*FeatureResourceState) - if !ok { - desiredNotPointer, ok := d.(FeatureResourceState) - if !ok { - return nil, fmt.Errorf("obj %v is not a FeatureResourceState or *FeatureResourceState", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*FeatureResourceState) - if !ok { - actualNotPointer, ok := a.(FeatureResourceState) - if !ok { - return nil, fmt.Errorf("obj %v is not a FeatureResourceState", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.State, actual.State, dcl.DiffInfo{OutputOnly: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("State")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.HasResources, actual.HasResources, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("HasResources")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareFeatureSpecNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*FeatureSpec) - if !ok { - desiredNotPointer, ok := d.(FeatureSpec) - if !ok { - return nil, fmt.Errorf("obj %v is not a FeatureSpec or *FeatureSpec", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*FeatureSpec) - if !ok { - actualNotPointer, ok := a.(FeatureSpec) - if !ok { - return nil, fmt.Errorf("obj %v is not a FeatureSpec", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Multiclusteringress, actual.Multiclusteringress, dcl.DiffInfo{ObjectFunction: compareFeatureSpecMulticlusteringressNewStyle, EmptyObject: EmptyFeatureSpecMulticlusteringress, OperationSelector: dcl.TriggersOperation("updateFeatureUpdateFeatureOperation")}, fn.AddNest("Multiclusteringress")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Fleetobservability, actual.Fleetobservability, dcl.DiffInfo{ObjectFunction: compareFeatureSpecFleetobservabilityNewStyle, EmptyObject: EmptyFeatureSpecFleetobservability, OperationSelector: dcl.TriggersOperation("updateFeatureUpdateFeatureOperation")}, fn.AddNest("Fleetobservability")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareFeatureSpecMulticlusteringressNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*FeatureSpecMulticlusteringress) - if !ok { - desiredNotPointer, ok := d.(FeatureSpecMulticlusteringress) - if !ok { - return nil, fmt.Errorf("obj %v is not a FeatureSpecMulticlusteringress or *FeatureSpecMulticlusteringress", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*FeatureSpecMulticlusteringress) - if !ok { - actualNotPointer, ok := a.(FeatureSpecMulticlusteringress) - if !ok { - return nil, fmt.Errorf("obj %v is not a FeatureSpecMulticlusteringress", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.ConfigMembership, actual.ConfigMembership, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.TriggersOperation("updateFeatureUpdateFeatureOperation")}, fn.AddNest("ConfigMembership")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareFeatureSpecFleetobservabilityNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*FeatureSpecFleetobservability) - if !ok { - desiredNotPointer, ok := d.(FeatureSpecFleetobservability) - if !ok { - return nil, fmt.Errorf("obj %v is not a FeatureSpecFleetobservability or *FeatureSpecFleetobservability", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*FeatureSpecFleetobservability) - if !ok { - actualNotPointer, ok := a.(FeatureSpecFleetobservability) - if !ok { - return nil, fmt.Errorf("obj %v is not a FeatureSpecFleetobservability", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.LoggingConfig, actual.LoggingConfig, dcl.DiffInfo{ObjectFunction: compareFeatureSpecFleetobservabilityLoggingConfigNewStyle, EmptyObject: EmptyFeatureSpecFleetobservabilityLoggingConfig, OperationSelector: dcl.TriggersOperation("updateFeatureUpdateFeatureOperation")}, fn.AddNest("LoggingConfig")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareFeatureSpecFleetobservabilityLoggingConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*FeatureSpecFleetobservabilityLoggingConfig) - if !ok { - desiredNotPointer, ok := d.(FeatureSpecFleetobservabilityLoggingConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a FeatureSpecFleetobservabilityLoggingConfig or *FeatureSpecFleetobservabilityLoggingConfig", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*FeatureSpecFleetobservabilityLoggingConfig) - if !ok { - actualNotPointer, ok := a.(FeatureSpecFleetobservabilityLoggingConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a FeatureSpecFleetobservabilityLoggingConfig", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.DefaultConfig, actual.DefaultConfig, dcl.DiffInfo{ObjectFunction: compareFeatureSpecFleetobservabilityLoggingConfigDefaultConfigNewStyle, EmptyObject: EmptyFeatureSpecFleetobservabilityLoggingConfigDefaultConfig, OperationSelector: dcl.TriggersOperation("updateFeatureUpdateFeatureOperation")}, fn.AddNest("DefaultConfig")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.FleetScopeLogsConfig, actual.FleetScopeLogsConfig, dcl.DiffInfo{ObjectFunction: compareFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigNewStyle, EmptyObject: EmptyFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig, OperationSelector: dcl.TriggersOperation("updateFeatureUpdateFeatureOperation")}, fn.AddNest("FleetScopeLogsConfig")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareFeatureSpecFleetobservabilityLoggingConfigDefaultConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*FeatureSpecFleetobservabilityLoggingConfigDefaultConfig) - if !ok { - desiredNotPointer, ok := d.(FeatureSpecFleetobservabilityLoggingConfigDefaultConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a FeatureSpecFleetobservabilityLoggingConfigDefaultConfig or *FeatureSpecFleetobservabilityLoggingConfigDefaultConfig", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*FeatureSpecFleetobservabilityLoggingConfigDefaultConfig) - if !ok { - actualNotPointer, ok := a.(FeatureSpecFleetobservabilityLoggingConfigDefaultConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a FeatureSpecFleetobservabilityLoggingConfigDefaultConfig", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Mode, actual.Mode, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateFeatureUpdateFeatureOperation")}, fn.AddNest("Mode")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig) - if !ok { - desiredNotPointer, ok := d.(FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig or *FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig) - if !ok { - actualNotPointer, ok := a.(FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig) - if !ok { - return nil, fmt.Errorf("obj %v is not a FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Mode, actual.Mode, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateFeatureUpdateFeatureOperation")}, fn.AddNest("Mode")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareFeatureStateNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*FeatureState) - if !ok { - desiredNotPointer, ok := d.(FeatureState) - if !ok { - return nil, fmt.Errorf("obj %v is not a FeatureState or *FeatureState", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*FeatureState) - if !ok { - actualNotPointer, ok := a.(FeatureState) - if !ok { - return nil, fmt.Errorf("obj %v is not a FeatureState", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.State, actual.State, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareFeatureStateStateNewStyle, EmptyObject: EmptyFeatureStateState, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("State")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareFeatureStateStateNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*FeatureStateState) - if !ok { - desiredNotPointer, ok := d.(FeatureStateState) - if !ok { - return nil, fmt.Errorf("obj %v is not a FeatureStateState or *FeatureStateState", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*FeatureStateState) - if !ok { - actualNotPointer, ok := a.(FeatureStateState) - if !ok { - return nil, fmt.Errorf("obj %v is not a FeatureStateState", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Code, actual.Code, dcl.DiffInfo{OutputOnly: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Code")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Description, actual.Description, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Description")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -// urlNormalized returns a copy of the resource struct with values normalized -// for URL substitutions. For instance, it converts long-form self-links to -// short-form so they can be substituted in. -func (r *Feature) urlNormalized() *Feature { - normalized := dcl.Copy(*r).(Feature) - normalized.Name = dcl.SelfLinkToName(r.Name) - normalized.Project = dcl.SelfLinkToName(r.Project) - normalized.Location = dcl.SelfLinkToName(r.Location) - return &normalized -} - -func (r *Feature) updateURL(userBasePath, updateName string) (string, error) { - nr := r.urlNormalized() - if updateName == "UpdateFeature" { - fields := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "location": dcl.ValueOrEmptyString(nr.Location), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/features/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, fields), nil - - } - - return "", fmt.Errorf("unknown update name: %s", updateName) -} - -// marshal encodes the Feature resource into JSON for a Create request, and -// performs transformations from the resource schema to the API schema if -// necessary. 
-func (r *Feature) marshal(c *Client) ([]byte, error) { - m, err := expandFeature(c, r) - if err != nil { - return nil, fmt.Errorf("error marshalling Feature: %w", err) - } - - return json.Marshal(m) -} - -// unmarshalFeature decodes JSON responses into the Feature resource schema. -func unmarshalFeature(b []byte, c *Client, res *Feature) (*Feature, error) { - var m map[string]interface{} - if err := json.Unmarshal(b, &m); err != nil { - return nil, err - } - return unmarshalMapFeature(m, c, res) -} - -func unmarshalMapFeature(m map[string]interface{}, c *Client, res *Feature) (*Feature, error) { - - flattened := flattenFeature(c, m, res) - if flattened == nil { - return nil, fmt.Errorf("attempted to flatten empty json object") - } - return flattened, nil -} - -// expandFeature expands Feature into a JSON request object. -func expandFeature(c *Client, f *Feature) (map[string]interface{}, error) { - m := make(map[string]interface{}) - res := f - _ = res - if v, err := dcl.DeriveField("projects/%s/locations/%s/features/%s", f.Name, dcl.SelfLinkToName(f.Project), dcl.SelfLinkToName(f.Location), dcl.SelfLinkToName(f.Name)); err != nil { - return nil, fmt.Errorf("error expanding Name into name: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["name"] = v - } - if v := f.Labels; dcl.ValueShouldBeSent(v) { - m["labels"] = v - } - if v, err := expandFeatureSpec(c, f.Spec, res); err != nil { - return nil, fmt.Errorf("error expanding Spec into spec: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["spec"] = v - } - if v, err := dcl.EmptyValue(); err != nil { - return nil, fmt.Errorf("error expanding Project into project: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["project"] = v - } - if v, err := dcl.EmptyValue(); err != nil { - return nil, fmt.Errorf("error expanding Location into location: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["location"] = v - } - - return m, nil -} - -// flattenFeature flattens Feature from a JSON request 
object into the -// Feature type. -func flattenFeature(c *Client, i interface{}, res *Feature) *Feature { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - if len(m) == 0 { - return nil - } - - resultRes := &Feature{} - resultRes.Name = dcl.FlattenString(m["name"]) - resultRes.Labels = dcl.FlattenKeyValuePairs(m["labels"]) - resultRes.ResourceState = flattenFeatureResourceState(c, m["resourceState"], res) - resultRes.Spec = flattenFeatureSpec(c, m["spec"], res) - resultRes.State = flattenFeatureState(c, m["state"], res) - resultRes.CreateTime = dcl.FlattenString(m["createTime"]) - resultRes.UpdateTime = dcl.FlattenString(m["updateTime"]) - resultRes.DeleteTime = dcl.FlattenString(m["deleteTime"]) - resultRes.Project = dcl.FlattenString(m["project"]) - resultRes.Location = dcl.FlattenString(m["location"]) - - return resultRes -} - -// expandFeatureResourceStateMap expands the contents of FeatureResourceState into a JSON -// request object. -func expandFeatureResourceStateMap(c *Client, f map[string]FeatureResourceState, res *Feature) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandFeatureResourceState(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandFeatureResourceStateSlice expands the contents of FeatureResourceState into a JSON -// request object. -func expandFeatureResourceStateSlice(c *Client, f []FeatureResourceState, res *Feature) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandFeatureResourceState(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenFeatureResourceStateMap flattens the contents of FeatureResourceState from a JSON -// response object. 
-func flattenFeatureResourceStateMap(c *Client, i interface{}, res *Feature) map[string]FeatureResourceState { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]FeatureResourceState{} - } - - if len(a) == 0 { - return map[string]FeatureResourceState{} - } - - items := make(map[string]FeatureResourceState) - for k, item := range a { - items[k] = *flattenFeatureResourceState(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenFeatureResourceStateSlice flattens the contents of FeatureResourceState from a JSON -// response object. -func flattenFeatureResourceStateSlice(c *Client, i interface{}, res *Feature) []FeatureResourceState { - a, ok := i.([]interface{}) - if !ok { - return []FeatureResourceState{} - } - - if len(a) == 0 { - return []FeatureResourceState{} - } - - items := make([]FeatureResourceState, 0, len(a)) - for _, item := range a { - items = append(items, *flattenFeatureResourceState(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandFeatureResourceState expands an instance of FeatureResourceState into a JSON -// request object. -func expandFeatureResourceState(c *Client, f *FeatureResourceState, res *Feature) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - - return m, nil -} - -// flattenFeatureResourceState flattens an instance of FeatureResourceState from a JSON -// response object. -func flattenFeatureResourceState(c *Client, i interface{}, res *Feature) *FeatureResourceState { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &FeatureResourceState{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyFeatureResourceState - } - r.State = flattenFeatureResourceStateStateEnum(m["state"]) - r.HasResources = dcl.FlattenBool(m["hasResources"]) - - return r -} - -// expandFeatureSpecMap expands the contents of FeatureSpec into a JSON -// request object. 
-func expandFeatureSpecMap(c *Client, f map[string]FeatureSpec, res *Feature) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandFeatureSpec(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandFeatureSpecSlice expands the contents of FeatureSpec into a JSON -// request object. -func expandFeatureSpecSlice(c *Client, f []FeatureSpec, res *Feature) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandFeatureSpec(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenFeatureSpecMap flattens the contents of FeatureSpec from a JSON -// response object. -func flattenFeatureSpecMap(c *Client, i interface{}, res *Feature) map[string]FeatureSpec { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]FeatureSpec{} - } - - if len(a) == 0 { - return map[string]FeatureSpec{} - } - - items := make(map[string]FeatureSpec) - for k, item := range a { - items[k] = *flattenFeatureSpec(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenFeatureSpecSlice flattens the contents of FeatureSpec from a JSON -// response object. -func flattenFeatureSpecSlice(c *Client, i interface{}, res *Feature) []FeatureSpec { - a, ok := i.([]interface{}) - if !ok { - return []FeatureSpec{} - } - - if len(a) == 0 { - return []FeatureSpec{} - } - - items := make([]FeatureSpec, 0, len(a)) - for _, item := range a { - items = append(items, *flattenFeatureSpec(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandFeatureSpec expands an instance of FeatureSpec into a JSON -// request object. 
-func expandFeatureSpec(c *Client, f *FeatureSpec, res *Feature) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandFeatureSpecMulticlusteringress(c, f.Multiclusteringress, res); err != nil { - return nil, fmt.Errorf("error expanding Multiclusteringress into multiclusteringress: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["multiclusteringress"] = v - } - if v, err := expandFeatureSpecFleetobservability(c, f.Fleetobservability, res); err != nil { - return nil, fmt.Errorf("error expanding Fleetobservability into fleetobservability: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["fleetobservability"] = v - } - - return m, nil -} - -// flattenFeatureSpec flattens an instance of FeatureSpec from a JSON -// response object. -func flattenFeatureSpec(c *Client, i interface{}, res *Feature) *FeatureSpec { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &FeatureSpec{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyFeatureSpec - } - r.Multiclusteringress = flattenFeatureSpecMulticlusteringress(c, m["multiclusteringress"], res) - r.Fleetobservability = flattenFeatureSpecFleetobservability(c, m["fleetobservability"], res) - - return r -} - -// expandFeatureSpecMulticlusteringressMap expands the contents of FeatureSpecMulticlusteringress into a JSON -// request object. -func expandFeatureSpecMulticlusteringressMap(c *Client, f map[string]FeatureSpecMulticlusteringress, res *Feature) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandFeatureSpecMulticlusteringress(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandFeatureSpecMulticlusteringressSlice expands the contents of FeatureSpecMulticlusteringress into a JSON -// request object. 
-func expandFeatureSpecMulticlusteringressSlice(c *Client, f []FeatureSpecMulticlusteringress, res *Feature) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandFeatureSpecMulticlusteringress(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenFeatureSpecMulticlusteringressMap flattens the contents of FeatureSpecMulticlusteringress from a JSON -// response object. -func flattenFeatureSpecMulticlusteringressMap(c *Client, i interface{}, res *Feature) map[string]FeatureSpecMulticlusteringress { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]FeatureSpecMulticlusteringress{} - } - - if len(a) == 0 { - return map[string]FeatureSpecMulticlusteringress{} - } - - items := make(map[string]FeatureSpecMulticlusteringress) - for k, item := range a { - items[k] = *flattenFeatureSpecMulticlusteringress(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenFeatureSpecMulticlusteringressSlice flattens the contents of FeatureSpecMulticlusteringress from a JSON -// response object. -func flattenFeatureSpecMulticlusteringressSlice(c *Client, i interface{}, res *Feature) []FeatureSpecMulticlusteringress { - a, ok := i.([]interface{}) - if !ok { - return []FeatureSpecMulticlusteringress{} - } - - if len(a) == 0 { - return []FeatureSpecMulticlusteringress{} - } - - items := make([]FeatureSpecMulticlusteringress, 0, len(a)) - for _, item := range a { - items = append(items, *flattenFeatureSpecMulticlusteringress(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandFeatureSpecMulticlusteringress expands an instance of FeatureSpecMulticlusteringress into a JSON -// request object. 
-func expandFeatureSpecMulticlusteringress(c *Client, f *FeatureSpecMulticlusteringress, res *Feature) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.ConfigMembership; !dcl.IsEmptyValueIndirect(v) { - m["configMembership"] = v - } - - return m, nil -} - -// flattenFeatureSpecMulticlusteringress flattens an instance of FeatureSpecMulticlusteringress from a JSON -// response object. -func flattenFeatureSpecMulticlusteringress(c *Client, i interface{}, res *Feature) *FeatureSpecMulticlusteringress { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &FeatureSpecMulticlusteringress{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyFeatureSpecMulticlusteringress - } - r.ConfigMembership = dcl.FlattenString(m["configMembership"]) - - return r -} - -// expandFeatureSpecFleetobservabilityMap expands the contents of FeatureSpecFleetobservability into a JSON -// request object. -func expandFeatureSpecFleetobservabilityMap(c *Client, f map[string]FeatureSpecFleetobservability, res *Feature) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandFeatureSpecFleetobservability(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandFeatureSpecFleetobservabilitySlice expands the contents of FeatureSpecFleetobservability into a JSON -// request object. 
-func expandFeatureSpecFleetobservabilitySlice(c *Client, f []FeatureSpecFleetobservability, res *Feature) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandFeatureSpecFleetobservability(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenFeatureSpecFleetobservabilityMap flattens the contents of FeatureSpecFleetobservability from a JSON -// response object. -func flattenFeatureSpecFleetobservabilityMap(c *Client, i interface{}, res *Feature) map[string]FeatureSpecFleetobservability { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]FeatureSpecFleetobservability{} - } - - if len(a) == 0 { - return map[string]FeatureSpecFleetobservability{} - } - - items := make(map[string]FeatureSpecFleetobservability) - for k, item := range a { - items[k] = *flattenFeatureSpecFleetobservability(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenFeatureSpecFleetobservabilitySlice flattens the contents of FeatureSpecFleetobservability from a JSON -// response object. -func flattenFeatureSpecFleetobservabilitySlice(c *Client, i interface{}, res *Feature) []FeatureSpecFleetobservability { - a, ok := i.([]interface{}) - if !ok { - return []FeatureSpecFleetobservability{} - } - - if len(a) == 0 { - return []FeatureSpecFleetobservability{} - } - - items := make([]FeatureSpecFleetobservability, 0, len(a)) - for _, item := range a { - items = append(items, *flattenFeatureSpecFleetobservability(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandFeatureSpecFleetobservability expands an instance of FeatureSpecFleetobservability into a JSON -// request object. 
-func expandFeatureSpecFleetobservability(c *Client, f *FeatureSpecFleetobservability, res *Feature) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandFeatureSpecFleetobservabilityLoggingConfig(c, f.LoggingConfig, res); err != nil { - return nil, fmt.Errorf("error expanding LoggingConfig into loggingConfig: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["loggingConfig"] = v - } - - return m, nil -} - -// flattenFeatureSpecFleetobservability flattens an instance of FeatureSpecFleetobservability from a JSON -// response object. -func flattenFeatureSpecFleetobservability(c *Client, i interface{}, res *Feature) *FeatureSpecFleetobservability { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &FeatureSpecFleetobservability{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyFeatureSpecFleetobservability - } - r.LoggingConfig = flattenFeatureSpecFleetobservabilityLoggingConfig(c, m["loggingConfig"], res) - - return r -} - -// expandFeatureSpecFleetobservabilityLoggingConfigMap expands the contents of FeatureSpecFleetobservabilityLoggingConfig into a JSON -// request object. -func expandFeatureSpecFleetobservabilityLoggingConfigMap(c *Client, f map[string]FeatureSpecFleetobservabilityLoggingConfig, res *Feature) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandFeatureSpecFleetobservabilityLoggingConfig(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandFeatureSpecFleetobservabilityLoggingConfigSlice expands the contents of FeatureSpecFleetobservabilityLoggingConfig into a JSON -// request object. 
-func expandFeatureSpecFleetobservabilityLoggingConfigSlice(c *Client, f []FeatureSpecFleetobservabilityLoggingConfig, res *Feature) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandFeatureSpecFleetobservabilityLoggingConfig(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenFeatureSpecFleetobservabilityLoggingConfigMap flattens the contents of FeatureSpecFleetobservabilityLoggingConfig from a JSON -// response object. -func flattenFeatureSpecFleetobservabilityLoggingConfigMap(c *Client, i interface{}, res *Feature) map[string]FeatureSpecFleetobservabilityLoggingConfig { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]FeatureSpecFleetobservabilityLoggingConfig{} - } - - if len(a) == 0 { - return map[string]FeatureSpecFleetobservabilityLoggingConfig{} - } - - items := make(map[string]FeatureSpecFleetobservabilityLoggingConfig) - for k, item := range a { - items[k] = *flattenFeatureSpecFleetobservabilityLoggingConfig(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenFeatureSpecFleetobservabilityLoggingConfigSlice flattens the contents of FeatureSpecFleetobservabilityLoggingConfig from a JSON -// response object. 
-func flattenFeatureSpecFleetobservabilityLoggingConfigSlice(c *Client, i interface{}, res *Feature) []FeatureSpecFleetobservabilityLoggingConfig { - a, ok := i.([]interface{}) - if !ok { - return []FeatureSpecFleetobservabilityLoggingConfig{} - } - - if len(a) == 0 { - return []FeatureSpecFleetobservabilityLoggingConfig{} - } - - items := make([]FeatureSpecFleetobservabilityLoggingConfig, 0, len(a)) - for _, item := range a { - items = append(items, *flattenFeatureSpecFleetobservabilityLoggingConfig(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandFeatureSpecFleetobservabilityLoggingConfig expands an instance of FeatureSpecFleetobservabilityLoggingConfig into a JSON -// request object. -func expandFeatureSpecFleetobservabilityLoggingConfig(c *Client, f *FeatureSpecFleetobservabilityLoggingConfig, res *Feature) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandFeatureSpecFleetobservabilityLoggingConfigDefaultConfig(c, f.DefaultConfig, res); err != nil { - return nil, fmt.Errorf("error expanding DefaultConfig into defaultConfig: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["defaultConfig"] = v - } - if v, err := expandFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig(c, f.FleetScopeLogsConfig, res); err != nil { - return nil, fmt.Errorf("error expanding FleetScopeLogsConfig into fleetScopeLogsConfig: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["fleetScopeLogsConfig"] = v - } - - return m, nil -} - -// flattenFeatureSpecFleetobservabilityLoggingConfig flattens an instance of FeatureSpecFleetobservabilityLoggingConfig from a JSON -// response object. 
-func flattenFeatureSpecFleetobservabilityLoggingConfig(c *Client, i interface{}, res *Feature) *FeatureSpecFleetobservabilityLoggingConfig { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &FeatureSpecFleetobservabilityLoggingConfig{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyFeatureSpecFleetobservabilityLoggingConfig - } - r.DefaultConfig = flattenFeatureSpecFleetobservabilityLoggingConfigDefaultConfig(c, m["defaultConfig"], res) - r.FleetScopeLogsConfig = flattenFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig(c, m["fleetScopeLogsConfig"], res) - - return r -} - -// expandFeatureSpecFleetobservabilityLoggingConfigDefaultConfigMap expands the contents of FeatureSpecFleetobservabilityLoggingConfigDefaultConfig into a JSON -// request object. -func expandFeatureSpecFleetobservabilityLoggingConfigDefaultConfigMap(c *Client, f map[string]FeatureSpecFleetobservabilityLoggingConfigDefaultConfig, res *Feature) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandFeatureSpecFleetobservabilityLoggingConfigDefaultConfig(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandFeatureSpecFleetobservabilityLoggingConfigDefaultConfigSlice expands the contents of FeatureSpecFleetobservabilityLoggingConfigDefaultConfig into a JSON -// request object. 
-func expandFeatureSpecFleetobservabilityLoggingConfigDefaultConfigSlice(c *Client, f []FeatureSpecFleetobservabilityLoggingConfigDefaultConfig, res *Feature) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandFeatureSpecFleetobservabilityLoggingConfigDefaultConfig(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenFeatureSpecFleetobservabilityLoggingConfigDefaultConfigMap flattens the contents of FeatureSpecFleetobservabilityLoggingConfigDefaultConfig from a JSON -// response object. -func flattenFeatureSpecFleetobservabilityLoggingConfigDefaultConfigMap(c *Client, i interface{}, res *Feature) map[string]FeatureSpecFleetobservabilityLoggingConfigDefaultConfig { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]FeatureSpecFleetobservabilityLoggingConfigDefaultConfig{} - } - - if len(a) == 0 { - return map[string]FeatureSpecFleetobservabilityLoggingConfigDefaultConfig{} - } - - items := make(map[string]FeatureSpecFleetobservabilityLoggingConfigDefaultConfig) - for k, item := range a { - items[k] = *flattenFeatureSpecFleetobservabilityLoggingConfigDefaultConfig(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenFeatureSpecFleetobservabilityLoggingConfigDefaultConfigSlice flattens the contents of FeatureSpecFleetobservabilityLoggingConfigDefaultConfig from a JSON -// response object. 
-func flattenFeatureSpecFleetobservabilityLoggingConfigDefaultConfigSlice(c *Client, i interface{}, res *Feature) []FeatureSpecFleetobservabilityLoggingConfigDefaultConfig { - a, ok := i.([]interface{}) - if !ok { - return []FeatureSpecFleetobservabilityLoggingConfigDefaultConfig{} - } - - if len(a) == 0 { - return []FeatureSpecFleetobservabilityLoggingConfigDefaultConfig{} - } - - items := make([]FeatureSpecFleetobservabilityLoggingConfigDefaultConfig, 0, len(a)) - for _, item := range a { - items = append(items, *flattenFeatureSpecFleetobservabilityLoggingConfigDefaultConfig(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandFeatureSpecFleetobservabilityLoggingConfigDefaultConfig expands an instance of FeatureSpecFleetobservabilityLoggingConfigDefaultConfig into a JSON -// request object. -func expandFeatureSpecFleetobservabilityLoggingConfigDefaultConfig(c *Client, f *FeatureSpecFleetobservabilityLoggingConfigDefaultConfig, res *Feature) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Mode; !dcl.IsEmptyValueIndirect(v) { - m["mode"] = v - } - - return m, nil -} - -// flattenFeatureSpecFleetobservabilityLoggingConfigDefaultConfig flattens an instance of FeatureSpecFleetobservabilityLoggingConfigDefaultConfig from a JSON -// response object. 
-func flattenFeatureSpecFleetobservabilityLoggingConfigDefaultConfig(c *Client, i interface{}, res *Feature) *FeatureSpecFleetobservabilityLoggingConfigDefaultConfig { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &FeatureSpecFleetobservabilityLoggingConfigDefaultConfig{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyFeatureSpecFleetobservabilityLoggingConfigDefaultConfig - } - r.Mode = flattenFeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum(m["mode"]) - - return r -} - -// expandFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigMap expands the contents of FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig into a JSON -// request object. -func expandFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigMap(c *Client, f map[string]FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig, res *Feature) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigSlice expands the contents of FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig into a JSON -// request object. 
-func expandFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigSlice(c *Client, f []FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig, res *Feature) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigMap flattens the contents of FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig from a JSON -// response object. -func flattenFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigMap(c *Client, i interface{}, res *Feature) map[string]FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig{} - } - - if len(a) == 0 { - return map[string]FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig{} - } - - items := make(map[string]FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig) - for k, item := range a { - items[k] = *flattenFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigSlice flattens the contents of FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig from a JSON -// response object. 
-func flattenFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigSlice(c *Client, i interface{}, res *Feature) []FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig { - a, ok := i.([]interface{}) - if !ok { - return []FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig{} - } - - if len(a) == 0 { - return []FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig{} - } - - items := make([]FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig, 0, len(a)) - for _, item := range a { - items = append(items, *flattenFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig expands an instance of FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig into a JSON -// request object. -func expandFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig(c *Client, f *FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig, res *Feature) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Mode; !dcl.IsEmptyValueIndirect(v) { - m["mode"] = v - } - - return m, nil -} - -// flattenFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig flattens an instance of FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig from a JSON -// response object. 
-func flattenFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig(c *Client, i interface{}, res *Feature) *FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig - } - r.Mode = flattenFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum(m["mode"]) - - return r -} - -// expandFeatureStateMap expands the contents of FeatureState into a JSON -// request object. -func expandFeatureStateMap(c *Client, f map[string]FeatureState, res *Feature) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandFeatureState(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandFeatureStateSlice expands the contents of FeatureState into a JSON -// request object. -func expandFeatureStateSlice(c *Client, f []FeatureState, res *Feature) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandFeatureState(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenFeatureStateMap flattens the contents of FeatureState from a JSON -// response object. 
-func flattenFeatureStateMap(c *Client, i interface{}, res *Feature) map[string]FeatureState { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]FeatureState{} - } - - if len(a) == 0 { - return map[string]FeatureState{} - } - - items := make(map[string]FeatureState) - for k, item := range a { - items[k] = *flattenFeatureState(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenFeatureStateSlice flattens the contents of FeatureState from a JSON -// response object. -func flattenFeatureStateSlice(c *Client, i interface{}, res *Feature) []FeatureState { - a, ok := i.([]interface{}) - if !ok { - return []FeatureState{} - } - - if len(a) == 0 { - return []FeatureState{} - } - - items := make([]FeatureState, 0, len(a)) - for _, item := range a { - items = append(items, *flattenFeatureState(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandFeatureState expands an instance of FeatureState into a JSON -// request object. -func expandFeatureState(c *Client, f *FeatureState, res *Feature) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - - return m, nil -} - -// flattenFeatureState flattens an instance of FeatureState from a JSON -// response object. -func flattenFeatureState(c *Client, i interface{}, res *Feature) *FeatureState { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &FeatureState{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyFeatureState - } - r.State = flattenFeatureStateState(c, m["state"], res) - - return r -} - -// expandFeatureStateStateMap expands the contents of FeatureStateState into a JSON -// request object. 
-func expandFeatureStateStateMap(c *Client, f map[string]FeatureStateState, res *Feature) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandFeatureStateState(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandFeatureStateStateSlice expands the contents of FeatureStateState into a JSON -// request object. -func expandFeatureStateStateSlice(c *Client, f []FeatureStateState, res *Feature) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandFeatureStateState(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenFeatureStateStateMap flattens the contents of FeatureStateState from a JSON -// response object. -func flattenFeatureStateStateMap(c *Client, i interface{}, res *Feature) map[string]FeatureStateState { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]FeatureStateState{} - } - - if len(a) == 0 { - return map[string]FeatureStateState{} - } - - items := make(map[string]FeatureStateState) - for k, item := range a { - items[k] = *flattenFeatureStateState(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenFeatureStateStateSlice flattens the contents of FeatureStateState from a JSON -// response object. 
-func flattenFeatureStateStateSlice(c *Client, i interface{}, res *Feature) []FeatureStateState { - a, ok := i.([]interface{}) - if !ok { - return []FeatureStateState{} - } - - if len(a) == 0 { - return []FeatureStateState{} - } - - items := make([]FeatureStateState, 0, len(a)) - for _, item := range a { - items = append(items, *flattenFeatureStateState(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandFeatureStateState expands an instance of FeatureStateState into a JSON -// request object. -func expandFeatureStateState(c *Client, f *FeatureStateState, res *Feature) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - - return m, nil -} - -// flattenFeatureStateState flattens an instance of FeatureStateState from a JSON -// response object. -func flattenFeatureStateState(c *Client, i interface{}, res *Feature) *FeatureStateState { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &FeatureStateState{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyFeatureStateState - } - r.Code = flattenFeatureStateStateCodeEnum(m["code"]) - r.Description = dcl.FlattenString(m["description"]) - r.UpdateTime = dcl.FlattenString(m["updateTime"]) - - return r -} - -// flattenFeatureResourceStateStateEnumMap flattens the contents of FeatureResourceStateStateEnum from a JSON -// response object. 
-func flattenFeatureResourceStateStateEnumMap(c *Client, i interface{}, res *Feature) map[string]FeatureResourceStateStateEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]FeatureResourceStateStateEnum{} - } - - if len(a) == 0 { - return map[string]FeatureResourceStateStateEnum{} - } - - items := make(map[string]FeatureResourceStateStateEnum) - for k, item := range a { - items[k] = *flattenFeatureResourceStateStateEnum(item.(interface{})) - } - - return items -} - -// flattenFeatureResourceStateStateEnumSlice flattens the contents of FeatureResourceStateStateEnum from a JSON -// response object. -func flattenFeatureResourceStateStateEnumSlice(c *Client, i interface{}, res *Feature) []FeatureResourceStateStateEnum { - a, ok := i.([]interface{}) - if !ok { - return []FeatureResourceStateStateEnum{} - } - - if len(a) == 0 { - return []FeatureResourceStateStateEnum{} - } - - items := make([]FeatureResourceStateStateEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenFeatureResourceStateStateEnum(item.(interface{}))) - } - - return items -} - -// flattenFeatureResourceStateStateEnum asserts that an interface is a string, and returns a -// pointer to a *FeatureResourceStateStateEnum with the same value as that string. -func flattenFeatureResourceStateStateEnum(i interface{}) *FeatureResourceStateStateEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return FeatureResourceStateStateEnumRef(s) -} - -// flattenFeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnumMap flattens the contents of FeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum from a JSON -// response object. 
-func flattenFeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnumMap(c *Client, i interface{}, res *Feature) map[string]FeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]FeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum{} - } - - if len(a) == 0 { - return map[string]FeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum{} - } - - items := make(map[string]FeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum) - for k, item := range a { - items[k] = *flattenFeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum(item.(interface{})) - } - - return items -} - -// flattenFeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnumSlice flattens the contents of FeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum from a JSON -// response object. -func flattenFeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnumSlice(c *Client, i interface{}, res *Feature) []FeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum { - a, ok := i.([]interface{}) - if !ok { - return []FeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum{} - } - - if len(a) == 0 { - return []FeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum{} - } - - items := make([]FeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenFeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum(item.(interface{}))) - } - - return items -} - -// flattenFeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum asserts that an interface is a string, and returns a -// pointer to a *FeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum with the same value as that string. 
-func flattenFeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum(i interface{}) *FeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return FeatureSpecFleetobservabilityLoggingConfigDefaultConfigModeEnumRef(s) -} - -// flattenFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnumMap flattens the contents of FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum from a JSON -// response object. -func flattenFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnumMap(c *Client, i interface{}, res *Feature) map[string]FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum{} - } - - if len(a) == 0 { - return map[string]FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum{} - } - - items := make(map[string]FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum) - for k, item := range a { - items[k] = *flattenFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum(item.(interface{})) - } - - return items -} - -// flattenFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnumSlice flattens the contents of FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum from a JSON -// response object. 
-func flattenFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnumSlice(c *Client, i interface{}, res *Feature) []FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum { - a, ok := i.([]interface{}) - if !ok { - return []FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum{} - } - - if len(a) == 0 { - return []FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum{} - } - - items := make([]FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum(item.(interface{}))) - } - - return items -} - -// flattenFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum asserts that an interface is a string, and returns a -// pointer to a *FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum with the same value as that string. -func flattenFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum(i interface{}) *FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigModeEnumRef(s) -} - -// flattenFeatureStateStateCodeEnumMap flattens the contents of FeatureStateStateCodeEnum from a JSON -// response object. 
-func flattenFeatureStateStateCodeEnumMap(c *Client, i interface{}, res *Feature) map[string]FeatureStateStateCodeEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]FeatureStateStateCodeEnum{} - } - - if len(a) == 0 { - return map[string]FeatureStateStateCodeEnum{} - } - - items := make(map[string]FeatureStateStateCodeEnum) - for k, item := range a { - items[k] = *flattenFeatureStateStateCodeEnum(item.(interface{})) - } - - return items -} - -// flattenFeatureStateStateCodeEnumSlice flattens the contents of FeatureStateStateCodeEnum from a JSON -// response object. -func flattenFeatureStateStateCodeEnumSlice(c *Client, i interface{}, res *Feature) []FeatureStateStateCodeEnum { - a, ok := i.([]interface{}) - if !ok { - return []FeatureStateStateCodeEnum{} - } - - if len(a) == 0 { - return []FeatureStateStateCodeEnum{} - } - - items := make([]FeatureStateStateCodeEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenFeatureStateStateCodeEnum(item.(interface{}))) - } - - return items -} - -// flattenFeatureStateStateCodeEnum asserts that an interface is a string, and returns a -// pointer to a *FeatureStateStateCodeEnum with the same value as that string. -func flattenFeatureStateStateCodeEnum(i interface{}) *FeatureStateStateCodeEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return FeatureStateStateCodeEnumRef(s) -} - -// This function returns a matcher that checks whether a serialized resource matches this resource -// in its parameters (as defined by the fields in a Get, which definitionally define resource -// identity). This is useful in extracting the element from a List call. 
-func (r *Feature) matcher(c *Client) func([]byte) bool { - return func(b []byte) bool { - cr, err := unmarshalFeature(b, c, r) - if err != nil { - c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.") - return false - } - nr := r.urlNormalized() - ncr := cr.urlNormalized() - c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr) - - if nr.Project == nil && ncr.Project == nil { - c.Config.Logger.Info("Both Project fields null - considering equal.") - } else if nr.Project == nil || ncr.Project == nil { - c.Config.Logger.Info("Only one Project field is null - considering unequal.") - return false - } else if *nr.Project != *ncr.Project { - return false - } - if nr.Location == nil && ncr.Location == nil { - c.Config.Logger.Info("Both Location fields null - considering equal.") - } else if nr.Location == nil || ncr.Location == nil { - c.Config.Logger.Info("Only one Location field is null - considering unequal.") - return false - } else if *nr.Location != *ncr.Location { - return false - } - if nr.Name == nil && ncr.Name == nil { - c.Config.Logger.Info("Both Name fields null - considering equal.") - } else if nr.Name == nil || ncr.Name == nil { - c.Config.Logger.Info("Only one Name field is null - considering unequal.") - return false - } else if *nr.Name != *ncr.Name { - return false - } - return true - } -} - -type featureDiff struct { - // The diff should include one or the other of RequiresRecreate or UpdateOp. - RequiresRecreate bool - UpdateOp featureApiOperation - FieldName string // used for error logging -} - -func convertFieldDiffsToFeatureDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]featureDiff, error) { - opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff) - // Map each operation name to the field diffs associated with it. 
- for _, fd := range fds { - for _, ro := range fd.ResultingOperation { - if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok { - fieldDiffs = append(fieldDiffs, fd) - opNamesToFieldDiffs[ro] = fieldDiffs - } else { - config.Logger.Infof("%s required due to diff: %v", ro, fd) - opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd} - } - } - } - var diffs []featureDiff - // For each operation name, create a featureDiff which contains the operation. - for opName, fieldDiffs := range opNamesToFieldDiffs { - // Use the first field diff's field name for logging required recreate error. - diff := featureDiff{FieldName: fieldDiffs[0].FieldName} - if opName == "Recreate" { - diff.RequiresRecreate = true - } else { - apiOp, err := convertOpNameToFeatureApiOperation(opName, fieldDiffs, opts...) - if err != nil { - return diffs, err - } - diff.UpdateOp = apiOp - } - diffs = append(diffs, diff) - } - return diffs, nil -} - -func convertOpNameToFeatureApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (featureApiOperation, error) { - switch opName { - - case "updateFeatureUpdateFeatureOperation": - return &updateFeatureUpdateFeatureOperation{FieldDiffs: fieldDiffs}, nil - - default: - return nil, fmt.Errorf("no such operation with name: %v", opName) - } -} - -func extractFeatureFields(r *Feature) error { - vResourceState := r.ResourceState - if vResourceState == nil { - // note: explicitly not the empty object. - vResourceState = &FeatureResourceState{} - } - if err := extractFeatureResourceStateFields(r, vResourceState); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vResourceState) { - r.ResourceState = vResourceState - } - vSpec := r.Spec - if vSpec == nil { - // note: explicitly not the empty object. 
- vSpec = &FeatureSpec{} - } - if err := extractFeatureSpecFields(r, vSpec); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSpec) { - r.Spec = vSpec - } - vState := r.State - if vState == nil { - // note: explicitly not the empty object. - vState = &FeatureState{} - } - if err := extractFeatureStateFields(r, vState); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vState) { - r.State = vState - } - return nil -} -func extractFeatureResourceStateFields(r *Feature, o *FeatureResourceState) error { - return nil -} -func extractFeatureSpecFields(r *Feature, o *FeatureSpec) error { - vMulticlusteringress := o.Multiclusteringress - if vMulticlusteringress == nil { - // note: explicitly not the empty object. - vMulticlusteringress = &FeatureSpecMulticlusteringress{} - } - if err := extractFeatureSpecMulticlusteringressFields(r, vMulticlusteringress); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vMulticlusteringress) { - o.Multiclusteringress = vMulticlusteringress - } - vFleetobservability := o.Fleetobservability - if vFleetobservability == nil { - // note: explicitly not the empty object. - vFleetobservability = &FeatureSpecFleetobservability{} - } - if err := extractFeatureSpecFleetobservabilityFields(r, vFleetobservability); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vFleetobservability) { - o.Fleetobservability = vFleetobservability - } - return nil -} -func extractFeatureSpecMulticlusteringressFields(r *Feature, o *FeatureSpecMulticlusteringress) error { - return nil -} -func extractFeatureSpecFleetobservabilityFields(r *Feature, o *FeatureSpecFleetobservability) error { - vLoggingConfig := o.LoggingConfig - if vLoggingConfig == nil { - // note: explicitly not the empty object. 
- vLoggingConfig = &FeatureSpecFleetobservabilityLoggingConfig{} - } - if err := extractFeatureSpecFleetobservabilityLoggingConfigFields(r, vLoggingConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vLoggingConfig) { - o.LoggingConfig = vLoggingConfig - } - return nil -} -func extractFeatureSpecFleetobservabilityLoggingConfigFields(r *Feature, o *FeatureSpecFleetobservabilityLoggingConfig) error { - vDefaultConfig := o.DefaultConfig - if vDefaultConfig == nil { - // note: explicitly not the empty object. - vDefaultConfig = &FeatureSpecFleetobservabilityLoggingConfigDefaultConfig{} - } - if err := extractFeatureSpecFleetobservabilityLoggingConfigDefaultConfigFields(r, vDefaultConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vDefaultConfig) { - o.DefaultConfig = vDefaultConfig - } - vFleetScopeLogsConfig := o.FleetScopeLogsConfig - if vFleetScopeLogsConfig == nil { - // note: explicitly not the empty object. - vFleetScopeLogsConfig = &FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig{} - } - if err := extractFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigFields(r, vFleetScopeLogsConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vFleetScopeLogsConfig) { - o.FleetScopeLogsConfig = vFleetScopeLogsConfig - } - return nil -} -func extractFeatureSpecFleetobservabilityLoggingConfigDefaultConfigFields(r *Feature, o *FeatureSpecFleetobservabilityLoggingConfigDefaultConfig) error { - return nil -} -func extractFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigFields(r *Feature, o *FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig) error { - return nil -} -func extractFeatureStateFields(r *Feature, o *FeatureState) error { - vState := o.State - if vState == nil { - // note: explicitly not the empty object. 
- vState = &FeatureStateState{} - } - if err := extractFeatureStateStateFields(r, vState); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vState) { - o.State = vState - } - return nil -} -func extractFeatureStateStateFields(r *Feature, o *FeatureStateState) error { - return nil -} - -func postReadExtractFeatureFields(r *Feature) error { - vResourceState := r.ResourceState - if vResourceState == nil { - // note: explicitly not the empty object. - vResourceState = &FeatureResourceState{} - } - if err := postReadExtractFeatureResourceStateFields(r, vResourceState); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vResourceState) { - r.ResourceState = vResourceState - } - vSpec := r.Spec - if vSpec == nil { - // note: explicitly not the empty object. - vSpec = &FeatureSpec{} - } - if err := postReadExtractFeatureSpecFields(r, vSpec); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSpec) { - r.Spec = vSpec - } - vState := r.State - if vState == nil { - // note: explicitly not the empty object. - vState = &FeatureState{} - } - if err := postReadExtractFeatureStateFields(r, vState); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vState) { - r.State = vState - } - return nil -} -func postReadExtractFeatureResourceStateFields(r *Feature, o *FeatureResourceState) error { - return nil -} -func postReadExtractFeatureSpecFields(r *Feature, o *FeatureSpec) error { - vMulticlusteringress := o.Multiclusteringress - if vMulticlusteringress == nil { - // note: explicitly not the empty object. - vMulticlusteringress = &FeatureSpecMulticlusteringress{} - } - if err := extractFeatureSpecMulticlusteringressFields(r, vMulticlusteringress); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vMulticlusteringress) { - o.Multiclusteringress = vMulticlusteringress - } - vFleetobservability := o.Fleetobservability - if vFleetobservability == nil { - // note: explicitly not the empty object. 
- vFleetobservability = &FeatureSpecFleetobservability{} - } - if err := extractFeatureSpecFleetobservabilityFields(r, vFleetobservability); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vFleetobservability) { - o.Fleetobservability = vFleetobservability - } - return nil -} -func postReadExtractFeatureSpecMulticlusteringressFields(r *Feature, o *FeatureSpecMulticlusteringress) error { - return nil -} -func postReadExtractFeatureSpecFleetobservabilityFields(r *Feature, o *FeatureSpecFleetobservability) error { - vLoggingConfig := o.LoggingConfig - if vLoggingConfig == nil { - // note: explicitly not the empty object. - vLoggingConfig = &FeatureSpecFleetobservabilityLoggingConfig{} - } - if err := extractFeatureSpecFleetobservabilityLoggingConfigFields(r, vLoggingConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vLoggingConfig) { - o.LoggingConfig = vLoggingConfig - } - return nil -} -func postReadExtractFeatureSpecFleetobservabilityLoggingConfigFields(r *Feature, o *FeatureSpecFleetobservabilityLoggingConfig) error { - vDefaultConfig := o.DefaultConfig - if vDefaultConfig == nil { - // note: explicitly not the empty object. - vDefaultConfig = &FeatureSpecFleetobservabilityLoggingConfigDefaultConfig{} - } - if err := extractFeatureSpecFleetobservabilityLoggingConfigDefaultConfigFields(r, vDefaultConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vDefaultConfig) { - o.DefaultConfig = vDefaultConfig - } - vFleetScopeLogsConfig := o.FleetScopeLogsConfig - if vFleetScopeLogsConfig == nil { - // note: explicitly not the empty object. 
- vFleetScopeLogsConfig = &FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig{} - } - if err := extractFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigFields(r, vFleetScopeLogsConfig); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vFleetScopeLogsConfig) { - o.FleetScopeLogsConfig = vFleetScopeLogsConfig - } - return nil -} -func postReadExtractFeatureSpecFleetobservabilityLoggingConfigDefaultConfigFields(r *Feature, o *FeatureSpecFleetobservabilityLoggingConfigDefaultConfig) error { - return nil -} -func postReadExtractFeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigFields(r *Feature, o *FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig) error { - return nil -} -func postReadExtractFeatureStateFields(r *Feature, o *FeatureState) error { - vState := o.State - if vState == nil { - // note: explicitly not the empty object. - vState = &FeatureStateState{} - } - if err := extractFeatureStateStateFields(r, vState); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vState) { - o.State = vState - } - return nil -} -func postReadExtractFeatureStateStateFields(r *Feature, o *FeatureStateState) error { - return nil -} diff --git a/mmv1/third_party/terraform/services/gkehub/hub_beta_utils.go.tmpl b/mmv1/third_party/terraform/services/gkehub/hub_beta_utils.go.tmpl deleted file mode 100644 index 16fed25491ff..000000000000 --- a/mmv1/third_party/terraform/services/gkehub/hub_beta_utils.go.tmpl +++ /dev/null @@ -1,122 +0,0 @@ -package gkehub - -import ( - "bytes" - "context" - "strings" - - dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" - "github.com/hashicorp/terraform-provider-google/google/tpgdclresource/operations" -) - -func expandHubReferenceLink(_ *Client, val *string, _ *Membership) (interface{}, error) { - if val == nil { - return nil, nil - } - - v := *val - - if strings.HasPrefix(v, "https:") { - return strings.Replace(strings.Replace(strings.Replace(*val, 
"https:", "", 1), "v1/", "", 1), "v1beta1/", "", 1), nil - } else if strings.HasPrefix(v, "//container.googleapis.com") { - return v, nil - } - return "//container.googleapis.com/" + v, nil -} - -func flattenHubReferenceLink(_ *Client, config interface{}, _ *Membership) *string { - v, ok := config.(string) - if !ok { - return nil - } - - v = strings.Replace(v, "//container.googleapis.com/", "", 1) - - return &v -} - -// Feature has custom url methods because it uses v1beta endpoints instead of v1beta1. -func (r *Feature) getURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "location": dcl.ValueOrEmptyString(nr.Location), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/features/{{ "{{" }}name{{ "}}" }}", "https://gkehub.googleapis.com/v1beta/", userBasePath, params), nil -} - -func (r *Feature) listURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "location": dcl.ValueOrEmptyString(nr.Location), - } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/features", "https://gkehub.googleapis.com/v1beta/", userBasePath, params), nil - -} - -func (r *Feature) createURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "name": dcl.ValueOrEmptyString(nr.Name), - "project": dcl.ValueOrEmptyString(nr.Project), - "location": dcl.ValueOrEmptyString(nr.Location), - } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/features?featureId={{ "{{" }}name{{ "}}" }}", "https://gkehub.googleapis.com/v1beta/", userBasePath, params), nil - -} - -func (r *Feature) deleteURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := 
map[string]interface{}{ - "name": dcl.ValueOrEmptyString(nr.Name), - "project": dcl.ValueOrEmptyString(nr.Project), - "location": dcl.ValueOrEmptyString(nr.Location), - } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/features/{{ "{{" }}name{{ "}}" }}", "https://gkehub.googleapis.com/v1beta/", userBasePath, params), nil -} - -func (op *updateFeatureUpdateFeatureOperation) do(ctx context.Context, r *Feature, c *Client) error { - _, err := c.GetFeature(ctx, r) - if err != nil { - return err - } - - u, err := r.updateURL(c.Config.BasePath, "UpdateFeature") - if err != nil { - return err - } - u = strings.Replace(u, "v1beta1", "v1beta", 1) - u, err = dcl.AddQueryParams(u, map[string]string{"updateMask": "labels,spec"}) - if err != nil { - return err - } - - req, err := newUpdateFeatureUpdateFeatureRequest(ctx, r, c) - if err != nil { - return err - } - - c.Config.Logger.Infof("Created update: %#v", req) - body, err := marshalUpdateFeatureUpdateFeatureRequest(c, req) - if err != nil { - return err - } - resp, err := dcl.SendRequest(ctx, c.Config, "PATCH", u, bytes.NewBuffer(body), c.Config.RetryProvider) - if err != nil { - return err - } - - var o operations.StandardGCPOperation - if err := dcl.ParseResponse(resp.Response, &o); err != nil { - return err - } - err = o.Wait(ctx, c.Config, "https://gkehub.googleapis.com/v1beta/", "GET") - - if err != nil { - return err - } - - return nil -} diff --git a/mmv1/third_party/terraform/services/gkehub/hub_utils.go.tmpl b/mmv1/third_party/terraform/services/gkehub/hub_utils.go.tmpl index deed358e280c..7bba499538b1 100644 --- a/mmv1/third_party/terraform/services/gkehub/hub_utils.go.tmpl +++ b/mmv1/third_party/terraform/services/gkehub/hub_utils.go.tmpl @@ -9,14 +9,9 @@ import ( "io" "strings" -{{- if ne $.TargetVersionName "ga" }} - "google.golang.org/api/googleapi" -{{- end }} dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" 
"github.com/hashicorp/terraform-provider-google/google/tpgdclresource/operations" -{{- if eq $.TargetVersionName "ga" }} "google.golang.org/api/googleapi" -{{- end }} ) // getMembershipSpecs returns a map of membership specs taken from the get response of the feature membership's feature object. diff --git a/mmv1/third_party/terraform/services/gkehub/membership.go.tmpl b/mmv1/third_party/terraform/services/gkehub/membership.go.tmpl deleted file mode 100644 index e66ec693cda0..000000000000 --- a/mmv1/third_party/terraform/services/gkehub/membership.go.tmpl +++ /dev/null @@ -1,902 +0,0 @@ -package gkehub - -import ( - "bytes" - "context" - "crypto/sha256" - "encoding/json" - "fmt" - "time" - - "google.golang.org/api/googleapi" - dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" -) - -type Membership struct { - Endpoint *MembershipEndpoint `json:"endpoint"` - Name *string `json:"name"` - Labels map[string]string `json:"labels"` - Description *string `json:"description"` - State *MembershipState `json:"state"` - CreateTime *string `json:"createTime"` - UpdateTime *string `json:"updateTime"` - DeleteTime *string `json:"deleteTime"` - ExternalId *string `json:"externalId"` - LastConnectionTime *string `json:"lastConnectionTime"` - UniqueId *string `json:"uniqueId"` - Authority *MembershipAuthority `json:"authority"` - InfrastructureType *MembershipInfrastructureTypeEnum `json:"infrastructureType"` - Project *string `json:"project"` - Location *string `json:"location"` -} - -func (r *Membership) String() string { - return dcl.SprintResource(r) -} - -// The enum MembershipStateCodeEnum. -type MembershipStateCodeEnum string - -// MembershipStateCodeEnumRef returns a *MembershipStateCodeEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func MembershipStateCodeEnumRef(s string) *MembershipStateCodeEnum { - v := MembershipStateCodeEnum(s) - return &v -} - -func (v MembershipStateCodeEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"CODE_UNSPECIFIED", "CREATING", "READY", "DELETING", "UPDATING", "SERVICE_UPDATING"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "MembershipStateCodeEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum MembershipInfrastructureTypeEnum. -type MembershipInfrastructureTypeEnum string - -// MembershipInfrastructureTypeEnumRef returns a *MembershipInfrastructureTypeEnum with the value of string s -// If the empty string is provided, nil is returned. -func MembershipInfrastructureTypeEnumRef(s string) *MembershipInfrastructureTypeEnum { - v := MembershipInfrastructureTypeEnum(s) - return &v -} - -func (v MembershipInfrastructureTypeEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. 
- return nil - } - for _, s := range []string{"INFRASTRUCTURE_TYPE_UNSPECIFIED", "ON_PREM", "MULTI_CLOUD"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "MembershipInfrastructureTypeEnum", - Value: string(v), - Valid: []string{}, - } -} - -type MembershipEndpoint struct { - empty bool `json:"-"` - GkeCluster *MembershipEndpointGkeCluster `json:"gkeCluster"` - KubernetesMetadata *MembershipEndpointKubernetesMetadata `json:"kubernetesMetadata"` - KubernetesResource *MembershipEndpointKubernetesResource `json:"kubernetesResource"` -} - -type jsonMembershipEndpoint MembershipEndpoint - -func (r *MembershipEndpoint) UnmarshalJSON(data []byte) error { - var res jsonMembershipEndpoint - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyMembershipEndpoint - } else { - - r.GkeCluster = res.GkeCluster - - r.KubernetesMetadata = res.KubernetesMetadata - - r.KubernetesResource = res.KubernetesResource - - } - return nil -} - -// This object is used to assert a desired state where this MembershipEndpoint is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyMembershipEndpoint *MembershipEndpoint = &MembershipEndpoint{empty: true} - -func (r *MembershipEndpoint) Empty() bool { - return r.empty -} - -func (r *MembershipEndpoint) String() string { - return dcl.SprintResource(r) -} - -func (r *MembershipEndpoint) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type MembershipEndpointGkeCluster struct { - empty bool `json:"-"` - ResourceLink *string `json:"resourceLink"` -} - -type jsonMembershipEndpointGkeCluster MembershipEndpointGkeCluster - -func (r *MembershipEndpointGkeCluster) UnmarshalJSON(data []byte) error { - var res jsonMembershipEndpointGkeCluster - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyMembershipEndpointGkeCluster - } else { - - r.ResourceLink = res.ResourceLink - - } - return nil -} - -// This object is used to assert a desired state where this MembershipEndpointGkeCluster is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyMembershipEndpointGkeCluster *MembershipEndpointGkeCluster = &MembershipEndpointGkeCluster{empty: true} - -func (r *MembershipEndpointGkeCluster) Empty() bool { - return r.empty -} - -func (r *MembershipEndpointGkeCluster) String() string { - return dcl.SprintResource(r) -} - -func (r *MembershipEndpointGkeCluster) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type MembershipEndpointKubernetesMetadata struct { - empty bool `json:"-"` - KubernetesApiServerVersion *string `json:"kubernetesApiServerVersion"` - NodeProviderId *string `json:"nodeProviderId"` - NodeCount *int64 `json:"nodeCount"` - VcpuCount *int64 `json:"vcpuCount"` - MemoryMb *int64 `json:"memoryMb"` - UpdateTime *string `json:"updateTime"` -} - -type jsonMembershipEndpointKubernetesMetadata MembershipEndpointKubernetesMetadata - -func (r *MembershipEndpointKubernetesMetadata) UnmarshalJSON(data []byte) error { - var res jsonMembershipEndpointKubernetesMetadata - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyMembershipEndpointKubernetesMetadata - } else { - - r.KubernetesApiServerVersion = res.KubernetesApiServerVersion - - r.NodeProviderId = res.NodeProviderId - - r.NodeCount = res.NodeCount - - r.VcpuCount = res.VcpuCount - - r.MemoryMb = res.MemoryMb - - r.UpdateTime = res.UpdateTime - - } - return nil -} - -// This object is used to assert a desired state where this MembershipEndpointKubernetesMetadata is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyMembershipEndpointKubernetesMetadata *MembershipEndpointKubernetesMetadata = &MembershipEndpointKubernetesMetadata{empty: true} - -func (r *MembershipEndpointKubernetesMetadata) Empty() bool { - return r.empty -} - -func (r *MembershipEndpointKubernetesMetadata) String() string { - return dcl.SprintResource(r) -} - -func (r *MembershipEndpointKubernetesMetadata) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type MembershipEndpointKubernetesResource struct { - empty bool `json:"-"` - MembershipCrManifest *string `json:"membershipCrManifest"` - MembershipResources []MembershipEndpointKubernetesResourceMembershipResources `json:"membershipResources"` - ConnectResources []MembershipEndpointKubernetesResourceConnectResources `json:"connectResources"` - ResourceOptions *MembershipEndpointKubernetesResourceResourceOptions `json:"resourceOptions"` -} - -type jsonMembershipEndpointKubernetesResource MembershipEndpointKubernetesResource - -func (r *MembershipEndpointKubernetesResource) UnmarshalJSON(data []byte) error { - var res jsonMembershipEndpointKubernetesResource - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyMembershipEndpointKubernetesResource - } else { - - r.MembershipCrManifest = res.MembershipCrManifest - - r.MembershipResources = res.MembershipResources - - r.ConnectResources = res.ConnectResources - - r.ResourceOptions = res.ResourceOptions - - } - return nil -} - -// This object is used to assert a desired state where this MembershipEndpointKubernetesResource is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyMembershipEndpointKubernetesResource *MembershipEndpointKubernetesResource = &MembershipEndpointKubernetesResource{empty: true} - -func (r *MembershipEndpointKubernetesResource) Empty() bool { - return r.empty -} - -func (r *MembershipEndpointKubernetesResource) String() string { - return dcl.SprintResource(r) -} - -func (r *MembershipEndpointKubernetesResource) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type MembershipEndpointKubernetesResourceMembershipResources struct { - empty bool `json:"-"` - Manifest *string `json:"manifest"` - ClusterScoped *bool `json:"clusterScoped"` -} - -type jsonMembershipEndpointKubernetesResourceMembershipResources MembershipEndpointKubernetesResourceMembershipResources - -func (r *MembershipEndpointKubernetesResourceMembershipResources) UnmarshalJSON(data []byte) error { - var res jsonMembershipEndpointKubernetesResourceMembershipResources - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyMembershipEndpointKubernetesResourceMembershipResources - } else { - - r.Manifest = res.Manifest - - r.ClusterScoped = res.ClusterScoped - - } - return nil -} - -// This object is used to assert a desired state where this MembershipEndpointKubernetesResourceMembershipResources is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyMembershipEndpointKubernetesResourceMembershipResources *MembershipEndpointKubernetesResourceMembershipResources = &MembershipEndpointKubernetesResourceMembershipResources{empty: true} - -func (r *MembershipEndpointKubernetesResourceMembershipResources) Empty() bool { - return r.empty -} - -func (r *MembershipEndpointKubernetesResourceMembershipResources) String() string { - return dcl.SprintResource(r) -} - -func (r *MembershipEndpointKubernetesResourceMembershipResources) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type MembershipEndpointKubernetesResourceConnectResources struct { - empty bool `json:"-"` - Manifest *string `json:"manifest"` - ClusterScoped *bool `json:"clusterScoped"` -} - -type jsonMembershipEndpointKubernetesResourceConnectResources MembershipEndpointKubernetesResourceConnectResources - -func (r *MembershipEndpointKubernetesResourceConnectResources) UnmarshalJSON(data []byte) error { - var res jsonMembershipEndpointKubernetesResourceConnectResources - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyMembershipEndpointKubernetesResourceConnectResources - } else { - - r.Manifest = res.Manifest - - r.ClusterScoped = res.ClusterScoped - - } - return nil -} - -// This object is used to assert a desired state where this MembershipEndpointKubernetesResourceConnectResources is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyMembershipEndpointKubernetesResourceConnectResources *MembershipEndpointKubernetesResourceConnectResources = &MembershipEndpointKubernetesResourceConnectResources{empty: true} - -func (r *MembershipEndpointKubernetesResourceConnectResources) Empty() bool { - return r.empty -} - -func (r *MembershipEndpointKubernetesResourceConnectResources) String() string { - return dcl.SprintResource(r) -} - -func (r *MembershipEndpointKubernetesResourceConnectResources) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type MembershipEndpointKubernetesResourceResourceOptions struct { - empty bool `json:"-"` - ConnectVersion *string `json:"connectVersion"` - V1Beta1Crd *bool `json:"v1beta1Crd"` -} - -type jsonMembershipEndpointKubernetesResourceResourceOptions MembershipEndpointKubernetesResourceResourceOptions - -func (r *MembershipEndpointKubernetesResourceResourceOptions) UnmarshalJSON(data []byte) error { - var res jsonMembershipEndpointKubernetesResourceResourceOptions - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyMembershipEndpointKubernetesResourceResourceOptions - } else { - - r.ConnectVersion = res.ConnectVersion - - r.V1Beta1Crd = res.V1Beta1Crd - - } - return nil -} - -// This object is used to assert a desired state where this MembershipEndpointKubernetesResourceResourceOptions is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyMembershipEndpointKubernetesResourceResourceOptions *MembershipEndpointKubernetesResourceResourceOptions = &MembershipEndpointKubernetesResourceResourceOptions{empty: true} - -func (r *MembershipEndpointKubernetesResourceResourceOptions) Empty() bool { - return r.empty -} - -func (r *MembershipEndpointKubernetesResourceResourceOptions) String() string { - return dcl.SprintResource(r) -} - -func (r *MembershipEndpointKubernetesResourceResourceOptions) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type MembershipState struct { - empty bool `json:"-"` - Code *MembershipStateCodeEnum `json:"code"` -} - -type jsonMembershipState MembershipState - -func (r *MembershipState) UnmarshalJSON(data []byte) error { - var res jsonMembershipState - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyMembershipState - } else { - - r.Code = res.Code - - } - return nil -} - -// This object is used to assert a desired state where this MembershipState is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyMembershipState *MembershipState = &MembershipState{empty: true} - -func (r *MembershipState) Empty() bool { - return r.empty -} - -func (r *MembershipState) String() string { - return dcl.SprintResource(r) -} - -func (r *MembershipState) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type MembershipAuthority struct { - empty bool `json:"-"` - Issuer *string `json:"issuer"` - WorkloadIdentityPool *string `json:"workloadIdentityPool"` - IdentityProvider *string `json:"identityProvider"` -} - -type jsonMembershipAuthority MembershipAuthority - -func (r *MembershipAuthority) UnmarshalJSON(data []byte) error { - var res jsonMembershipAuthority - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyMembershipAuthority - } else { - - r.Issuer = res.Issuer - - r.WorkloadIdentityPool = res.WorkloadIdentityPool - - r.IdentityProvider = res.IdentityProvider - - } - return nil -} - -// This object is used to assert a desired state where this MembershipAuthority is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyMembershipAuthority *MembershipAuthority = &MembershipAuthority{empty: true} - -func (r *MembershipAuthority) Empty() bool { - return r.empty -} - -func (r *MembershipAuthority) String() string { - return dcl.SprintResource(r) -} - -func (r *MembershipAuthority) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.Sum256([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -// Describe returns a simple description of this resource to ensure that automated tools -// can identify it. -func (r *Membership) Describe() dcl.ServiceTypeVersion { - return dcl.ServiceTypeVersion{ - Service: "gke_hub", - Type: "Membership", - Version: "beta", - } -} - -func (r *Membership) ID() (string, error) { - if err := extractMembershipFields(r); err != nil { - return "", err - } - nr := r.urlNormalized() - params := map[string]interface{}{ - "endpoint": dcl.ValueOrEmptyString(nr.Endpoint), - "name": dcl.ValueOrEmptyString(nr.Name), - "labels": dcl.ValueOrEmptyString(nr.Labels), - "description": dcl.ValueOrEmptyString(nr.Description), - "state": dcl.ValueOrEmptyString(nr.State), - "create_time": dcl.ValueOrEmptyString(nr.CreateTime), - "update_time": dcl.ValueOrEmptyString(nr.UpdateTime), - "delete_time": dcl.ValueOrEmptyString(nr.DeleteTime), - "external_id": dcl.ValueOrEmptyString(nr.ExternalId), - "last_connection_time": dcl.ValueOrEmptyString(nr.LastConnectionTime), - "unique_id": dcl.ValueOrEmptyString(nr.UniqueId), - "authority": dcl.ValueOrEmptyString(nr.Authority), - "infrastructure_type": dcl.ValueOrEmptyString(nr.InfrastructureType), - "project": dcl.ValueOrEmptyString(nr.Project), - "location": dcl.ValueOrEmptyString(nr.Location), - } - return dcl.Nprintf("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/memberships/{{ "{{" }}name{{ "}}" }}", params), nil -} - -const MembershipMaxPage = -1 - -type MembershipList struct { - 
Items []*Membership - - nextToken string - - pageSize int32 - - resource *Membership -} - -func (l *MembershipList) HasNext() bool { - return l.nextToken != "" -} - -func (l *MembershipList) Next(ctx context.Context, c *Client) error { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - if !l.HasNext() { - return fmt.Errorf("no next page") - } - items, token, err := c.listMembership(ctx, l.resource, l.nextToken, l.pageSize) - if err != nil { - return err - } - l.Items = items - l.nextToken = token - return err -} - -func (c *Client) ListMembership(ctx context.Context, project, location string) (*MembershipList, error) { - ctx = dcl.ContextWithRequestID(ctx) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - return c.ListMembershipWithMaxResults(ctx, project, location, MembershipMaxPage) - -} - -func (c *Client) ListMembershipWithMaxResults(ctx context.Context, project, location string, pageSize int32) (*MembershipList, error) { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - // Create a resource object so that we can use proper url normalization methods. - r := &Membership{ - Project: &project, - Location: &location, - } - items, token, err := c.listMembership(ctx, r, "", pageSize) - if err != nil { - return nil, err - } - return &MembershipList{ - Items: items, - nextToken: token, - pageSize: pageSize, - resource: r, - }, nil -} - -func (c *Client) GetMembership(ctx context.Context, r *Membership) (*Membership, error) { - ctx = dcl.ContextWithRequestID(ctx) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - // This is *purposefully* supressing errors. - // This function is used with url-normalized values + not URL normalized values. - // URL Normalized values will throw unintentional errors, since those values are not of the proper parent form. 
- extractMembershipFields(r) - - b, err := c.getMembershipRaw(ctx, r) - if err != nil { - if dcl.IsNotFound(err) { - return nil, &googleapi.Error{ - Code: 404, - Message: err.Error(), - } - } - return nil, err - } - result, err := unmarshalMembership(b, c, r) - if err != nil { - return nil, err - } - result.Project = r.Project - result.Location = r.Location - result.Name = r.Name - - c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) - c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) - result, err = canonicalizeMembershipNewState(c, result, r) - if err != nil { - return nil, err - } - if err := postReadExtractMembershipFields(result); err != nil { - return result, err - } - c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result) - - return result, nil -} - -func (c *Client) DeleteMembership(ctx context.Context, r *Membership) error { - ctx = dcl.ContextWithRequestID(ctx) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - if r == nil { - return fmt.Errorf("Membership resource is nil") - } - c.Config.Logger.InfoWithContext(ctx, "Deleting Membership...") - deleteOp := deleteMembershipOperation{} - return deleteOp.do(ctx, r, c) -} - -// DeleteAllMembership deletes all resources that the filter functions returns true on. 
-func (c *Client) DeleteAllMembership(ctx context.Context, project, location string, filter func(*Membership) bool) error { - listObj, err := c.ListMembership(ctx, project, location) - if err != nil { - return err - } - - err = c.deleteAllMembership(ctx, filter, listObj.Items) - if err != nil { - return err - } - for listObj.HasNext() { - err = listObj.Next(ctx, c) - if err != nil { - return nil - } - err = c.deleteAllMembership(ctx, filter, listObj.Items) - if err != nil { - return err - } - } - return nil -} - -func (c *Client) ApplyMembership(ctx context.Context, rawDesired *Membership, opts ...dcl.ApplyOption) (*Membership, error) { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - ctx = dcl.ContextWithRequestID(ctx) - var resultNewState *Membership - err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { - newState, err := applyMembershipHelper(c, ctx, rawDesired, opts...) - resultNewState = newState - if err != nil { - // If the error is 409, there is conflict in resource update. - // Here we want to apply changes based on latest state. - if dcl.IsConflictError(err) { - return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} - } - return nil, err - } - return nil, nil - }, c.Config.RetryProvider) - return resultNewState, err -} - -func applyMembershipHelper(c *Client, ctx context.Context, rawDesired *Membership, opts ...dcl.ApplyOption) (*Membership, error) { - c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyMembership...") - c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) - - // 1.1: Validation of user-specified fields in desired state. - if err := rawDesired.validate(); err != nil { - return nil, err - } - - if err := extractMembershipFields(rawDesired); err != nil { - return nil, err - } - - initial, desired, fieldDiffs, err := c.membershipDiffsForRawDesired(ctx, rawDesired, opts...) 
- if err != nil { - return nil, fmt.Errorf("failed to create a diff: %w", err) - } - - diffs, err := convertFieldDiffsToMembershipDiffs(c.Config, fieldDiffs, opts) - if err != nil { - return nil, err - } - - // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). - - // 2.3: Lifecycle Directive Check - var create bool - lp := dcl.FetchLifecycleParams(opts) - if initial == nil { - if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { - return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} - } - create = true - } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { - return nil, dcl.ApplyInfeasibleError{ - Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), - } - } else { - for _, d := range diffs { - if d.RequiresRecreate { - return nil, dcl.ApplyInfeasibleError{ - Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), - } - } - if dcl.HasLifecycleParam(lp, dcl.BlockModification) { - return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} - } - } - } - - // 2.4 Imperative Request Planning - var ops []membershipApiOperation - if create { - ops = append(ops, &createMembershipOperation{}) - } else { - for _, d := range diffs { - ops = append(ops, d.UpdateOp) - } - } - c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) - - // 2.5 Request Actuation - for _, op := range ops { - c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) - if err := op.do(ctx, desired, c); err != nil { - c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", op, op, err) - return nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) - } - return applyMembershipDiff(c, ctx, desired, rawDesired, ops, opts...) 
-} - -func applyMembershipDiff(c *Client, ctx context.Context, desired *Membership, rawDesired *Membership, ops []membershipApiOperation, opts ...dcl.ApplyOption) (*Membership, error) { - // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state - c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") - rawNew, err := c.GetMembership(ctx, desired) - if err != nil { - return nil, err - } - // Get additional values from the first response. - // These values should be merged into the newState above. - if len(ops) > 0 { - lastOp := ops[len(ops)-1] - if o, ok := lastOp.(*createMembershipOperation); ok { - if r, hasR := o.FirstResponse(); hasR { - - c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") - - fullResp, err := unmarshalMapMembership(r, c, rawDesired) - if err != nil { - return nil, err - } - - rawNew, err = canonicalizeMembershipNewState(c, rawNew, fullResp) - if err != nil { - return nil, err - } - } - } - } - - c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) - // 3.2b Canonicalization of raw new state using raw desired state - newState, err := canonicalizeMembershipNewState(c, rawNew, rawDesired) - if err != nil { - return rawNew, err - } - - c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) - // 3.3 Comparison of the new state and raw desired state. - // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE - newDesired, err := canonicalizeMembershipDesiredState(rawDesired, newState) - if err != nil { - return newState, err - } - - if err := postReadExtractMembershipFields(newState); err != nil { - return newState, err - } - - // Need to ensure any transformations made here match acceptably in differ. 
- if err := postReadExtractMembershipFields(newDesired); err != nil { - return newState, err - } - - c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) - newDiffs, err := diffMembership(c, newDesired, newState) - if err != nil { - return newState, err - } - - if len(newDiffs) == 0 { - c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") - } else { - c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) - diffMessages := make([]string, len(newDiffs)) - for i, d := range newDiffs { - diffMessages[i] = fmt.Sprintf("%v", d) - } - return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} - } - c.Config.Logger.InfoWithContext(ctx, "Done Apply.") - return newState, nil -} - -func (r *Membership) GetPolicy(basePath string) (string, string, *bytes.Buffer, error) { - u := r.getPolicyURL(basePath) - body := &bytes.Buffer{} - u, err := dcl.AddQueryParams(u, map[string]string{"optionsRequestedPolicyVersion": fmt.Sprintf("%d", r.IAMPolicyVersion())}) - if err != nil { - return "", "", nil, err - } - return u, "", body, nil -} diff --git a/mmv1/third_party/terraform/services/gkehub/membership_internal.go.tmpl b/mmv1/third_party/terraform/services/gkehub/membership_internal.go.tmpl deleted file mode 100644 index d0f34246b2d8..000000000000 --- a/mmv1/third_party/terraform/services/gkehub/membership_internal.go.tmpl +++ /dev/null @@ -1,3830 +0,0 @@ -package gkehub - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io/ioutil" - "strings" - - dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" - "github.com/hashicorp/terraform-provider-google/google/tpgdclresource/operations" -) - -func (r *Membership) validate() error { - - if err := dcl.Required(r, "name"); err != nil { - return err - } - if err := dcl.RequiredParameter(r.Project, "Project"); err != nil { - return err - } - if err := dcl.RequiredParameter(r.Location, "Location"); err != nil { - return err - } 
- if !dcl.IsEmptyValueIndirect(r.Endpoint) { - if err := r.Endpoint.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.State) { - if err := r.State.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.Authority) { - if err := r.Authority.validate(); err != nil { - return err - } - } - return nil -} -func (r *MembershipEndpoint) validate() error { - if !dcl.IsEmptyValueIndirect(r.GkeCluster) { - if err := r.GkeCluster.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.KubernetesMetadata) { - if err := r.KubernetesMetadata.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.KubernetesResource) { - if err := r.KubernetesResource.validate(); err != nil { - return err - } - } - return nil -} -func (r *MembershipEndpointGkeCluster) validate() error { - return nil -} -func (r *MembershipEndpointKubernetesMetadata) validate() error { - return nil -} -func (r *MembershipEndpointKubernetesResource) validate() error { - if !dcl.IsEmptyValueIndirect(r.ResourceOptions) { - if err := r.ResourceOptions.validate(); err != nil { - return err - } - } - return nil -} -func (r *MembershipEndpointKubernetesResourceMembershipResources) validate() error { - return nil -} -func (r *MembershipEndpointKubernetesResourceConnectResources) validate() error { - return nil -} -func (r *MembershipEndpointKubernetesResourceResourceOptions) validate() error { - return nil -} -func (r *MembershipState) validate() error { - return nil -} -func (r *MembershipAuthority) validate() error { - return nil -} -func (r *Membership) basePath() string { - params := map[string]interface{}{} - return dcl.Nprintf("https://gkehub.googleapis.com/v1beta1/", params) -} - -func (r *Membership) getURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "location": dcl.ValueOrEmptyString(nr.Location), - "name": 
dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/memberships/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil -} - -func (r *Membership) listURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "location": dcl.ValueOrEmptyString(nr.Location), - } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/memberships", nr.basePath(), userBasePath, params), nil - -} - -func (r *Membership) createURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "location": dcl.ValueOrEmptyString(nr.Location), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/memberships?membershipId={{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil - -} - -func (r *Membership) deleteURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "location": dcl.ValueOrEmptyString(nr.Location), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/memberships/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, params), nil -} - -func (r *Membership) SetPolicyURL(userBasePath string) string { - nr := r.urlNormalized() - fields := map[string]interface{}{} - return dcl.URL("", nr.basePath(), userBasePath, fields) -} - -func (r *Membership) SetPolicyVerb() string { - return "" -} - -func (r *Membership) getPolicyURL(userBasePath string) string { - nr := r.urlNormalized() - fields := map[string]interface{}{} - return dcl.URL("", nr.basePath(), userBasePath, fields) -} - -func (r 
*Membership) IAMPolicyVersion() int { - return 3 -} - -// membershipApiOperation represents a mutable operation in the underlying REST -// API such as Create, Update, or Delete. -type membershipApiOperation interface { - do(context.Context, *Membership, *Client) error -} - -// newUpdateMembershipUpdateMembershipRequest creates a request for an -// Membership resource's UpdateMembership update type by filling in the update -// fields based on the intended state of the resource. -func newUpdateMembershipUpdateMembershipRequest(ctx context.Context, f *Membership, c *Client) (map[string]interface{}, error) { - req := map[string]interface{}{} - res := f - _ = res - - if v, err := expandMembershipEndpoint(c, f.Endpoint, res); err != nil { - return nil, fmt.Errorf("error expanding Endpoint into endpoint: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - req["endpoint"] = v - } - if v := f.Labels; !dcl.IsEmptyValueIndirect(v) { - req["labels"] = v - } - if v := f.Description; !dcl.IsEmptyValueIndirect(v) { - req["description"] = v - } - if v := f.ExternalId; !dcl.IsEmptyValueIndirect(v) { - req["externalId"] = v - } - if v, err := expandMembershipAuthority(c, f.Authority, res); err != nil { - return nil, fmt.Errorf("error expanding Authority into authority: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - req["authority"] = v - } - if v := f.InfrastructureType; !dcl.IsEmptyValueIndirect(v) { - req["infrastructureType"] = v - } - return req, nil -} - -// marshalUpdateMembershipUpdateMembershipRequest converts the update into -// the final JSON request body. -func marshalUpdateMembershipUpdateMembershipRequest(c *Client, m map[string]interface{}) ([]byte, error) { - - return json.Marshal(m) -} - -type updateMembershipUpdateMembershipOperation struct { - // If the update operation has the REQUIRES_APPLY_OPTIONS trait, this will be populated. 
- // Usually it will be nil - this is to prevent us from accidentally depending on apply - // options, which should usually be unnecessary. - ApplyOptions []dcl.ApplyOption - FieldDiffs []*dcl.FieldDiff -} - -// do creates a request and sends it to the appropriate URL. In most operations, -// do will transcribe a subset of the resource into a request object and send a -// PUT request to a single URL. - -func (op *updateMembershipUpdateMembershipOperation) do(ctx context.Context, r *Membership, c *Client) error { - _, err := c.GetMembership(ctx, r) - if err != nil { - return err - } - - u, err := r.updateURL(c.Config.BasePath, "UpdateMembership") - if err != nil { - return err - } - mask := dcl.UpdateMask(op.FieldDiffs) - u, err = dcl.AddQueryParams(u, map[string]string{"updateMask": mask}) - if err != nil { - return err - } - - req, err := newUpdateMembershipUpdateMembershipRequest(ctx, r, c) - if err != nil { - return err - } - - c.Config.Logger.InfoWithContextf(ctx, "Created update: %#v", req) - body, err := marshalUpdateMembershipUpdateMembershipRequest(c, req) - if err != nil { - return err - } - resp, err := dcl.SendRequest(ctx, c.Config, "PATCH", u, bytes.NewBuffer(body), c.Config.RetryProvider) - if err != nil { - return err - } - - var o operations.StandardGCPOperation - if err := dcl.ParseResponse(resp.Response, &o); err != nil { - return err - } - err = o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET") - - if err != nil { - return err - } - - return nil -} - -func (c *Client) listMembershipRaw(ctx context.Context, r *Membership, pageToken string, pageSize int32) ([]byte, error) { - u, err := r.urlNormalized().listURL(c.Config.BasePath) - if err != nil { - return nil, err - } - - m := make(map[string]string) - if pageToken != "" { - m["pageToken"] = pageToken - } - - if pageSize != MembershipMaxPage { - m["pageSize"] = fmt.Sprintf("%v", pageSize) - } - - u, err = dcl.AddQueryParams(u, m) - if err != nil { - return 
nil, err - } - resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) - if err != nil { - return nil, err - } - defer resp.Response.Body.Close() - return ioutil.ReadAll(resp.Response.Body) -} - -type listMembershipOperation struct { - Resources []map[string]interface{} `json:"resources"` - Token string `json:"nextPageToken"` -} - -func (c *Client) listMembership(ctx context.Context, r *Membership, pageToken string, pageSize int32) ([]*Membership, string, error) { - b, err := c.listMembershipRaw(ctx, r, pageToken, pageSize) - if err != nil { - return nil, "", err - } - - var m listMembershipOperation - if err := json.Unmarshal(b, &m); err != nil { - return nil, "", err - } - - var l []*Membership - for _, v := range m.Resources { - res, err := unmarshalMapMembership(v, c, r) - if err != nil { - return nil, m.Token, err - } - res.Project = r.Project - res.Location = r.Location - l = append(l, res) - } - - return l, m.Token, nil -} - -func (c *Client) deleteAllMembership(ctx context.Context, f func(*Membership) bool, resources []*Membership) error { - var errors []string - for _, res := range resources { - if f(res) { - // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. - err := c.DeleteMembership(ctx, res) - if err != nil { - errors = append(errors, err.Error()) - } - } - } - if len(errors) > 0 { - return fmt.Errorf("%v", strings.Join(errors, "\n")) - } else { - return nil - } -} - -type deleteMembershipOperation struct{} - -func (op *deleteMembershipOperation) do(ctx context.Context, r *Membership, c *Client) error { - r, err := c.GetMembership(ctx, r) - if err != nil { - if dcl.IsNotFound(err) { - c.Config.Logger.InfoWithContextf(ctx, "Membership not found, returning. Original error: %v", err) - return nil - } - c.Config.Logger.WarningWithContextf(ctx, "GetMembership checking for existence. 
error: %v", err) - return err - } - - u, err := r.deleteURL(c.Config.BasePath) - if err != nil { - return err - } - - // Delete should never have a body - body := &bytes.Buffer{} - resp, err := dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider) - if err != nil { - return err - } - - // wait for object to be deleted. - var o operations.StandardGCPOperation - if err := dcl.ParseResponse(resp.Response, &o); err != nil { - return err - } - if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { - return err - } - - // We saw a race condition where for some successful delete operation, the Get calls returned resources for a short duration. - // This is the reason we are adding retry to handle that case. - retriesRemaining := 10 - dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { - _, err := c.GetMembership(ctx, r) - if dcl.IsNotFound(err) { - return nil, nil - } - if retriesRemaining > 0 { - retriesRemaining-- - return &dcl.RetryDetails{}, dcl.OperationNotDone{} - } - return nil, dcl.NotDeletedError{ExistingResource: r} - }, c.Config.RetryProvider) - return nil -} - -// Create operations are similar to Update operations, although they do not have -// specific request objects. The Create request object is the json encoding of -// the resource, which is modified by res.marshal to form the base request body. 
-type createMembershipOperation struct { - response map[string]interface{} -} - -func (op *createMembershipOperation) FirstResponse() (map[string]interface{}, bool) { - return op.response, len(op.response) > 0 -} - -func (op *createMembershipOperation) do(ctx context.Context, r *Membership, c *Client) error { - c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r) - u, err := r.createURL(c.Config.BasePath) - if err != nil { - return err - } - - req, err := r.marshal(c) - if err != nil { - return err - } - resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider) - if err != nil { - return err - } - // wait for object to be created. - var o operations.StandardGCPOperation - if err := dcl.ParseResponse(resp.Response, &o); err != nil { - return err - } - if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { - c.Config.Logger.Warningf("Creation failed after waiting for operation: %v", err) - return err - } - c.Config.Logger.InfoWithContextf(ctx, "Successfully waited for operation") - op.response, _ = o.FirstResponse() - - if _, err := c.GetMembership(ctx, r); err != nil { - c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) - return err - } - - return nil -} - -func (c *Client) getMembershipRaw(ctx context.Context, r *Membership) ([]byte, error) { - - u, err := r.getURL(c.Config.BasePath) - if err != nil { - return nil, err - } - resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) - if err != nil { - return nil, err - } - defer resp.Response.Body.Close() - b, err := ioutil.ReadAll(resp.Response.Body) - if err != nil { - return nil, err - } - - return b, nil -} - -func (c *Client) membershipDiffsForRawDesired(ctx context.Context, rawDesired *Membership, opts ...dcl.ApplyOption) (initial, desired *Membership, diffs []*dcl.FieldDiff, err error) { - c.Config.Logger.InfoWithContext(ctx, 
"Fetching initial state...") - // First, let us see if the user provided a state hint. If they did, we will start fetching based on that. - var fetchState *Membership - if sh := dcl.FetchStateHint(opts); sh != nil { - if r, ok := sh.(*Membership); !ok { - c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected Membership, got %T", sh) - } else { - fetchState = r - } - } - if fetchState == nil { - fetchState = rawDesired - } - - // 1.2: Retrieval of raw initial state from API - rawInitial, err := c.GetMembership(ctx, fetchState) - if rawInitial == nil { - if !dcl.IsNotFound(err) { - c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a Membership resource already exists: %s", err) - return nil, nil, nil, fmt.Errorf("failed to retrieve Membership resource: %v", err) - } - c.Config.Logger.InfoWithContext(ctx, "Found that Membership resource did not exist.") - // Perform canonicalization to pick up defaults. - desired, err = canonicalizeMembershipDesiredState(rawDesired, rawInitial) - return nil, desired, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Found initial state for Membership: %v", rawInitial) - c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for Membership: %v", rawDesired) - - // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. - if err := extractMembershipFields(rawInitial); err != nil { - return nil, nil, nil, err - } - - // 1.3: Canonicalize raw initial state into initial state. - initial, err = canonicalizeMembershipInitialState(rawInitial, rawDesired) - if err != nil { - return nil, nil, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for Membership: %v", initial) - - // 1.4: Canonicalize raw desired state into desired state. - desired, err = canonicalizeMembershipDesiredState(rawDesired, rawInitial, opts...) 
- if err != nil { - return nil, nil, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for Membership: %v", desired) - - // 2.1: Comparison of initial and desired state. - diffs, err = diffMembership(c, desired, initial, opts...) - return initial, desired, diffs, err -} - -func canonicalizeMembershipInitialState(rawInitial, rawDesired *Membership) (*Membership, error) { - // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. - return rawInitial, nil -} - -/* -* Canonicalizers -* -* These are responsible for converting either a user-specified config or a -* GCP API response to a standard format that can be used for difference checking. -* */ - -func canonicalizeMembershipDesiredState(rawDesired, rawInitial *Membership, opts ...dcl.ApplyOption) (*Membership, error) { - - if rawInitial == nil { - // Since the initial state is empty, the desired state is all we have. - // We canonicalize the remaining nested objects with nil to pick up defaults. - rawDesired.Endpoint = canonicalizeMembershipEndpoint(rawDesired.Endpoint, nil, opts...) - rawDesired.State = canonicalizeMembershipState(rawDesired.State, nil, opts...) - rawDesired.Authority = canonicalizeMembershipAuthority(rawDesired.Authority, nil, opts...) - - return rawDesired, nil - } - canonicalDesired := &Membership{} - canonicalDesired.Endpoint = canonicalizeMembershipEndpoint(rawDesired.Endpoint, rawInitial.Endpoint, opts...) - if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawInitial.Name) { - canonicalDesired.Name = rawInitial.Name - } else { - canonicalDesired.Name = rawDesired.Name - } - if dcl.IsZeroValue(rawDesired.Labels) || (dcl.IsEmptyValueIndirect(rawDesired.Labels) && dcl.IsEmptyValueIndirect(rawInitial.Labels)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- canonicalDesired.Labels = rawInitial.Labels - } else { - canonicalDesired.Labels = rawDesired.Labels - } - if dcl.StringCanonicalize(rawDesired.Description, rawInitial.Description) { - canonicalDesired.Description = rawInitial.Description - } else { - canonicalDesired.Description = rawDesired.Description - } - if dcl.StringCanonicalize(rawDesired.ExternalId, rawInitial.ExternalId) { - canonicalDesired.ExternalId = rawInitial.ExternalId - } else { - canonicalDesired.ExternalId = rawDesired.ExternalId - } - canonicalDesired.Authority = canonicalizeMembershipAuthority(rawDesired.Authority, rawInitial.Authority, opts...) - if dcl.IsZeroValue(rawDesired.InfrastructureType) || (dcl.IsEmptyValueIndirect(rawDesired.InfrastructureType) && dcl.IsEmptyValueIndirect(rawInitial.InfrastructureType)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - canonicalDesired.InfrastructureType = rawInitial.InfrastructureType - } else { - canonicalDesired.InfrastructureType = rawDesired.InfrastructureType - } - if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) { - canonicalDesired.Project = rawInitial.Project - } else { - canonicalDesired.Project = rawDesired.Project - } - if dcl.NameToSelfLink(rawDesired.Location, rawInitial.Location) { - canonicalDesired.Location = rawInitial.Location - } else { - canonicalDesired.Location = rawDesired.Location - } - return canonicalDesired, nil -} - -func canonicalizeMembershipNewState(c *Client, rawNew, rawDesired *Membership) (*Membership, error) { - - if dcl.IsEmptyValueIndirect(rawNew.Endpoint) && dcl.IsEmptyValueIndirect(rawDesired.Endpoint) { - rawNew.Endpoint = rawDesired.Endpoint - } else { - rawNew.Endpoint = canonicalizeNewMembershipEndpoint(c, rawDesired.Endpoint, rawNew.Endpoint) - } - - if dcl.IsEmptyValueIndirect(rawNew.Name) && dcl.IsEmptyValueIndirect(rawDesired.Name) { - rawNew.Name = rawDesired.Name - } else { - if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, 
rawNew.Name) { - rawNew.Name = rawDesired.Name - } - } - - if dcl.IsEmptyValueIndirect(rawNew.Labels) && dcl.IsEmptyValueIndirect(rawDesired.Labels) { - rawNew.Labels = rawDesired.Labels - } else { - } - - if dcl.IsEmptyValueIndirect(rawNew.Description) && dcl.IsEmptyValueIndirect(rawDesired.Description) { - rawNew.Description = rawDesired.Description - } else { - if dcl.StringCanonicalize(rawDesired.Description, rawNew.Description) { - rawNew.Description = rawDesired.Description - } - } - - if dcl.IsEmptyValueIndirect(rawNew.State) && dcl.IsEmptyValueIndirect(rawDesired.State) { - rawNew.State = rawDesired.State - } else { - rawNew.State = canonicalizeNewMembershipState(c, rawDesired.State, rawNew.State) - } - - if dcl.IsEmptyValueIndirect(rawNew.CreateTime) && dcl.IsEmptyValueIndirect(rawDesired.CreateTime) { - rawNew.CreateTime = rawDesired.CreateTime - } else { - } - - if dcl.IsEmptyValueIndirect(rawNew.UpdateTime) && dcl.IsEmptyValueIndirect(rawDesired.UpdateTime) { - rawNew.UpdateTime = rawDesired.UpdateTime - } else { - } - - if dcl.IsEmptyValueIndirect(rawNew.DeleteTime) && dcl.IsEmptyValueIndirect(rawDesired.DeleteTime) { - rawNew.DeleteTime = rawDesired.DeleteTime - } else { - } - - if dcl.IsEmptyValueIndirect(rawNew.ExternalId) && dcl.IsEmptyValueIndirect(rawDesired.ExternalId) { - rawNew.ExternalId = rawDesired.ExternalId - } else { - if dcl.StringCanonicalize(rawDesired.ExternalId, rawNew.ExternalId) { - rawNew.ExternalId = rawDesired.ExternalId - } - } - - if dcl.IsEmptyValueIndirect(rawNew.LastConnectionTime) && dcl.IsEmptyValueIndirect(rawDesired.LastConnectionTime) { - rawNew.LastConnectionTime = rawDesired.LastConnectionTime - } else { - } - - if dcl.IsEmptyValueIndirect(rawNew.UniqueId) && dcl.IsEmptyValueIndirect(rawDesired.UniqueId) { - rawNew.UniqueId = rawDesired.UniqueId - } else { - if dcl.StringCanonicalize(rawDesired.UniqueId, rawNew.UniqueId) { - rawNew.UniqueId = rawDesired.UniqueId - } - } - - if 
dcl.IsEmptyValueIndirect(rawNew.Authority) && dcl.IsEmptyValueIndirect(rawDesired.Authority) { - rawNew.Authority = rawDesired.Authority - } else { - rawNew.Authority = canonicalizeNewMembershipAuthority(c, rawDesired.Authority, rawNew.Authority) - } - - if dcl.IsEmptyValueIndirect(rawNew.InfrastructureType) && dcl.IsEmptyValueIndirect(rawDesired.InfrastructureType) { - rawNew.InfrastructureType = rawDesired.InfrastructureType - } else { - } - - rawNew.Project = rawDesired.Project - - rawNew.Location = rawDesired.Location - - return rawNew, nil -} - -func canonicalizeMembershipEndpoint(des, initial *MembershipEndpoint, opts ...dcl.ApplyOption) *MembershipEndpoint { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &MembershipEndpoint{} - - cDes.GkeCluster = canonicalizeMembershipEndpointGkeCluster(des.GkeCluster, initial.GkeCluster, opts...) - cDes.KubernetesResource = canonicalizeMembershipEndpointKubernetesResource(des.KubernetesResource, initial.KubernetesResource, opts...) - - return cDes -} - -func canonicalizeMembershipEndpointSlice(des, initial []MembershipEndpoint, opts ...dcl.ApplyOption) []MembershipEndpoint { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]MembershipEndpoint, 0, len(des)) - for _, d := range des { - cd := canonicalizeMembershipEndpoint(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]MembershipEndpoint, 0, len(des)) - for i, d := range des { - cd := canonicalizeMembershipEndpoint(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewMembershipEndpoint(c *Client, des, nw *MembershipEndpoint) *MembershipEndpoint { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for MembershipEndpoint while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - nw.GkeCluster = canonicalizeNewMembershipEndpointGkeCluster(c, des.GkeCluster, nw.GkeCluster) - nw.KubernetesMetadata = canonicalizeNewMembershipEndpointKubernetesMetadata(c, des.KubernetesMetadata, nw.KubernetesMetadata) - nw.KubernetesResource = canonicalizeNewMembershipEndpointKubernetesResource(c, des.KubernetesResource, nw.KubernetesResource) - - return nw -} - -func canonicalizeNewMembershipEndpointSet(c *Client, des, nw []MembershipEndpoint) []MembershipEndpoint { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []MembershipEndpoint - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareMembershipEndpointNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewMembershipEndpoint(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewMembershipEndpointSlice(c *Client, des, nw []MembershipEndpoint) []MembershipEndpoint { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []MembershipEndpoint - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewMembershipEndpoint(c, &d, &n)) - } - - return items -} - -func canonicalizeMembershipEndpointGkeCluster(des, initial *MembershipEndpointGkeCluster, opts ...dcl.ApplyOption) *MembershipEndpointGkeCluster { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &MembershipEndpointGkeCluster{} - - if dcl.IsZeroValue(des.ResourceLink) || (dcl.IsEmptyValueIndirect(des.ResourceLink) && dcl.IsEmptyValueIndirect(initial.ResourceLink)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.ResourceLink = initial.ResourceLink - } else { - cDes.ResourceLink = des.ResourceLink - } - - return cDes -} - -func canonicalizeMembershipEndpointGkeClusterSlice(des, initial []MembershipEndpointGkeCluster, opts ...dcl.ApplyOption) []MembershipEndpointGkeCluster { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]MembershipEndpointGkeCluster, 0, len(des)) - for _, d := range des { - cd := canonicalizeMembershipEndpointGkeCluster(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]MembershipEndpointGkeCluster, 0, len(des)) - for i, d := range des { - cd := canonicalizeMembershipEndpointGkeCluster(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewMembershipEndpointGkeCluster(c *Client, des, nw *MembershipEndpointGkeCluster) *MembershipEndpointGkeCluster { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for MembershipEndpointGkeCluster while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewMembershipEndpointGkeClusterSet(c *Client, des, nw []MembershipEndpointGkeCluster) []MembershipEndpointGkeCluster { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []MembershipEndpointGkeCluster - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareMembershipEndpointGkeClusterNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewMembershipEndpointGkeCluster(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewMembershipEndpointGkeClusterSlice(c *Client, des, nw []MembershipEndpointGkeCluster) []MembershipEndpointGkeCluster { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []MembershipEndpointGkeCluster - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewMembershipEndpointGkeCluster(c, &d, &n)) - } - - return items -} - -func canonicalizeMembershipEndpointKubernetesMetadata(des, initial *MembershipEndpointKubernetesMetadata, opts ...dcl.ApplyOption) *MembershipEndpointKubernetesMetadata { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &MembershipEndpointKubernetesMetadata{} - - return cDes -} - -func canonicalizeMembershipEndpointKubernetesMetadataSlice(des, initial []MembershipEndpointKubernetesMetadata, opts ...dcl.ApplyOption) []MembershipEndpointKubernetesMetadata { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]MembershipEndpointKubernetesMetadata, 0, len(des)) - for _, d := range des { - cd := canonicalizeMembershipEndpointKubernetesMetadata(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]MembershipEndpointKubernetesMetadata, 0, len(des)) - for i, d := range des { - cd := canonicalizeMembershipEndpointKubernetesMetadata(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewMembershipEndpointKubernetesMetadata(c *Client, des, nw *MembershipEndpointKubernetesMetadata) *MembershipEndpointKubernetesMetadata { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for MembershipEndpointKubernetesMetadata while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.KubernetesApiServerVersion, nw.KubernetesApiServerVersion) { - nw.KubernetesApiServerVersion = des.KubernetesApiServerVersion - } - if dcl.StringCanonicalize(des.NodeProviderId, nw.NodeProviderId) { - nw.NodeProviderId = des.NodeProviderId - } - - return nw -} - -func canonicalizeNewMembershipEndpointKubernetesMetadataSet(c *Client, des, nw []MembershipEndpointKubernetesMetadata) []MembershipEndpointKubernetesMetadata { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []MembershipEndpointKubernetesMetadata - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareMembershipEndpointKubernetesMetadataNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewMembershipEndpointKubernetesMetadata(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewMembershipEndpointKubernetesMetadataSlice(c *Client, des, nw []MembershipEndpointKubernetesMetadata) []MembershipEndpointKubernetesMetadata { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []MembershipEndpointKubernetesMetadata - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewMembershipEndpointKubernetesMetadata(c, &d, &n)) - } - - return items -} - -func canonicalizeMembershipEndpointKubernetesResource(des, initial *MembershipEndpointKubernetesResource, opts ...dcl.ApplyOption) *MembershipEndpointKubernetesResource { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &MembershipEndpointKubernetesResource{} - - if dcl.StringCanonicalize(des.MembershipCrManifest, initial.MembershipCrManifest) || dcl.IsZeroValue(des.MembershipCrManifest) { - cDes.MembershipCrManifest = initial.MembershipCrManifest - } else { - cDes.MembershipCrManifest = des.MembershipCrManifest - } - cDes.ResourceOptions = canonicalizeMembershipEndpointKubernetesResourceResourceOptions(des.ResourceOptions, initial.ResourceOptions, opts...) - - return cDes -} - -func canonicalizeMembershipEndpointKubernetesResourceSlice(des, initial []MembershipEndpointKubernetesResource, opts ...dcl.ApplyOption) []MembershipEndpointKubernetesResource { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]MembershipEndpointKubernetesResource, 0, len(des)) - for _, d := range des { - cd := canonicalizeMembershipEndpointKubernetesResource(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]MembershipEndpointKubernetesResource, 0, len(des)) - for i, d := range des { - cd := canonicalizeMembershipEndpointKubernetesResource(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewMembershipEndpointKubernetesResource(c *Client, des, nw *MembershipEndpointKubernetesResource) *MembershipEndpointKubernetesResource { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for MembershipEndpointKubernetesResource while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - nw.MembershipCrManifest = des.MembershipCrManifest - nw.MembershipResources = canonicalizeNewMembershipEndpointKubernetesResourceMembershipResourcesSlice(c, des.MembershipResources, nw.MembershipResources) - nw.ConnectResources = canonicalizeNewMembershipEndpointKubernetesResourceConnectResourcesSlice(c, des.ConnectResources, nw.ConnectResources) - nw.ResourceOptions = canonicalizeNewMembershipEndpointKubernetesResourceResourceOptions(c, des.ResourceOptions, nw.ResourceOptions) - - return nw -} - -func canonicalizeNewMembershipEndpointKubernetesResourceSet(c *Client, des, nw []MembershipEndpointKubernetesResource) []MembershipEndpointKubernetesResource { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []MembershipEndpointKubernetesResource - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareMembershipEndpointKubernetesResourceNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewMembershipEndpointKubernetesResource(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) 
- - return items -} - -func canonicalizeNewMembershipEndpointKubernetesResourceSlice(c *Client, des, nw []MembershipEndpointKubernetesResource) []MembershipEndpointKubernetesResource { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []MembershipEndpointKubernetesResource - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewMembershipEndpointKubernetesResource(c, &d, &n)) - } - - return items -} - -func canonicalizeMembershipEndpointKubernetesResourceMembershipResources(des, initial *MembershipEndpointKubernetesResourceMembershipResources, opts ...dcl.ApplyOption) *MembershipEndpointKubernetesResourceMembershipResources { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &MembershipEndpointKubernetesResourceMembershipResources{} - - if dcl.StringCanonicalize(des.Manifest, initial.Manifest) || dcl.IsZeroValue(des.Manifest) { - cDes.Manifest = initial.Manifest - } else { - cDes.Manifest = des.Manifest - } - if dcl.BoolCanonicalize(des.ClusterScoped, initial.ClusterScoped) || dcl.IsZeroValue(des.ClusterScoped) { - cDes.ClusterScoped = initial.ClusterScoped - } else { - cDes.ClusterScoped = des.ClusterScoped - } - - return cDes -} - -func canonicalizeMembershipEndpointKubernetesResourceMembershipResourcesSlice(des, initial []MembershipEndpointKubernetesResourceMembershipResources, opts ...dcl.ApplyOption) []MembershipEndpointKubernetesResourceMembershipResources { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]MembershipEndpointKubernetesResourceMembershipResources, 0, len(des)) - for _, d := range des { - cd := canonicalizeMembershipEndpointKubernetesResourceMembershipResources(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]MembershipEndpointKubernetesResourceMembershipResources, 0, len(des)) - for i, d := range des { - cd := canonicalizeMembershipEndpointKubernetesResourceMembershipResources(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewMembershipEndpointKubernetesResourceMembershipResources(c *Client, des, nw *MembershipEndpointKubernetesResourceMembershipResources) *MembershipEndpointKubernetesResourceMembershipResources { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for MembershipEndpointKubernetesResourceMembershipResources while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Manifest, nw.Manifest) { - nw.Manifest = des.Manifest - } - if dcl.BoolCanonicalize(des.ClusterScoped, nw.ClusterScoped) { - nw.ClusterScoped = des.ClusterScoped - } - - return nw -} - -func canonicalizeNewMembershipEndpointKubernetesResourceMembershipResourcesSet(c *Client, des, nw []MembershipEndpointKubernetesResourceMembershipResources) []MembershipEndpointKubernetesResourceMembershipResources { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []MembershipEndpointKubernetesResourceMembershipResources - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareMembershipEndpointKubernetesResourceMembershipResourcesNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewMembershipEndpointKubernetesResourceMembershipResources(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) 
- } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewMembershipEndpointKubernetesResourceMembershipResourcesSlice(c *Client, des, nw []MembershipEndpointKubernetesResourceMembershipResources) []MembershipEndpointKubernetesResourceMembershipResources { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []MembershipEndpointKubernetesResourceMembershipResources - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewMembershipEndpointKubernetesResourceMembershipResources(c, &d, &n)) - } - - return items -} - -func canonicalizeMembershipEndpointKubernetesResourceConnectResources(des, initial *MembershipEndpointKubernetesResourceConnectResources, opts ...dcl.ApplyOption) *MembershipEndpointKubernetesResourceConnectResources { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &MembershipEndpointKubernetesResourceConnectResources{} - - if dcl.StringCanonicalize(des.Manifest, initial.Manifest) || dcl.IsZeroValue(des.Manifest) { - cDes.Manifest = initial.Manifest - } else { - cDes.Manifest = des.Manifest - } - if dcl.BoolCanonicalize(des.ClusterScoped, initial.ClusterScoped) || dcl.IsZeroValue(des.ClusterScoped) { - cDes.ClusterScoped = initial.ClusterScoped - } else { - cDes.ClusterScoped = des.ClusterScoped - } - - return cDes -} - -func canonicalizeMembershipEndpointKubernetesResourceConnectResourcesSlice(des, initial []MembershipEndpointKubernetesResourceConnectResources, opts ...dcl.ApplyOption) []MembershipEndpointKubernetesResourceConnectResources { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]MembershipEndpointKubernetesResourceConnectResources, 0, len(des)) - 
for _, d := range des { - cd := canonicalizeMembershipEndpointKubernetesResourceConnectResources(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]MembershipEndpointKubernetesResourceConnectResources, 0, len(des)) - for i, d := range des { - cd := canonicalizeMembershipEndpointKubernetesResourceConnectResources(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewMembershipEndpointKubernetesResourceConnectResources(c *Client, des, nw *MembershipEndpointKubernetesResourceConnectResources) *MembershipEndpointKubernetesResourceConnectResources { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for MembershipEndpointKubernetesResourceConnectResources while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Manifest, nw.Manifest) { - nw.Manifest = des.Manifest - } - if dcl.BoolCanonicalize(des.ClusterScoped, nw.ClusterScoped) { - nw.ClusterScoped = des.ClusterScoped - } - - return nw -} - -func canonicalizeNewMembershipEndpointKubernetesResourceConnectResourcesSet(c *Client, des, nw []MembershipEndpointKubernetesResourceConnectResources) []MembershipEndpointKubernetesResourceConnectResources { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []MembershipEndpointKubernetesResourceConnectResources - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareMembershipEndpointKubernetesResourceConnectResourcesNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewMembershipEndpointKubernetesResourceConnectResources(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewMembershipEndpointKubernetesResourceConnectResourcesSlice(c *Client, des, nw []MembershipEndpointKubernetesResourceConnectResources) []MembershipEndpointKubernetesResourceConnectResources { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []MembershipEndpointKubernetesResourceConnectResources - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewMembershipEndpointKubernetesResourceConnectResources(c, &d, &n)) - } - - return items -} - -func canonicalizeMembershipEndpointKubernetesResourceResourceOptions(des, initial *MembershipEndpointKubernetesResourceResourceOptions, opts ...dcl.ApplyOption) *MembershipEndpointKubernetesResourceResourceOptions { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &MembershipEndpointKubernetesResourceResourceOptions{} - - if dcl.StringCanonicalize(des.ConnectVersion, initial.ConnectVersion) || dcl.IsZeroValue(des.ConnectVersion) { - cDes.ConnectVersion = initial.ConnectVersion - } else { - cDes.ConnectVersion = des.ConnectVersion - } - if dcl.BoolCanonicalize(des.V1Beta1Crd, initial.V1Beta1Crd) || dcl.IsZeroValue(des.V1Beta1Crd) { - 
cDes.V1Beta1Crd = initial.V1Beta1Crd - } else { - cDes.V1Beta1Crd = des.V1Beta1Crd - } - - return cDes -} - -func canonicalizeMembershipEndpointKubernetesResourceResourceOptionsSlice(des, initial []MembershipEndpointKubernetesResourceResourceOptions, opts ...dcl.ApplyOption) []MembershipEndpointKubernetesResourceResourceOptions { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]MembershipEndpointKubernetesResourceResourceOptions, 0, len(des)) - for _, d := range des { - cd := canonicalizeMembershipEndpointKubernetesResourceResourceOptions(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]MembershipEndpointKubernetesResourceResourceOptions, 0, len(des)) - for i, d := range des { - cd := canonicalizeMembershipEndpointKubernetesResourceResourceOptions(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewMembershipEndpointKubernetesResourceResourceOptions(c *Client, des, nw *MembershipEndpointKubernetesResourceResourceOptions) *MembershipEndpointKubernetesResourceResourceOptions { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for MembershipEndpointKubernetesResourceResourceOptions while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.ConnectVersion, nw.ConnectVersion) { - nw.ConnectVersion = des.ConnectVersion - } - if dcl.BoolCanonicalize(des.V1Beta1Crd, nw.V1Beta1Crd) { - nw.V1Beta1Crd = des.V1Beta1Crd - } - - return nw -} - -func canonicalizeNewMembershipEndpointKubernetesResourceResourceOptionsSet(c *Client, des, nw []MembershipEndpointKubernetesResourceResourceOptions) []MembershipEndpointKubernetesResourceResourceOptions { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []MembershipEndpointKubernetesResourceResourceOptions - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareMembershipEndpointKubernetesResourceResourceOptionsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewMembershipEndpointKubernetesResourceResourceOptions(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewMembershipEndpointKubernetesResourceResourceOptionsSlice(c *Client, des, nw []MembershipEndpointKubernetesResourceResourceOptions) []MembershipEndpointKubernetesResourceResourceOptions { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []MembershipEndpointKubernetesResourceResourceOptions - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewMembershipEndpointKubernetesResourceResourceOptions(c, &d, &n)) - } - - return items -} - -func canonicalizeMembershipState(des, initial *MembershipState, opts ...dcl.ApplyOption) *MembershipState { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &MembershipState{} - - return cDes -} - -func canonicalizeMembershipStateSlice(des, initial []MembershipState, opts ...dcl.ApplyOption) []MembershipState { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]MembershipState, 0, len(des)) - for _, d := range des { - cd := canonicalizeMembershipState(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]MembershipState, 0, len(des)) - for i, d := range des { - cd := canonicalizeMembershipState(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewMembershipState(c *Client, des, nw *MembershipState) *MembershipState { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for MembershipState while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewMembershipStateSet(c *Client, des, nw []MembershipState) []MembershipState { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []MembershipState - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareMembershipStateNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewMembershipState(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewMembershipStateSlice(c *Client, des, nw []MembershipState) []MembershipState { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []MembershipState - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewMembershipState(c, &d, &n)) - } - - return items -} - -func canonicalizeMembershipAuthority(des, initial *MembershipAuthority, opts ...dcl.ApplyOption) *MembershipAuthority { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &MembershipAuthority{} - - if dcl.StringCanonicalize(des.Issuer, initial.Issuer) || dcl.IsZeroValue(des.Issuer) { - cDes.Issuer = initial.Issuer - } else { - cDes.Issuer = des.Issuer - } - - return cDes -} - -func canonicalizeMembershipAuthoritySlice(des, initial []MembershipAuthority, opts ...dcl.ApplyOption) []MembershipAuthority { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]MembershipAuthority, 0, len(des)) - for _, d := range des { - cd := canonicalizeMembershipAuthority(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]MembershipAuthority, 0, len(des)) - for i, d := range des { - cd := canonicalizeMembershipAuthority(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewMembershipAuthority(c *Client, des, nw *MembershipAuthority) *MembershipAuthority { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for MembershipAuthority while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Issuer, nw.Issuer) { - nw.Issuer = des.Issuer - } - if dcl.StringCanonicalize(des.WorkloadIdentityPool, nw.WorkloadIdentityPool) { - nw.WorkloadIdentityPool = des.WorkloadIdentityPool - } - if dcl.StringCanonicalize(des.IdentityProvider, nw.IdentityProvider) { - nw.IdentityProvider = des.IdentityProvider - } - - return nw -} - -func canonicalizeNewMembershipAuthoritySet(c *Client, des, nw []MembershipAuthority) []MembershipAuthority { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []MembershipAuthority - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareMembershipAuthorityNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewMembershipAuthority(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewMembershipAuthoritySlice(c *Client, des, nw []MembershipAuthority) []MembershipAuthority { - if des == nil { - return nw - } - - // Lengths are unequal. 
A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []MembershipAuthority - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewMembershipAuthority(c, &d, &n)) - } - - return items -} - -// The differ returns a list of diffs, along with a list of operations that should be taken -// to remedy them. Right now, it does not attempt to consolidate operations - if several -// fields can be fixed with a patch update, it will perform the patch several times. -// Diffs on some fields will be ignored if the `desired` state has an empty (nil) -// value. This empty value indicates that the user does not care about the state for -// the field. Empty fields on the actual object will cause diffs. -// TODO(magic-modules-eng): for efficiency in some resources, add batching. -func diffMembership(c *Client, desired, actual *Membership, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) { - if desired == nil || actual == nil { - return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual) - } - - c.Config.Logger.Infof("Diff function called with desired state: %v", desired) - c.Config.Logger.Infof("Diff function called with actual state: %v", actual) - - var fn dcl.FieldName - var newDiffs []*dcl.FieldDiff - // New style diffs. - if ds, err := dcl.Diff(desired.Endpoint, actual.Endpoint, dcl.DiffInfo{ObjectFunction: compareMembershipEndpointNewStyle, EmptyObject: EmptyMembershipEndpoint, OperationSelector: dcl.TriggersOperation("updateMembershipUpdateMembershipOperation")}, fn.AddNest("Endpoint")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Labels, actual.Labels, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateMembershipUpdateMembershipOperation")}, fn.AddNest("Labels")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Description, actual.Description, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateMembershipUpdateMembershipOperation")}, fn.AddNest("Description")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.State, actual.State, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareMembershipStateNewStyle, EmptyObject: EmptyMembershipState, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("State")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.CreateTime, actual.CreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreateTime")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.DeleteTime, actual.DeleteTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DeleteTime")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.ExternalId, actual.ExternalId, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.TriggersOperation("updateMembershipUpdateMembershipOperation")}, fn.AddNest("ExternalId")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.LastConnectionTime, actual.LastConnectionTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("LastConnectionTime")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.UniqueId, actual.UniqueId, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("UniqueId")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Authority, actual.Authority, dcl.DiffInfo{ObjectFunction: compareMembershipAuthorityNewStyle, EmptyObject: EmptyMembershipAuthority, OperationSelector: dcl.TriggersOperation("updateMembershipUpdateMembershipOperation")}, fn.AddNest("Authority")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.InfrastructureType, actual.InfrastructureType, dcl.DiffInfo{ServerDefault: true, Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateMembershipUpdateMembershipOperation")}, fn.AddNest("InfrastructureType")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Location, actual.Location, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Location")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if len(newDiffs) > 0 { - c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs) - } - return newDiffs, nil -} -func compareMembershipEndpointNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*MembershipEndpoint) - if !ok { - desiredNotPointer, ok := d.(MembershipEndpoint) - if !ok { - return nil, fmt.Errorf("obj %v is not a MembershipEndpoint or *MembershipEndpoint", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*MembershipEndpoint) - if !ok { - actualNotPointer, ok := a.(MembershipEndpoint) - if !ok { - return nil, fmt.Errorf("obj %v is not a MembershipEndpoint", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.GkeCluster, actual.GkeCluster, dcl.DiffInfo{ObjectFunction: compareMembershipEndpointGkeClusterNewStyle, EmptyObject: EmptyMembershipEndpointGkeCluster, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("GkeCluster")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.KubernetesMetadata, actual.KubernetesMetadata, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareMembershipEndpointKubernetesMetadataNewStyle, EmptyObject: EmptyMembershipEndpointKubernetesMetadata, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KubernetesMetadata")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.KubernetesResource, actual.KubernetesResource, dcl.DiffInfo{ObjectFunction: compareMembershipEndpointKubernetesResourceNewStyle, EmptyObject: EmptyMembershipEndpointKubernetesResource, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KubernetesResource")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareMembershipEndpointGkeClusterNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*MembershipEndpointGkeCluster) - if !ok { - desiredNotPointer, ok := d.(MembershipEndpointGkeCluster) - if !ok { - return nil, fmt.Errorf("obj %v is not a MembershipEndpointGkeCluster or *MembershipEndpointGkeCluster", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*MembershipEndpointGkeCluster) - if !ok { - actualNotPointer, ok := a.(MembershipEndpointGkeCluster) - if !ok { - return nil, fmt.Errorf("obj %v is not a MembershipEndpointGkeCluster", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.ResourceLink, actual.ResourceLink, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ResourceLink")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareMembershipEndpointKubernetesMetadataNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*MembershipEndpointKubernetesMetadata) - if !ok { - desiredNotPointer, ok := d.(MembershipEndpointKubernetesMetadata) - if !ok { - return nil, fmt.Errorf("obj %v is not a MembershipEndpointKubernetesMetadata or *MembershipEndpointKubernetesMetadata", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*MembershipEndpointKubernetesMetadata) - if !ok { - actualNotPointer, ok := a.(MembershipEndpointKubernetesMetadata) - if !ok { - return nil, fmt.Errorf("obj %v is not a MembershipEndpointKubernetesMetadata", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.KubernetesApiServerVersion, actual.KubernetesApiServerVersion, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KubernetesApiServerVersion")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.NodeProviderId, actual.NodeProviderId, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NodeProviderId")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.NodeCount, actual.NodeCount, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("NodeCount")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.VcpuCount, actual.VcpuCount, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("VcpuCount")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.MemoryMb, actual.MemoryMb, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MemoryMb")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareMembershipEndpointKubernetesResourceNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*MembershipEndpointKubernetesResource) - if !ok { - desiredNotPointer, ok := d.(MembershipEndpointKubernetesResource) - if !ok { - return nil, fmt.Errorf("obj %v is not a MembershipEndpointKubernetesResource or *MembershipEndpointKubernetesResource", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*MembershipEndpointKubernetesResource) - if !ok { - actualNotPointer, ok := a.(MembershipEndpointKubernetesResource) - if !ok { - return nil, fmt.Errorf("obj %v is not a MembershipEndpointKubernetesResource", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.MembershipCrManifest, actual.MembershipCrManifest, dcl.DiffInfo{Ignore: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MembershipCrManifest")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.MembershipResources, actual.MembershipResources, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareMembershipEndpointKubernetesResourceMembershipResourcesNewStyle, EmptyObject: EmptyMembershipEndpointKubernetesResourceMembershipResources, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MembershipResources")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.ConnectResources, actual.ConnectResources, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareMembershipEndpointKubernetesResourceConnectResourcesNewStyle, EmptyObject: EmptyMembershipEndpointKubernetesResourceConnectResources, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ConnectResources")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.ResourceOptions, actual.ResourceOptions, dcl.DiffInfo{ObjectFunction: compareMembershipEndpointKubernetesResourceResourceOptionsNewStyle, EmptyObject: EmptyMembershipEndpointKubernetesResourceResourceOptions, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ResourceOptions")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareMembershipEndpointKubernetesResourceMembershipResourcesNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*MembershipEndpointKubernetesResourceMembershipResources) - if !ok { - desiredNotPointer, ok := d.(MembershipEndpointKubernetesResourceMembershipResources) - if !ok { - return nil, fmt.Errorf("obj %v is not a MembershipEndpointKubernetesResourceMembershipResources or *MembershipEndpointKubernetesResourceMembershipResources", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*MembershipEndpointKubernetesResourceMembershipResources) - if !ok { - actualNotPointer, ok := a.(MembershipEndpointKubernetesResourceMembershipResources) - if !ok { - return nil, fmt.Errorf("obj %v is not a MembershipEndpointKubernetesResourceMembershipResources", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Manifest, actual.Manifest, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Manifest")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.ClusterScoped, actual.ClusterScoped, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ClusterScoped")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareMembershipEndpointKubernetesResourceConnectResourcesNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*MembershipEndpointKubernetesResourceConnectResources) - if !ok { - desiredNotPointer, ok := d.(MembershipEndpointKubernetesResourceConnectResources) - if !ok { - return nil, fmt.Errorf("obj %v is not a MembershipEndpointKubernetesResourceConnectResources or *MembershipEndpointKubernetesResourceConnectResources", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*MembershipEndpointKubernetesResourceConnectResources) - if !ok { - actualNotPointer, ok := a.(MembershipEndpointKubernetesResourceConnectResources) - if !ok { - return nil, fmt.Errorf("obj %v is not a MembershipEndpointKubernetesResourceConnectResources", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Manifest, actual.Manifest, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Manifest")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.ClusterScoped, actual.ClusterScoped, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ClusterScoped")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareMembershipEndpointKubernetesResourceResourceOptionsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*MembershipEndpointKubernetesResourceResourceOptions) - if !ok { - desiredNotPointer, ok := d.(MembershipEndpointKubernetesResourceResourceOptions) - if !ok { - return nil, fmt.Errorf("obj %v is not a MembershipEndpointKubernetesResourceResourceOptions or *MembershipEndpointKubernetesResourceResourceOptions", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*MembershipEndpointKubernetesResourceResourceOptions) - if !ok { - actualNotPointer, ok := a.(MembershipEndpointKubernetesResourceResourceOptions) - if !ok { - return nil, fmt.Errorf("obj %v is not a MembershipEndpointKubernetesResourceResourceOptions", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.ConnectVersion, actual.ConnectVersion, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ConnectVersion")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.V1Beta1Crd, actual.V1Beta1Crd, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("V1beta1Crd")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareMembershipStateNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*MembershipState) - if !ok { - desiredNotPointer, ok := d.(MembershipState) - if !ok { - return nil, fmt.Errorf("obj %v is not a MembershipState or *MembershipState", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*MembershipState) - if !ok { - actualNotPointer, ok := a.(MembershipState) - if !ok { - return nil, fmt.Errorf("obj %v is not a MembershipState", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Code, actual.Code, dcl.DiffInfo{OutputOnly: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Code")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareMembershipAuthorityNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*MembershipAuthority) - if !ok { - desiredNotPointer, ok := d.(MembershipAuthority) - if !ok { - return nil, fmt.Errorf("obj %v is not a MembershipAuthority or *MembershipAuthority", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*MembershipAuthority) - if !ok { - actualNotPointer, ok := a.(MembershipAuthority) - if !ok { - return nil, fmt.Errorf("obj %v is not a MembershipAuthority", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Issuer, actual.Issuer, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateMembershipUpdateMembershipOperation")}, fn.AddNest("Issuer")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.WorkloadIdentityPool, actual.WorkloadIdentityPool, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("WorkloadIdentityPool")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.IdentityProvider, actual.IdentityProvider, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("IdentityProvider")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -// urlNormalized returns a copy of the resource struct with values normalized -// for URL substitutions. For instance, it converts long-form self-links to -// short-form so they can be substituted in. -func (r *Membership) urlNormalized() *Membership { - normalized := dcl.Copy(*r).(Membership) - normalized.Name = dcl.SelfLinkToName(r.Name) - normalized.Description = dcl.SelfLinkToName(r.Description) - normalized.ExternalId = dcl.SelfLinkToName(r.ExternalId) - normalized.UniqueId = dcl.SelfLinkToName(r.UniqueId) - normalized.Project = dcl.SelfLinkToName(r.Project) - normalized.Location = dcl.SelfLinkToName(r.Location) - return &normalized -} - -func (r *Membership) updateURL(userBasePath, updateName string) (string, error) { - nr := r.urlNormalized() - if updateName == "UpdateMembership" { - fields := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "location": dcl.ValueOrEmptyString(nr.Location), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("projects/{{ "{{" }}project{{ "}}" }}/locations/{{ "{{" }}location{{ "}}" }}/memberships/{{ "{{" }}name{{ "}}" }}", nr.basePath(), userBasePath, fields), nil - - } - - return "", fmt.Errorf("unknown update name: %s", updateName) -} - -// marshal encodes the Membership resource into JSON for a Create request, and -// performs transformations from the resource schema to the 
API schema if -// necessary. -func (r *Membership) marshal(c *Client) ([]byte, error) { - m, err := expandMembership(c, r) - if err != nil { - return nil, fmt.Errorf("error marshalling Membership: %w", err) - } - - return json.Marshal(m) -} - -// unmarshalMembership decodes JSON responses into the Membership resource schema. -func unmarshalMembership(b []byte, c *Client, res *Membership) (*Membership, error) { - var m map[string]interface{} - if err := json.Unmarshal(b, &m); err != nil { - return nil, err - } - return unmarshalMapMembership(m, c, res) -} - -func unmarshalMapMembership(m map[string]interface{}, c *Client, res *Membership) (*Membership, error) { - - flattened := flattenMembership(c, m, res) - if flattened == nil { - return nil, fmt.Errorf("attempted to flatten empty json object") - } - return flattened, nil -} - -// expandMembership expands Membership into a JSON request object. -func expandMembership(c *Client, f *Membership) (map[string]interface{}, error) { - m := make(map[string]interface{}) - res := f - _ = res - if v, err := expandMembershipEndpoint(c, f.Endpoint, res); err != nil { - return nil, fmt.Errorf("error expanding Endpoint into endpoint: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["endpoint"] = v - } - if v, err := dcl.DeriveField("projects/%s/locations/%s/memberships/%s", f.Name, dcl.SelfLinkToName(f.Project), dcl.SelfLinkToName(f.Location), dcl.SelfLinkToName(f.Name)); err != nil { - return nil, fmt.Errorf("error expanding Name into name: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["name"] = v - } - if v := f.Labels; dcl.ValueShouldBeSent(v) { - m["labels"] = v - } - if v := f.Description; dcl.ValueShouldBeSent(v) { - m["description"] = v - } - if v := f.ExternalId; dcl.ValueShouldBeSent(v) { - m["externalId"] = v - } - if v, err := expandMembershipAuthority(c, f.Authority, res); err != nil { - return nil, fmt.Errorf("error expanding Authority into authority: %w", err) - } else if 
!dcl.IsEmptyValueIndirect(v) { - m["authority"] = v - } - if v := f.InfrastructureType; dcl.ValueShouldBeSent(v) { - m["infrastructureType"] = v - } - if v, err := dcl.EmptyValue(); err != nil { - return nil, fmt.Errorf("error expanding Project into project: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["project"] = v - } - if v, err := dcl.EmptyValue(); err != nil { - return nil, fmt.Errorf("error expanding Location into location: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["location"] = v - } - - return m, nil -} - -// flattenMembership flattens Membership from a JSON request object into the -// Membership type. -func flattenMembership(c *Client, i interface{}, res *Membership) *Membership { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - if len(m) == 0 { - return nil - } - - resultRes := &Membership{} - resultRes.Endpoint = flattenMembershipEndpoint(c, m["endpoint"], res) - resultRes.Name = dcl.FlattenString(m["name"]) - resultRes.Labels = dcl.FlattenKeyValuePairs(m["labels"]) - resultRes.Description = dcl.FlattenString(m["description"]) - resultRes.State = flattenMembershipState(c, m["state"], res) - resultRes.CreateTime = dcl.FlattenString(m["createTime"]) - resultRes.UpdateTime = dcl.FlattenString(m["updateTime"]) - resultRes.DeleteTime = dcl.FlattenString(m["deleteTime"]) - resultRes.ExternalId = dcl.FlattenString(m["externalId"]) - resultRes.LastConnectionTime = dcl.FlattenString(m["lastConnectionTime"]) - resultRes.UniqueId = dcl.FlattenString(m["uniqueId"]) - resultRes.Authority = flattenMembershipAuthority(c, m["authority"], res) - resultRes.InfrastructureType = flattenMembershipInfrastructureTypeEnum(m["infrastructureType"]) - resultRes.Project = dcl.FlattenString(m["project"]) - resultRes.Location = dcl.FlattenString(m["location"]) - - return resultRes -} - -// expandMembershipEndpointMap expands the contents of MembershipEndpoint into a JSON -// request object. 
-func expandMembershipEndpointMap(c *Client, f map[string]MembershipEndpoint, res *Membership) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandMembershipEndpoint(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandMembershipEndpointSlice expands the contents of MembershipEndpoint into a JSON -// request object. -func expandMembershipEndpointSlice(c *Client, f []MembershipEndpoint, res *Membership) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandMembershipEndpoint(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenMembershipEndpointMap flattens the contents of MembershipEndpoint from a JSON -// response object. -func flattenMembershipEndpointMap(c *Client, i interface{}, res *Membership) map[string]MembershipEndpoint { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]MembershipEndpoint{} - } - - if len(a) == 0 { - return map[string]MembershipEndpoint{} - } - - items := make(map[string]MembershipEndpoint) - for k, item := range a { - items[k] = *flattenMembershipEndpoint(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenMembershipEndpointSlice flattens the contents of MembershipEndpoint from a JSON -// response object. 
-func flattenMembershipEndpointSlice(c *Client, i interface{}, res *Membership) []MembershipEndpoint { - a, ok := i.([]interface{}) - if !ok { - return []MembershipEndpoint{} - } - - if len(a) == 0 { - return []MembershipEndpoint{} - } - - items := make([]MembershipEndpoint, 0, len(a)) - for _, item := range a { - items = append(items, *flattenMembershipEndpoint(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandMembershipEndpoint expands an instance of MembershipEndpoint into a JSON -// request object. -func expandMembershipEndpoint(c *Client, f *MembershipEndpoint, res *Membership) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandMembershipEndpointGkeCluster(c, f.GkeCluster, res); err != nil { - return nil, fmt.Errorf("error expanding GkeCluster into gkeCluster: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["gkeCluster"] = v - } - if v, err := expandMembershipEndpointKubernetesResource(c, f.KubernetesResource, res); err != nil { - return nil, fmt.Errorf("error expanding KubernetesResource into kubernetesResource: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["kubernetesResource"] = v - } - - return m, nil -} - -// flattenMembershipEndpoint flattens an instance of MembershipEndpoint from a JSON -// response object. 
-func flattenMembershipEndpoint(c *Client, i interface{}, res *Membership) *MembershipEndpoint { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &MembershipEndpoint{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyMembershipEndpoint - } - r.GkeCluster = flattenMembershipEndpointGkeCluster(c, m["gkeCluster"], res) - r.KubernetesMetadata = flattenMembershipEndpointKubernetesMetadata(c, m["kubernetesMetadata"], res) - r.KubernetesResource = flattenMembershipEndpointKubernetesResource(c, m["kubernetesResource"], res) - - return r -} - -// expandMembershipEndpointGkeClusterMap expands the contents of MembershipEndpointGkeCluster into a JSON -// request object. -func expandMembershipEndpointGkeClusterMap(c *Client, f map[string]MembershipEndpointGkeCluster, res *Membership) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandMembershipEndpointGkeCluster(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandMembershipEndpointGkeClusterSlice expands the contents of MembershipEndpointGkeCluster into a JSON -// request object. -func expandMembershipEndpointGkeClusterSlice(c *Client, f []MembershipEndpointGkeCluster, res *Membership) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandMembershipEndpointGkeCluster(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenMembershipEndpointGkeClusterMap flattens the contents of MembershipEndpointGkeCluster from a JSON -// response object. 
-func flattenMembershipEndpointGkeClusterMap(c *Client, i interface{}, res *Membership) map[string]MembershipEndpointGkeCluster { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]MembershipEndpointGkeCluster{} - } - - if len(a) == 0 { - return map[string]MembershipEndpointGkeCluster{} - } - - items := make(map[string]MembershipEndpointGkeCluster) - for k, item := range a { - items[k] = *flattenMembershipEndpointGkeCluster(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenMembershipEndpointGkeClusterSlice flattens the contents of MembershipEndpointGkeCluster from a JSON -// response object. -func flattenMembershipEndpointGkeClusterSlice(c *Client, i interface{}, res *Membership) []MembershipEndpointGkeCluster { - a, ok := i.([]interface{}) - if !ok { - return []MembershipEndpointGkeCluster{} - } - - if len(a) == 0 { - return []MembershipEndpointGkeCluster{} - } - - items := make([]MembershipEndpointGkeCluster, 0, len(a)) - for _, item := range a { - items = append(items, *flattenMembershipEndpointGkeCluster(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandMembershipEndpointGkeCluster expands an instance of MembershipEndpointGkeCluster into a JSON -// request object. -func expandMembershipEndpointGkeCluster(c *Client, f *MembershipEndpointGkeCluster, res *Membership) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandHubReferenceLink(c, f.ResourceLink, res); err != nil { - return nil, fmt.Errorf("error expanding ResourceLink into resourceLink: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["resourceLink"] = v - } - - return m, nil -} - -// flattenMembershipEndpointGkeCluster flattens an instance of MembershipEndpointGkeCluster from a JSON -// response object. 
-func flattenMembershipEndpointGkeCluster(c *Client, i interface{}, res *Membership) *MembershipEndpointGkeCluster { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &MembershipEndpointGkeCluster{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyMembershipEndpointGkeCluster - } - r.ResourceLink = flattenHubReferenceLink(c, m["resourceLink"], res) - - return r -} - -// expandMembershipEndpointKubernetesMetadataMap expands the contents of MembershipEndpointKubernetesMetadata into a JSON -// request object. -func expandMembershipEndpointKubernetesMetadataMap(c *Client, f map[string]MembershipEndpointKubernetesMetadata, res *Membership) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandMembershipEndpointKubernetesMetadata(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandMembershipEndpointKubernetesMetadataSlice expands the contents of MembershipEndpointKubernetesMetadata into a JSON -// request object. -func expandMembershipEndpointKubernetesMetadataSlice(c *Client, f []MembershipEndpointKubernetesMetadata, res *Membership) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandMembershipEndpointKubernetesMetadata(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenMembershipEndpointKubernetesMetadataMap flattens the contents of MembershipEndpointKubernetesMetadata from a JSON -// response object. 
-func flattenMembershipEndpointKubernetesMetadataMap(c *Client, i interface{}, res *Membership) map[string]MembershipEndpointKubernetesMetadata { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]MembershipEndpointKubernetesMetadata{} - } - - if len(a) == 0 { - return map[string]MembershipEndpointKubernetesMetadata{} - } - - items := make(map[string]MembershipEndpointKubernetesMetadata) - for k, item := range a { - items[k] = *flattenMembershipEndpointKubernetesMetadata(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenMembershipEndpointKubernetesMetadataSlice flattens the contents of MembershipEndpointKubernetesMetadata from a JSON -// response object. -func flattenMembershipEndpointKubernetesMetadataSlice(c *Client, i interface{}, res *Membership) []MembershipEndpointKubernetesMetadata { - a, ok := i.([]interface{}) - if !ok { - return []MembershipEndpointKubernetesMetadata{} - } - - if len(a) == 0 { - return []MembershipEndpointKubernetesMetadata{} - } - - items := make([]MembershipEndpointKubernetesMetadata, 0, len(a)) - for _, item := range a { - items = append(items, *flattenMembershipEndpointKubernetesMetadata(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandMembershipEndpointKubernetesMetadata expands an instance of MembershipEndpointKubernetesMetadata into a JSON -// request object. -func expandMembershipEndpointKubernetesMetadata(c *Client, f *MembershipEndpointKubernetesMetadata, res *Membership) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - - return m, nil -} - -// flattenMembershipEndpointKubernetesMetadata flattens an instance of MembershipEndpointKubernetesMetadata from a JSON -// response object. 
-func flattenMembershipEndpointKubernetesMetadata(c *Client, i interface{}, res *Membership) *MembershipEndpointKubernetesMetadata { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &MembershipEndpointKubernetesMetadata{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyMembershipEndpointKubernetesMetadata - } - r.KubernetesApiServerVersion = dcl.FlattenString(m["kubernetesApiServerVersion"]) - r.NodeProviderId = dcl.FlattenString(m["nodeProviderId"]) - r.NodeCount = dcl.FlattenInteger(m["nodeCount"]) - r.VcpuCount = dcl.FlattenInteger(m["vcpuCount"]) - r.MemoryMb = dcl.FlattenInteger(m["memoryMb"]) - r.UpdateTime = dcl.FlattenString(m["updateTime"]) - - return r -} - -// expandMembershipEndpointKubernetesResourceMap expands the contents of MembershipEndpointKubernetesResource into a JSON -// request object. -func expandMembershipEndpointKubernetesResourceMap(c *Client, f map[string]MembershipEndpointKubernetesResource, res *Membership) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandMembershipEndpointKubernetesResource(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandMembershipEndpointKubernetesResourceSlice expands the contents of MembershipEndpointKubernetesResource into a JSON -// request object. 
-func expandMembershipEndpointKubernetesResourceSlice(c *Client, f []MembershipEndpointKubernetesResource, res *Membership) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandMembershipEndpointKubernetesResource(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenMembershipEndpointKubernetesResourceMap flattens the contents of MembershipEndpointKubernetesResource from a JSON -// response object. -func flattenMembershipEndpointKubernetesResourceMap(c *Client, i interface{}, res *Membership) map[string]MembershipEndpointKubernetesResource { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]MembershipEndpointKubernetesResource{} - } - - if len(a) == 0 { - return map[string]MembershipEndpointKubernetesResource{} - } - - items := make(map[string]MembershipEndpointKubernetesResource) - for k, item := range a { - items[k] = *flattenMembershipEndpointKubernetesResource(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenMembershipEndpointKubernetesResourceSlice flattens the contents of MembershipEndpointKubernetesResource from a JSON -// response object. -func flattenMembershipEndpointKubernetesResourceSlice(c *Client, i interface{}, res *Membership) []MembershipEndpointKubernetesResource { - a, ok := i.([]interface{}) - if !ok { - return []MembershipEndpointKubernetesResource{} - } - - if len(a) == 0 { - return []MembershipEndpointKubernetesResource{} - } - - items := make([]MembershipEndpointKubernetesResource, 0, len(a)) - for _, item := range a { - items = append(items, *flattenMembershipEndpointKubernetesResource(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandMembershipEndpointKubernetesResource expands an instance of MembershipEndpointKubernetesResource into a JSON -// request object. 
-func expandMembershipEndpointKubernetesResource(c *Client, f *MembershipEndpointKubernetesResource, res *Membership) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.MembershipCrManifest; !dcl.IsEmptyValueIndirect(v) { - m["membershipCrManifest"] = v - } - if v, err := expandMembershipEndpointKubernetesResourceResourceOptions(c, f.ResourceOptions, res); err != nil { - return nil, fmt.Errorf("error expanding ResourceOptions into resourceOptions: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["resourceOptions"] = v - } - - return m, nil -} - -// flattenMembershipEndpointKubernetesResource flattens an instance of MembershipEndpointKubernetesResource from a JSON -// response object. -func flattenMembershipEndpointKubernetesResource(c *Client, i interface{}, res *Membership) *MembershipEndpointKubernetesResource { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &MembershipEndpointKubernetesResource{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyMembershipEndpointKubernetesResource - } - r.MembershipCrManifest = dcl.FlattenSecretValue(m["membershipCrManifest"]) - r.MembershipResources = flattenMembershipEndpointKubernetesResourceMembershipResourcesSlice(c, m["membershipResources"], res) - r.ConnectResources = flattenMembershipEndpointKubernetesResourceConnectResourcesSlice(c, m["connectResources"], res) - r.ResourceOptions = flattenMembershipEndpointKubernetesResourceResourceOptions(c, m["resourceOptions"], res) - - return r -} - -// expandMembershipEndpointKubernetesResourceMembershipResourcesMap expands the contents of MembershipEndpointKubernetesResourceMembershipResources into a JSON -// request object. 
-func expandMembershipEndpointKubernetesResourceMembershipResourcesMap(c *Client, f map[string]MembershipEndpointKubernetesResourceMembershipResources, res *Membership) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandMembershipEndpointKubernetesResourceMembershipResources(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandMembershipEndpointKubernetesResourceMembershipResourcesSlice expands the contents of MembershipEndpointKubernetesResourceMembershipResources into a JSON -// request object. -func expandMembershipEndpointKubernetesResourceMembershipResourcesSlice(c *Client, f []MembershipEndpointKubernetesResourceMembershipResources, res *Membership) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandMembershipEndpointKubernetesResourceMembershipResources(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenMembershipEndpointKubernetesResourceMembershipResourcesMap flattens the contents of MembershipEndpointKubernetesResourceMembershipResources from a JSON -// response object. 
-func flattenMembershipEndpointKubernetesResourceMembershipResourcesMap(c *Client, i interface{}, res *Membership) map[string]MembershipEndpointKubernetesResourceMembershipResources { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]MembershipEndpointKubernetesResourceMembershipResources{} - } - - if len(a) == 0 { - return map[string]MembershipEndpointKubernetesResourceMembershipResources{} - } - - items := make(map[string]MembershipEndpointKubernetesResourceMembershipResources) - for k, item := range a { - items[k] = *flattenMembershipEndpointKubernetesResourceMembershipResources(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenMembershipEndpointKubernetesResourceMembershipResourcesSlice flattens the contents of MembershipEndpointKubernetesResourceMembershipResources from a JSON -// response object. -func flattenMembershipEndpointKubernetesResourceMembershipResourcesSlice(c *Client, i interface{}, res *Membership) []MembershipEndpointKubernetesResourceMembershipResources { - a, ok := i.([]interface{}) - if !ok { - return []MembershipEndpointKubernetesResourceMembershipResources{} - } - - if len(a) == 0 { - return []MembershipEndpointKubernetesResourceMembershipResources{} - } - - items := make([]MembershipEndpointKubernetesResourceMembershipResources, 0, len(a)) - for _, item := range a { - items = append(items, *flattenMembershipEndpointKubernetesResourceMembershipResources(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandMembershipEndpointKubernetesResourceMembershipResources expands an instance of MembershipEndpointKubernetesResourceMembershipResources into a JSON -// request object. 
-func expandMembershipEndpointKubernetesResourceMembershipResources(c *Client, f *MembershipEndpointKubernetesResourceMembershipResources, res *Membership) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Manifest; !dcl.IsEmptyValueIndirect(v) { - m["manifest"] = v - } - if v := f.ClusterScoped; !dcl.IsEmptyValueIndirect(v) { - m["clusterScoped"] = v - } - - return m, nil -} - -// flattenMembershipEndpointKubernetesResourceMembershipResources flattens an instance of MembershipEndpointKubernetesResourceMembershipResources from a JSON -// response object. -func flattenMembershipEndpointKubernetesResourceMembershipResources(c *Client, i interface{}, res *Membership) *MembershipEndpointKubernetesResourceMembershipResources { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &MembershipEndpointKubernetesResourceMembershipResources{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyMembershipEndpointKubernetesResourceMembershipResources - } - r.Manifest = dcl.FlattenString(m["manifest"]) - r.ClusterScoped = dcl.FlattenBool(m["clusterScoped"]) - - return r -} - -// expandMembershipEndpointKubernetesResourceConnectResourcesMap expands the contents of MembershipEndpointKubernetesResourceConnectResources into a JSON -// request object. 
-func expandMembershipEndpointKubernetesResourceConnectResourcesMap(c *Client, f map[string]MembershipEndpointKubernetesResourceConnectResources, res *Membership) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandMembershipEndpointKubernetesResourceConnectResources(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandMembershipEndpointKubernetesResourceConnectResourcesSlice expands the contents of MembershipEndpointKubernetesResourceConnectResources into a JSON -// request object. -func expandMembershipEndpointKubernetesResourceConnectResourcesSlice(c *Client, f []MembershipEndpointKubernetesResourceConnectResources, res *Membership) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandMembershipEndpointKubernetesResourceConnectResources(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenMembershipEndpointKubernetesResourceConnectResourcesMap flattens the contents of MembershipEndpointKubernetesResourceConnectResources from a JSON -// response object. 
-func flattenMembershipEndpointKubernetesResourceConnectResourcesMap(c *Client, i interface{}, res *Membership) map[string]MembershipEndpointKubernetesResourceConnectResources { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]MembershipEndpointKubernetesResourceConnectResources{} - } - - if len(a) == 0 { - return map[string]MembershipEndpointKubernetesResourceConnectResources{} - } - - items := make(map[string]MembershipEndpointKubernetesResourceConnectResources) - for k, item := range a { - items[k] = *flattenMembershipEndpointKubernetesResourceConnectResources(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenMembershipEndpointKubernetesResourceConnectResourcesSlice flattens the contents of MembershipEndpointKubernetesResourceConnectResources from a JSON -// response object. -func flattenMembershipEndpointKubernetesResourceConnectResourcesSlice(c *Client, i interface{}, res *Membership) []MembershipEndpointKubernetesResourceConnectResources { - a, ok := i.([]interface{}) - if !ok { - return []MembershipEndpointKubernetesResourceConnectResources{} - } - - if len(a) == 0 { - return []MembershipEndpointKubernetesResourceConnectResources{} - } - - items := make([]MembershipEndpointKubernetesResourceConnectResources, 0, len(a)) - for _, item := range a { - items = append(items, *flattenMembershipEndpointKubernetesResourceConnectResources(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandMembershipEndpointKubernetesResourceConnectResources expands an instance of MembershipEndpointKubernetesResourceConnectResources into a JSON -// request object. 
-func expandMembershipEndpointKubernetesResourceConnectResources(c *Client, f *MembershipEndpointKubernetesResourceConnectResources, res *Membership) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Manifest; !dcl.IsEmptyValueIndirect(v) { - m["manifest"] = v - } - if v := f.ClusterScoped; !dcl.IsEmptyValueIndirect(v) { - m["clusterScoped"] = v - } - - return m, nil -} - -// flattenMembershipEndpointKubernetesResourceConnectResources flattens an instance of MembershipEndpointKubernetesResourceConnectResources from a JSON -// response object. -func flattenMembershipEndpointKubernetesResourceConnectResources(c *Client, i interface{}, res *Membership) *MembershipEndpointKubernetesResourceConnectResources { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &MembershipEndpointKubernetesResourceConnectResources{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyMembershipEndpointKubernetesResourceConnectResources - } - r.Manifest = dcl.FlattenString(m["manifest"]) - r.ClusterScoped = dcl.FlattenBool(m["clusterScoped"]) - - return r -} - -// expandMembershipEndpointKubernetesResourceResourceOptionsMap expands the contents of MembershipEndpointKubernetesResourceResourceOptions into a JSON -// request object. 
-func expandMembershipEndpointKubernetesResourceResourceOptionsMap(c *Client, f map[string]MembershipEndpointKubernetesResourceResourceOptions, res *Membership) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandMembershipEndpointKubernetesResourceResourceOptions(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandMembershipEndpointKubernetesResourceResourceOptionsSlice expands the contents of MembershipEndpointKubernetesResourceResourceOptions into a JSON -// request object. -func expandMembershipEndpointKubernetesResourceResourceOptionsSlice(c *Client, f []MembershipEndpointKubernetesResourceResourceOptions, res *Membership) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandMembershipEndpointKubernetesResourceResourceOptions(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenMembershipEndpointKubernetesResourceResourceOptionsMap flattens the contents of MembershipEndpointKubernetesResourceResourceOptions from a JSON -// response object. 
-func flattenMembershipEndpointKubernetesResourceResourceOptionsMap(c *Client, i interface{}, res *Membership) map[string]MembershipEndpointKubernetesResourceResourceOptions { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]MembershipEndpointKubernetesResourceResourceOptions{} - } - - if len(a) == 0 { - return map[string]MembershipEndpointKubernetesResourceResourceOptions{} - } - - items := make(map[string]MembershipEndpointKubernetesResourceResourceOptions) - for k, item := range a { - items[k] = *flattenMembershipEndpointKubernetesResourceResourceOptions(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenMembershipEndpointKubernetesResourceResourceOptionsSlice flattens the contents of MembershipEndpointKubernetesResourceResourceOptions from a JSON -// response object. -func flattenMembershipEndpointKubernetesResourceResourceOptionsSlice(c *Client, i interface{}, res *Membership) []MembershipEndpointKubernetesResourceResourceOptions { - a, ok := i.([]interface{}) - if !ok { - return []MembershipEndpointKubernetesResourceResourceOptions{} - } - - if len(a) == 0 { - return []MembershipEndpointKubernetesResourceResourceOptions{} - } - - items := make([]MembershipEndpointKubernetesResourceResourceOptions, 0, len(a)) - for _, item := range a { - items = append(items, *flattenMembershipEndpointKubernetesResourceResourceOptions(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandMembershipEndpointKubernetesResourceResourceOptions expands an instance of MembershipEndpointKubernetesResourceResourceOptions into a JSON -// request object. 
-func expandMembershipEndpointKubernetesResourceResourceOptions(c *Client, f *MembershipEndpointKubernetesResourceResourceOptions, res *Membership) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.ConnectVersion; !dcl.IsEmptyValueIndirect(v) { - m["connectVersion"] = v - } - if v := f.V1Beta1Crd; !dcl.IsEmptyValueIndirect(v) { - m["v1beta1Crd"] = v - } - - return m, nil -} - -// flattenMembershipEndpointKubernetesResourceResourceOptions flattens an instance of MembershipEndpointKubernetesResourceResourceOptions from a JSON -// response object. -func flattenMembershipEndpointKubernetesResourceResourceOptions(c *Client, i interface{}, res *Membership) *MembershipEndpointKubernetesResourceResourceOptions { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &MembershipEndpointKubernetesResourceResourceOptions{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyMembershipEndpointKubernetesResourceResourceOptions - } - r.ConnectVersion = dcl.FlattenString(m["connectVersion"]) - r.V1Beta1Crd = dcl.FlattenBool(m["v1beta1Crd"]) - - return r -} - -// expandMembershipStateMap expands the contents of MembershipState into a JSON -// request object. -func expandMembershipStateMap(c *Client, f map[string]MembershipState, res *Membership) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandMembershipState(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandMembershipStateSlice expands the contents of MembershipState into a JSON -// request object. 
-func expandMembershipStateSlice(c *Client, f []MembershipState, res *Membership) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandMembershipState(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenMembershipStateMap flattens the contents of MembershipState from a JSON -// response object. -func flattenMembershipStateMap(c *Client, i interface{}, res *Membership) map[string]MembershipState { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]MembershipState{} - } - - if len(a) == 0 { - return map[string]MembershipState{} - } - - items := make(map[string]MembershipState) - for k, item := range a { - items[k] = *flattenMembershipState(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenMembershipStateSlice flattens the contents of MembershipState from a JSON -// response object. -func flattenMembershipStateSlice(c *Client, i interface{}, res *Membership) []MembershipState { - a, ok := i.([]interface{}) - if !ok { - return []MembershipState{} - } - - if len(a) == 0 { - return []MembershipState{} - } - - items := make([]MembershipState, 0, len(a)) - for _, item := range a { - items = append(items, *flattenMembershipState(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandMembershipState expands an instance of MembershipState into a JSON -// request object. -func expandMembershipState(c *Client, f *MembershipState, res *Membership) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - - return m, nil -} - -// flattenMembershipState flattens an instance of MembershipState from a JSON -// response object. 
-func flattenMembershipState(c *Client, i interface{}, res *Membership) *MembershipState { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &MembershipState{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyMembershipState - } - r.Code = flattenMembershipStateCodeEnum(m["code"]) - - return r -} - -// expandMembershipAuthorityMap expands the contents of MembershipAuthority into a JSON -// request object. -func expandMembershipAuthorityMap(c *Client, f map[string]MembershipAuthority, res *Membership) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandMembershipAuthority(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandMembershipAuthoritySlice expands the contents of MembershipAuthority into a JSON -// request object. -func expandMembershipAuthoritySlice(c *Client, f []MembershipAuthority, res *Membership) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandMembershipAuthority(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenMembershipAuthorityMap flattens the contents of MembershipAuthority from a JSON -// response object. 
-func flattenMembershipAuthorityMap(c *Client, i interface{}, res *Membership) map[string]MembershipAuthority { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]MembershipAuthority{} - } - - if len(a) == 0 { - return map[string]MembershipAuthority{} - } - - items := make(map[string]MembershipAuthority) - for k, item := range a { - items[k] = *flattenMembershipAuthority(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenMembershipAuthoritySlice flattens the contents of MembershipAuthority from a JSON -// response object. -func flattenMembershipAuthoritySlice(c *Client, i interface{}, res *Membership) []MembershipAuthority { - a, ok := i.([]interface{}) - if !ok { - return []MembershipAuthority{} - } - - if len(a) == 0 { - return []MembershipAuthority{} - } - - items := make([]MembershipAuthority, 0, len(a)) - for _, item := range a { - items = append(items, *flattenMembershipAuthority(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandMembershipAuthority expands an instance of MembershipAuthority into a JSON -// request object. -func expandMembershipAuthority(c *Client, f *MembershipAuthority, res *Membership) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Issuer; !dcl.IsEmptyValueIndirect(v) { - m["issuer"] = v - } - - return m, nil -} - -// flattenMembershipAuthority flattens an instance of MembershipAuthority from a JSON -// response object. 
-func flattenMembershipAuthority(c *Client, i interface{}, res *Membership) *MembershipAuthority { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &MembershipAuthority{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyMembershipAuthority - } - r.Issuer = dcl.FlattenString(m["issuer"]) - r.WorkloadIdentityPool = dcl.FlattenString(m["workloadIdentityPool"]) - r.IdentityProvider = dcl.FlattenString(m["identityProvider"]) - - return r -} - -// flattenMembershipStateCodeEnumMap flattens the contents of MembershipStateCodeEnum from a JSON -// response object. -func flattenMembershipStateCodeEnumMap(c *Client, i interface{}, res *Membership) map[string]MembershipStateCodeEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]MembershipStateCodeEnum{} - } - - if len(a) == 0 { - return map[string]MembershipStateCodeEnum{} - } - - items := make(map[string]MembershipStateCodeEnum) - for k, item := range a { - items[k] = *flattenMembershipStateCodeEnum(item.(interface{})) - } - - return items -} - -// flattenMembershipStateCodeEnumSlice flattens the contents of MembershipStateCodeEnum from a JSON -// response object. -func flattenMembershipStateCodeEnumSlice(c *Client, i interface{}, res *Membership) []MembershipStateCodeEnum { - a, ok := i.([]interface{}) - if !ok { - return []MembershipStateCodeEnum{} - } - - if len(a) == 0 { - return []MembershipStateCodeEnum{} - } - - items := make([]MembershipStateCodeEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenMembershipStateCodeEnum(item.(interface{}))) - } - - return items -} - -// flattenMembershipStateCodeEnum asserts that an interface is a string, and returns a -// pointer to a *MembershipStateCodeEnum with the same value as that string. 
-func flattenMembershipStateCodeEnum(i interface{}) *MembershipStateCodeEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return MembershipStateCodeEnumRef(s) -} - -// flattenMembershipInfrastructureTypeEnumMap flattens the contents of MembershipInfrastructureTypeEnum from a JSON -// response object. -func flattenMembershipInfrastructureTypeEnumMap(c *Client, i interface{}, res *Membership) map[string]MembershipInfrastructureTypeEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]MembershipInfrastructureTypeEnum{} - } - - if len(a) == 0 { - return map[string]MembershipInfrastructureTypeEnum{} - } - - items := make(map[string]MembershipInfrastructureTypeEnum) - for k, item := range a { - items[k] = *flattenMembershipInfrastructureTypeEnum(item.(interface{})) - } - - return items -} - -// flattenMembershipInfrastructureTypeEnumSlice flattens the contents of MembershipInfrastructureTypeEnum from a JSON -// response object. -func flattenMembershipInfrastructureTypeEnumSlice(c *Client, i interface{}, res *Membership) []MembershipInfrastructureTypeEnum { - a, ok := i.([]interface{}) - if !ok { - return []MembershipInfrastructureTypeEnum{} - } - - if len(a) == 0 { - return []MembershipInfrastructureTypeEnum{} - } - - items := make([]MembershipInfrastructureTypeEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenMembershipInfrastructureTypeEnum(item.(interface{}))) - } - - return items -} - -// flattenMembershipInfrastructureTypeEnum asserts that an interface is a string, and returns a -// pointer to a *MembershipInfrastructureTypeEnum with the same value as that string. 
-func flattenMembershipInfrastructureTypeEnum(i interface{}) *MembershipInfrastructureTypeEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return MembershipInfrastructureTypeEnumRef(s) -} - -// This function returns a matcher that checks whether a serialized resource matches this resource -// in its parameters (as defined by the fields in a Get, which definitionally define resource -// identity). This is useful in extracting the element from a List call. -func (r *Membership) matcher(c *Client) func([]byte) bool { - return func(b []byte) bool { - cr, err := unmarshalMembership(b, c, r) - if err != nil { - c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.") - return false - } - nr := r.urlNormalized() - ncr := cr.urlNormalized() - c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr) - - if nr.Project == nil && ncr.Project == nil { - c.Config.Logger.Info("Both Project fields null - considering equal.") - } else if nr.Project == nil || ncr.Project == nil { - c.Config.Logger.Info("Only one Project field is null - considering unequal.") - return false - } else if *nr.Project != *ncr.Project { - return false - } - if nr.Location == nil && ncr.Location == nil { - c.Config.Logger.Info("Both Location fields null - considering equal.") - } else if nr.Location == nil || ncr.Location == nil { - c.Config.Logger.Info("Only one Location field is null - considering unequal.") - return false - } else if *nr.Location != *ncr.Location { - return false - } - if nr.Name == nil && ncr.Name == nil { - c.Config.Logger.Info("Both Name fields null - considering equal.") - } else if nr.Name == nil || ncr.Name == nil { - c.Config.Logger.Info("Only one Name field is null - considering unequal.") - return false - } else if *nr.Name != *ncr.Name { - return false - } - return true - } -} - -type membershipDiff struct { - // The diff should include one or the other of RequiresRecreate or UpdateOp. 
- RequiresRecreate bool - UpdateOp membershipApiOperation - FieldName string // used for error logging -} - -func convertFieldDiffsToMembershipDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]membershipDiff, error) { - opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff) - // Map each operation name to the field diffs associated with it. - for _, fd := range fds { - for _, ro := range fd.ResultingOperation { - if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok { - fieldDiffs = append(fieldDiffs, fd) - opNamesToFieldDiffs[ro] = fieldDiffs - } else { - config.Logger.Infof("%s required due to diff: %v", ro, fd) - opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd} - } - } - } - var diffs []membershipDiff - // For each operation name, create a membershipDiff which contains the operation. - for opName, fieldDiffs := range opNamesToFieldDiffs { - // Use the first field diff's field name for logging required recreate error. - diff := membershipDiff{FieldName: fieldDiffs[0].FieldName} - if opName == "Recreate" { - diff.RequiresRecreate = true - } else { - apiOp, err := convertOpNameToMembershipApiOperation(opName, fieldDiffs, opts...) - if err != nil { - return diffs, err - } - diff.UpdateOp = apiOp - } - diffs = append(diffs, diff) - } - return diffs, nil -} - -func convertOpNameToMembershipApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (membershipApiOperation, error) { - switch opName { - - case "updateMembershipUpdateMembershipOperation": - return &updateMembershipUpdateMembershipOperation{FieldDiffs: fieldDiffs}, nil - - default: - return nil, fmt.Errorf("no such operation with name: %v", opName) - } -} - -func extractMembershipFields(r *Membership) error { - vEndpoint := r.Endpoint - if vEndpoint == nil { - // note: explicitly not the empty object. 
- vEndpoint = &MembershipEndpoint{} - } - if err := extractMembershipEndpointFields(r, vEndpoint); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vEndpoint) { - r.Endpoint = vEndpoint - } - vState := r.State - if vState == nil { - // note: explicitly not the empty object. - vState = &MembershipState{} - } - if err := extractMembershipStateFields(r, vState); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vState) { - r.State = vState - } - vAuthority := r.Authority - if vAuthority == nil { - // note: explicitly not the empty object. - vAuthority = &MembershipAuthority{} - } - if err := extractMembershipAuthorityFields(r, vAuthority); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAuthority) { - r.Authority = vAuthority - } - return nil -} -func extractMembershipEndpointFields(r *Membership, o *MembershipEndpoint) error { - vGkeCluster := o.GkeCluster - if vGkeCluster == nil { - // note: explicitly not the empty object. - vGkeCluster = &MembershipEndpointGkeCluster{} - } - if err := extractMembershipEndpointGkeClusterFields(r, vGkeCluster); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vGkeCluster) { - o.GkeCluster = vGkeCluster - } - vKubernetesMetadata := o.KubernetesMetadata - if vKubernetesMetadata == nil { - // note: explicitly not the empty object. - vKubernetesMetadata = &MembershipEndpointKubernetesMetadata{} - } - if err := extractMembershipEndpointKubernetesMetadataFields(r, vKubernetesMetadata); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vKubernetesMetadata) { - o.KubernetesMetadata = vKubernetesMetadata - } - vKubernetesResource := o.KubernetesResource - if vKubernetesResource == nil { - // note: explicitly not the empty object. 
- vKubernetesResource = &MembershipEndpointKubernetesResource{} - } - if err := extractMembershipEndpointKubernetesResourceFields(r, vKubernetesResource); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vKubernetesResource) { - o.KubernetesResource = vKubernetesResource - } - return nil -} -func extractMembershipEndpointGkeClusterFields(r *Membership, o *MembershipEndpointGkeCluster) error { - return nil -} -func extractMembershipEndpointKubernetesMetadataFields(r *Membership, o *MembershipEndpointKubernetesMetadata) error { - return nil -} -func extractMembershipEndpointKubernetesResourceFields(r *Membership, o *MembershipEndpointKubernetesResource) error { - vResourceOptions := o.ResourceOptions - if vResourceOptions == nil { - // note: explicitly not the empty object. - vResourceOptions = &MembershipEndpointKubernetesResourceResourceOptions{} - } - if err := extractMembershipEndpointKubernetesResourceResourceOptionsFields(r, vResourceOptions); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vResourceOptions) { - o.ResourceOptions = vResourceOptions - } - return nil -} -func extractMembershipEndpointKubernetesResourceMembershipResourcesFields(r *Membership, o *MembershipEndpointKubernetesResourceMembershipResources) error { - return nil -} -func extractMembershipEndpointKubernetesResourceConnectResourcesFields(r *Membership, o *MembershipEndpointKubernetesResourceConnectResources) error { - return nil -} -func extractMembershipEndpointKubernetesResourceResourceOptionsFields(r *Membership, o *MembershipEndpointKubernetesResourceResourceOptions) error { - return nil -} -func extractMembershipStateFields(r *Membership, o *MembershipState) error { - return nil -} -func extractMembershipAuthorityFields(r *Membership, o *MembershipAuthority) error { - return nil -} - -func postReadExtractMembershipFields(r *Membership) error { - vEndpoint := r.Endpoint - if vEndpoint == nil { - // note: explicitly not the empty object. 
- vEndpoint = &MembershipEndpoint{} - } - if err := postReadExtractMembershipEndpointFields(r, vEndpoint); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vEndpoint) { - r.Endpoint = vEndpoint - } - vState := r.State - if vState == nil { - // note: explicitly not the empty object. - vState = &MembershipState{} - } - if err := postReadExtractMembershipStateFields(r, vState); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vState) { - r.State = vState - } - vAuthority := r.Authority - if vAuthority == nil { - // note: explicitly not the empty object. - vAuthority = &MembershipAuthority{} - } - if err := postReadExtractMembershipAuthorityFields(r, vAuthority); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAuthority) { - r.Authority = vAuthority - } - return nil -} -func postReadExtractMembershipEndpointFields(r *Membership, o *MembershipEndpoint) error { - vGkeCluster := o.GkeCluster - if vGkeCluster == nil { - // note: explicitly not the empty object. - vGkeCluster = &MembershipEndpointGkeCluster{} - } - if err := extractMembershipEndpointGkeClusterFields(r, vGkeCluster); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vGkeCluster) { - o.GkeCluster = vGkeCluster - } - vKubernetesMetadata := o.KubernetesMetadata - if vKubernetesMetadata == nil { - // note: explicitly not the empty object. - vKubernetesMetadata = &MembershipEndpointKubernetesMetadata{} - } - if err := extractMembershipEndpointKubernetesMetadataFields(r, vKubernetesMetadata); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vKubernetesMetadata) { - o.KubernetesMetadata = vKubernetesMetadata - } - vKubernetesResource := o.KubernetesResource - if vKubernetesResource == nil { - // note: explicitly not the empty object. 
- vKubernetesResource = &MembershipEndpointKubernetesResource{} - } - if err := extractMembershipEndpointKubernetesResourceFields(r, vKubernetesResource); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vKubernetesResource) { - o.KubernetesResource = vKubernetesResource - } - return nil -} -func postReadExtractMembershipEndpointGkeClusterFields(r *Membership, o *MembershipEndpointGkeCluster) error { - return nil -} -func postReadExtractMembershipEndpointKubernetesMetadataFields(r *Membership, o *MembershipEndpointKubernetesMetadata) error { - return nil -} -func postReadExtractMembershipEndpointKubernetesResourceFields(r *Membership, o *MembershipEndpointKubernetesResource) error { - vResourceOptions := o.ResourceOptions - if vResourceOptions == nil { - // note: explicitly not the empty object. - vResourceOptions = &MembershipEndpointKubernetesResourceResourceOptions{} - } - if err := extractMembershipEndpointKubernetesResourceResourceOptionsFields(r, vResourceOptions); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vResourceOptions) { - o.ResourceOptions = vResourceOptions - } - return nil -} -func postReadExtractMembershipEndpointKubernetesResourceMembershipResourcesFields(r *Membership, o *MembershipEndpointKubernetesResourceMembershipResources) error { - return nil -} -func postReadExtractMembershipEndpointKubernetesResourceConnectResourcesFields(r *Membership, o *MembershipEndpointKubernetesResourceConnectResources) error { - return nil -} -func postReadExtractMembershipEndpointKubernetesResourceResourceOptionsFields(r *Membership, o *MembershipEndpointKubernetesResourceResourceOptions) error { - return nil -} -func postReadExtractMembershipStateFields(r *Membership, o *MembershipState) error { - return nil -} -func postReadExtractMembershipAuthorityFields(r *Membership, o *MembershipAuthority) error { - return nil -} From e08f6a852c9ae9f4780c3e6b79e47da34663d01f Mon Sep 17 00:00:00 2001 From: Chris Hawk Date: Thu, 19 Mar 2026 
11:48:39 -0700 Subject: [PATCH 13/13] Fix some nonsensical template inclusions --- .../terraform/services/cloudbuild/worker_pool.go.tmpl | 5 ----- .../terraform/services/clouddeploy/delivery_pipeline.go.tmpl | 5 ----- .../terraform/services/clouddeploy/target.go.tmpl | 5 ----- .../terraform/services/containeraws/cluster.go.tmpl | 5 ----- .../terraform/services/containeraws/node_pool.go.tmpl | 5 ----- .../terraform/services/containerazure/azure_client.go.tmpl | 5 ----- .../terraform/services/containerazure/cluster.go.tmpl | 5 ----- .../terraform/services/containerazure/node_pool.go.tmpl | 5 ----- mmv1/third_party/terraform/services/dataplex/asset.go.tmpl | 5 ----- mmv1/third_party/terraform/services/dataplex/lake.go.tmpl | 5 ----- mmv1/third_party/terraform/services/dataplex/zone.go.tmpl | 5 ----- .../terraform/services/dataproc/workflow_template.go.tmpl | 5 ----- .../terraform/services/firebaserules/release.go.tmpl | 5 ----- .../terraform/services/firebaserules/ruleset.go.tmpl | 5 ----- .../terraform/services/recaptchaenterprise/key.go.tmpl | 5 ----- 15 files changed, 75 deletions(-) diff --git a/mmv1/third_party/terraform/services/cloudbuild/worker_pool.go.tmpl b/mmv1/third_party/terraform/services/cloudbuild/worker_pool.go.tmpl index d3cc6238c479..df7db9698429 100644 --- a/mmv1/third_party/terraform/services/cloudbuild/worker_pool.go.tmpl +++ b/mmv1/third_party/terraform/services/cloudbuild/worker_pool.go.tmpl @@ -7,13 +7,8 @@ import ( "fmt" "time" -{{- if ne $.TargetVersionName "ga" }} - "google.golang.org/api/googleapi" -{{- end }} dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" -{{- if eq $.TargetVersionName "ga" }} "google.golang.org/api/googleapi" -{{- end }} ) type WorkerPool struct { diff --git a/mmv1/third_party/terraform/services/clouddeploy/delivery_pipeline.go.tmpl b/mmv1/third_party/terraform/services/clouddeploy/delivery_pipeline.go.tmpl index 40a981ea38fa..e7b98ee13ff5 100644 --- 
a/mmv1/third_party/terraform/services/clouddeploy/delivery_pipeline.go.tmpl +++ b/mmv1/third_party/terraform/services/clouddeploy/delivery_pipeline.go.tmpl @@ -8,13 +8,8 @@ import ( "fmt" "time" -{{- if ne $.TargetVersionName "ga" }} - "google.golang.org/api/googleapi" -{{- end }} dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" -{{- if eq $.TargetVersionName "ga" }} "google.golang.org/api/googleapi" -{{- end }} ) type DeliveryPipeline struct { diff --git a/mmv1/third_party/terraform/services/clouddeploy/target.go.tmpl b/mmv1/third_party/terraform/services/clouddeploy/target.go.tmpl index be22bd836d1a..66a10373229c 100644 --- a/mmv1/third_party/terraform/services/clouddeploy/target.go.tmpl +++ b/mmv1/third_party/terraform/services/clouddeploy/target.go.tmpl @@ -8,13 +8,8 @@ import ( "fmt" "time" -{{- if ne $.TargetVersionName "ga" }} - "google.golang.org/api/googleapi" -{{- end }} dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" -{{- if eq $.TargetVersionName "ga" }} "google.golang.org/api/googleapi" -{{- end }} ) type Target struct { diff --git a/mmv1/third_party/terraform/services/containeraws/cluster.go.tmpl b/mmv1/third_party/terraform/services/containeraws/cluster.go.tmpl index 9a665ed8eb63..5960eaffb6f0 100644 --- a/mmv1/third_party/terraform/services/containeraws/cluster.go.tmpl +++ b/mmv1/third_party/terraform/services/containeraws/cluster.go.tmpl @@ -7,13 +7,8 @@ import ( "fmt" "time" -{{- if ne $.TargetVersionName "ga" }} - "google.golang.org/api/googleapi" -{{- end }} dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" -{{- if eq $.TargetVersionName "ga" }} "google.golang.org/api/googleapi" -{{- end }} ) type Cluster struct { diff --git a/mmv1/third_party/terraform/services/containeraws/node_pool.go.tmpl b/mmv1/third_party/terraform/services/containeraws/node_pool.go.tmpl index 1f8e1656635d..45c432b3df10 100644 --- 
a/mmv1/third_party/terraform/services/containeraws/node_pool.go.tmpl +++ b/mmv1/third_party/terraform/services/containeraws/node_pool.go.tmpl @@ -7,13 +7,8 @@ import ( "fmt" "time" -{{- if ne $.TargetVersionName "ga" }} - "google.golang.org/api/googleapi" -{{- end }} dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" -{{- if eq $.TargetVersionName "ga" }} "google.golang.org/api/googleapi" -{{- end }} ) type NodePool struct { diff --git a/mmv1/third_party/terraform/services/containerazure/azure_client.go.tmpl b/mmv1/third_party/terraform/services/containerazure/azure_client.go.tmpl index 0b2059bcf98f..0ffd0569a6c8 100644 --- a/mmv1/third_party/terraform/services/containerazure/azure_client.go.tmpl +++ b/mmv1/third_party/terraform/services/containerazure/azure_client.go.tmpl @@ -5,13 +5,8 @@ import ( "fmt" "time" -{{- if ne $.TargetVersionName "ga" }} - "google.golang.org/api/googleapi" -{{- end }} dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" -{{- if eq $.TargetVersionName "ga" }} "google.golang.org/api/googleapi" -{{- end }} ) type AzureClient struct { diff --git a/mmv1/third_party/terraform/services/containerazure/cluster.go.tmpl b/mmv1/third_party/terraform/services/containerazure/cluster.go.tmpl index bd373a5f2370..db6508893347 100644 --- a/mmv1/third_party/terraform/services/containerazure/cluster.go.tmpl +++ b/mmv1/third_party/terraform/services/containerazure/cluster.go.tmpl @@ -7,13 +7,8 @@ import ( "fmt" "time" -{{- if ne $.TargetVersionName "ga" }} - "google.golang.org/api/googleapi" -{{- end }} dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" -{{- if eq $.TargetVersionName "ga" }} "google.golang.org/api/googleapi" -{{- end }} ) type Cluster struct { diff --git a/mmv1/third_party/terraform/services/containerazure/node_pool.go.tmpl b/mmv1/third_party/terraform/services/containerazure/node_pool.go.tmpl index fbd7714e9269..57ee941b9e3d 100644 --- 
a/mmv1/third_party/terraform/services/containerazure/node_pool.go.tmpl +++ b/mmv1/third_party/terraform/services/containerazure/node_pool.go.tmpl @@ -7,13 +7,8 @@ import ( "fmt" "time" -{{- if ne $.TargetVersionName "ga" }} - "google.golang.org/api/googleapi" -{{- end }} dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" -{{- if eq $.TargetVersionName "ga" }} "google.golang.org/api/googleapi" -{{- end }} ) type NodePool struct { diff --git a/mmv1/third_party/terraform/services/dataplex/asset.go.tmpl b/mmv1/third_party/terraform/services/dataplex/asset.go.tmpl index abedea89dad7..70636673ad5f 100644 --- a/mmv1/third_party/terraform/services/dataplex/asset.go.tmpl +++ b/mmv1/third_party/terraform/services/dataplex/asset.go.tmpl @@ -8,13 +8,8 @@ import ( "fmt" "time" -{{- if ne $.TargetVersionName "ga" }} - "google.golang.org/api/googleapi" -{{- end }} dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" -{{- if eq $.TargetVersionName "ga" }} "google.golang.org/api/googleapi" -{{- end }} ) type Asset struct { diff --git a/mmv1/third_party/terraform/services/dataplex/lake.go.tmpl b/mmv1/third_party/terraform/services/dataplex/lake.go.tmpl index 2fca89b04ce8..73ec65e888fc 100644 --- a/mmv1/third_party/terraform/services/dataplex/lake.go.tmpl +++ b/mmv1/third_party/terraform/services/dataplex/lake.go.tmpl @@ -8,13 +8,8 @@ import ( "fmt" "time" -{{- if ne $.TargetVersionName "ga" }} - "google.golang.org/api/googleapi" -{{- end }} dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" -{{- if eq $.TargetVersionName "ga" }} "google.golang.org/api/googleapi" -{{- end }} ) type Lake struct { diff --git a/mmv1/third_party/terraform/services/dataplex/zone.go.tmpl b/mmv1/third_party/terraform/services/dataplex/zone.go.tmpl index 94bc6ef6008d..1b0e81b596af 100644 --- a/mmv1/third_party/terraform/services/dataplex/zone.go.tmpl +++ b/mmv1/third_party/terraform/services/dataplex/zone.go.tmpl @@ -8,13 +8,8 @@ import ( 
"fmt" "time" -{{- if ne $.TargetVersionName "ga" }} - "google.golang.org/api/googleapi" -{{- end }} dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" -{{- if eq $.TargetVersionName "ga" }} "google.golang.org/api/googleapi" -{{- end }} ) type Zone struct { diff --git a/mmv1/third_party/terraform/services/dataproc/workflow_template.go.tmpl b/mmv1/third_party/terraform/services/dataproc/workflow_template.go.tmpl index 61f89747e24d..078b43778b7e 100644 --- a/mmv1/third_party/terraform/services/dataproc/workflow_template.go.tmpl +++ b/mmv1/third_party/terraform/services/dataproc/workflow_template.go.tmpl @@ -7,13 +7,8 @@ import ( "fmt" "time" -{{- if ne $.TargetVersionName "ga" }} - "google.golang.org/api/googleapi" -{{- end }} dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" -{{- if eq $.TargetVersionName "ga" }} "google.golang.org/api/googleapi" -{{- end }} ) type WorkflowTemplate struct { diff --git a/mmv1/third_party/terraform/services/firebaserules/release.go.tmpl b/mmv1/third_party/terraform/services/firebaserules/release.go.tmpl index dc32f3cb5212..eda3172fc83d 100644 --- a/mmv1/third_party/terraform/services/firebaserules/release.go.tmpl +++ b/mmv1/third_party/terraform/services/firebaserules/release.go.tmpl @@ -5,13 +5,8 @@ import ( "fmt" "time" -{{- if ne $.TargetVersionName "ga" }} - "google.golang.org/api/googleapi" -{{- end }} dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" -{{- if eq $.TargetVersionName "ga" }} "google.golang.org/api/googleapi" -{{- end }} ) type Release struct { diff --git a/mmv1/third_party/terraform/services/firebaserules/ruleset.go.tmpl b/mmv1/third_party/terraform/services/firebaserules/ruleset.go.tmpl index 36b1055d12f4..dbbd77c93409 100644 --- a/mmv1/third_party/terraform/services/firebaserules/ruleset.go.tmpl +++ b/mmv1/third_party/terraform/services/firebaserules/ruleset.go.tmpl @@ -7,13 +7,8 @@ import ( "fmt" "time" -{{- if ne $.TargetVersionName "ga" 
}} - "google.golang.org/api/googleapi" -{{- end }} dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" -{{- if eq $.TargetVersionName "ga" }} "google.golang.org/api/googleapi" -{{- end }} ) type Ruleset struct { diff --git a/mmv1/third_party/terraform/services/recaptchaenterprise/key.go.tmpl b/mmv1/third_party/terraform/services/recaptchaenterprise/key.go.tmpl index 197f20db5df8..bb30fd396385 100644 --- a/mmv1/third_party/terraform/services/recaptchaenterprise/key.go.tmpl +++ b/mmv1/third_party/terraform/services/recaptchaenterprise/key.go.tmpl @@ -7,13 +7,8 @@ import ( "fmt" "time" -{{- if ne $.TargetVersionName "ga" }} - "google.golang.org/api/googleapi" -{{- end }} dcl "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" -{{- if eq $.TargetVersionName "ga" }} "google.golang.org/api/googleapi" -{{- end }} ) type Key struct {